func (memory *MemoryMap) Allocate(
    memtype MemoryType,
    start platform.Paddr,
    end platform.Paddr,
    size uint64,
    top bool) (platform.Paddr, []byte, error) {

    if top {
        for ; end >= start; end -= platform.PageSize {
            mmap, _ := memory.Map(memtype, end, size, true)
            if mmap != nil {
                return end, mmap, nil
            }
        }
    } else {
        for ; start <= end; start += platform.PageSize {
            mmap, _ := memory.Map(memtype, start, size, true)
            if mmap != nil {
                return start, mmap, nil
            }
        }
    }

    // Couldn't find available memory.
    return platform.Paddr(0), nil, MemoryNotFound
}
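// A minimal usage sketch for Allocate, assuming a populated MemoryMap: find
// space for a blob as low as possible in the given range and copy it in.
// (The Load helper below wraps exactly this allocate-then-copy pattern.)
// The placeLow name is hypothetical.
func placeLow(memory *MemoryMap, data []byte) (platform.Paddr, error) {
    addr, mmap, err := memory.Allocate(
        MemoryTypeUser,
        platform.Paddr(0), // Lowest candidate address.
        memory.Max(),      // Highest candidate address.
        uint64(len(data)),
        false) // Scan bottom-up.
    if err != nil {
        return platform.Paddr(0), err
    }
    copy(mmap, data)
    return addr, nil
}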
func (vchannel *VirtioChannel) remap() error {

    if vchannel.QueueAddress.Value != 0 {
        // Can we map this address?
        vchannel_size := C.vring_size(
            C.uint(vchannel.QueueSize.Value),
            platform.PageSize)

        mmap, err := vchannel.VirtioDevice.mmap(
            platform.Paddr(4096*vchannel.QueueAddress.Value),
            uint64(vchannel_size))
        if err != nil {
            return err
        }

        // Initialize the ring.
        C.vring_init(
            &vchannel.vring,
            C.uint(vchannel.QueueSize.Value),
            unsafe.Pointer(&mmap[0]),
            platform.PageSize)

        // Notify the consumer.
        vchannel.notifications <- VirtioNotification{}

    } else {
        // Leave the address cleared. No notifications
        // will be processed as per the Write() function.
        vchannel.Consumed = 0
    }

    return nil
}
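// For reference, a hedged sketch of the address decoding used in remap: the
// legacy virtio PCI interface stores the queue address as a 4 KiB page frame
// number, which is why the register value is multiplied by 4096 above. The
// helper name is hypothetical.
func queuePhysAddr(pfn uint32) platform.Paddr {
    // A guest write of 0x1234 places the ring at physical 0x1234000.
    return platform.Paddr(uint64(pfn) * 4096)
}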
func (msix *MsiXDevice) SendInterrupt(vector int) error {

    // Figure out our vector.
    entry := msix.FindEntry(vector)
    if entry == nil {
        // Nothing?
        msix.Debug("msix signal invalid entry?")
        return PciMSIError
    }

    if msix.IsMasked(vector) {
        // Set our pending bit.
        msix.SetPending(vector)
        return nil
    }

    // Clear our pending bit.
    msix.ClearPending(vector)

    // Read our address and value.
    paddr := entry.Address.Value
    data := entry.Data.Value

    msix.Debug(
        "msix signal sending %x @ %x",
        data,
        paddr)

    return msix.msi_interrupt(platform.Paddr(paddr), uint32(data))
}
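// Usage sketch, assuming a configured MsiXDevice: callers simply fire the
// vector and rely on the mask/pending handling above. If the guest has masked
// the vector, SendInterrupt latches the pending bit for later delivery, so no
// retry logic is needed here. The notifyCompletion name and the choice of
// vector 0 are illustrative only.
func notifyCompletion(msix *MsiXDevice) error {
    return msix.SendInterrupt(0)
}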
//export doLoad
func doLoad(
    self unsafe.Pointer,
    offset C.size_t,
    source unsafe.Pointer,
    length C.size_t) C.int {

    model := (*machine.Model)(self)

    // Bump up the size to the end of the page.
    new_length := platform.Align(uint64(length), platform.PageSize, true)

    // Allocate the backing data.
    data, err := model.Map(
        machine.MemoryTypeUser,
        platform.Paddr(offset),
        new_length,
        true)
    if err != nil {
        // Things are broken.
        log.Print("Error during ElfLoad: ", err)
        return -C.int(syscall.EINVAL)
    }

    // Copy the data in.
    C.memcpy(unsafe.Pointer(&data[0]), source, length)

    // All good.
    return 0
}
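// A hedged illustration of the rounding performed by platform.Align above;
// the signature is inferred from its use in doLoad (value, alignment,
// round-up flag). The alignExample name is hypothetical.
func alignExample() {
    // A 5-byte segment is bumped up to a full 4 KiB page, as in doLoad.
    aligned := platform.Align(uint64(5), platform.PageSize, true)
    log.Printf("aligned length: %#x", aligned) // 0x1000
}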
func (bios *Bios) Attach(vm *platform.Vm, model *Model) error {

    // Reserve our basic "BIOS" memory.
    // This is done simply to match expectations.
    // Nothing should be allocated in the first page.
    err := model.Reserve(
        vm,
        bios,
        MemoryTypeReserved,
        platform.Paddr(0), // Start.
        platform.PageSize, // Size.
        nil)
    if err != nil {
        return err
    }

    // Now reserve our TSS.
    err = model.Reserve(
        vm,
        bios,
        MemoryTypeSpecial,
        bios.TSSAddr,
        vm.SizeSpecialMemory(),
        nil)
    if err != nil {
        return err
    }

    // Finish the region: reserve everything from the end of
    // the TSS up to the top of the 32-bit address space (4G).
    tss_end := bios.TSSAddr.After(vm.SizeSpecialMemory())
    err = model.Reserve(
        vm,
        bios,
        MemoryTypeReserved,
        tss_end,
        uint64(platform.Paddr(0x100000000)-tss_end),
        nil)
    if err != nil {
        return err
    }

    // We're good.
    return nil
}
func (memory *MemoryMap) Max() platform.Paddr {

    if len(*memory) == 0 {
        // No memory available?
        return platform.Paddr(0)
    }

    // Return the highest available address.
    top := (*memory)[len(*memory)-1]
    return top.End()
}
func (pcidevice *PciDevice) RebuildBars() {

    // Build our IO Handlers.
    pcidevice.IoHandlers = make(IoHandlers)

    for i := uint(0); i < pcidevice.PciBarCount; i += 1 {

        barreg := int(0x10 + (i * 4))
        baraddr := pcidevice.Config.Get32(barreg)
        barsize, size_ok := pcidevice.PciBarSizes[i]
        barops, ops_ok := pcidevice.PciBarOps[i]
        if !size_ok || !ops_ok {
            // Not supported?
            pcidevice.Config.Set32(barreg, 0xffffffff)
            continue
        }

        // Mask out the size bits, and set the low flag bits
        // (0xe), leaving bit zero clear to mark this as a
        // memory (not port-I/O) BAR.
        newreg := baraddr&^(barsize-1) | 0xe
        if newreg != baraddr {
            pcidevice.Debug(
                "bar %d @ %x -> %x",
                i,
                baraddr,
                newreg)
        }

        // Save the new register value.
        pcidevice.Config.Set32(barreg, newreg)

        // Create a new handler for this region.
        region := MemoryRegion{
            platform.Paddr(baraddr & ^uint32(0xf)),
            uint64(barsize)}
        pcidevice.IoHandlers[region] = NewIoHandler(
            pcidevice,
            region.Start,
            barops)
    }
}
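// For context, a hedged sketch of the guest-side handshake that RebuildBars
// answers: the guest writes all-ones to a BAR, reads it back, and decodes the
// region size from the bits that stayed zero. The config-space accessors are
// hypothetical stand-ins, not part of this codebase.
func probeBarSize(read func(reg int) uint32, write func(reg int, val uint32), bar uint) uint32 {
    reg := int(0x10 + bar*4)
    orig := read(reg)      // Save the programmed address.
    write(reg, 0xffffffff) // Expose the size mask.
    sized := read(reg)
    write(reg, orig) // Restore the original address.
    // Clear the low flag bits, then invert and add one to recover the size.
    return ^(sized &^ uint32(0xf)) + 1
}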
func (memory *MemoryMap) Load(
    start platform.Paddr,
    end platform.Paddr,
    data []byte,
    top bool) (platform.Paddr, error) {

    // Allocate the backing data.
    addr, backing_mmap, err := memory.Allocate(
        MemoryTypeUser,
        start,
        end,
        uint64(len(data)),
        top)
    if err != nil {
        return platform.Paddr(0), err
    }

    // Copy it in.
    copy(backing_mmap, data)

    // We're good.
    return addr, nil
}
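// Usage sketch combining Load with Max (defined above): place a blob as high
// as possible anywhere in guest memory, as LoadLinux does below for the
// ramdisk. The loadHigh name is hypothetical.
func loadHigh(memory *MemoryMap, data []byte) (platform.Paddr, error) {
    return memory.Load(platform.Paddr(0), memory.Max(), data, true)
}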
func (user *UserMemory) Layout(
    vm *platform.Vm,
    model *Model,
    start uint64,
    memory uint64) error {

    // Try to place our user memory.
    // NOTE: This will be called after all devices
    // have reserved appropriate memory regions, so
    // we will not conflict with anything else.
    last_top := platform.Paddr(0)

    sort.Sort(&model.MemoryMap)

    for i := 0; i < len(model.MemoryMap) && memory > 0; i += 1 {

        region := model.MemoryMap[i]

        if last_top != region.Start {

            // How much can we do here?
            gap := uint64(region.Start) - uint64(last_top)
            if gap > memory {
                gap = memory
                memory = 0
            } else {
                memory -= gap
            }

            user.Debug(
                "physical [%x,%x] -> file [%x,%x]",
                last_top, uint64(last_top)+gap-1,
                start, start+gap-1)

            // Allocate the bits.
            err := model.Reserve(
                vm,
                user,
                MemoryTypeUser,
                last_top,
                gap,
                user.mmap[start:start+gap])
            if err != nil {
                return err
            }

            // Remember this.
            user.Allocated = append(
                user.Allocated,
                UserMemorySegment{
                    start,
                    MemoryRegion{last_top, gap}})

            // Move ahead in the backing store.
            start += gap
        }

        // Remember the top of this region.
        last_top = region.Start.After(region.Size)
    }

    if memory > 0 {
        // Place any remainder above the last region.
        err := model.Reserve(
            vm,
            user,
            MemoryTypeUser,
            last_top,
            memory,
            user.mmap[start:])
        if err != nil {
            return err
        }
    }

    // All is good.
    return nil
}
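// A self-contained sketch of the same gap-filling walk, with simplified types
// (not the real MemoryMap): distribute `need` bytes of user memory into the
// holes between sorted, non-overlapping reserved regions, spilling any
// remainder above the last region.
type span struct{ start, size uint64 }

func fillGaps(reserved []span, need uint64) []span {
    var out []span
    var last uint64
    for _, r := range reserved {
        if need == 0 {
            break
        }
        if r.start > last {
            // A hole before this region; fill as much as we can.
            gap := r.start - last
            if gap > need {
                gap = need
            }
            out = append(out, span{last, gap})
            need -= gap
        }
        last = r.start + r.size
    }
    if need > 0 {
        // Whatever is left goes above the last region.
        out = append(out, span{last, need})
    }
    return out
}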
func (vchannel *VirtioChannel) processOne(n uint16) error {

    var buf *VirtioBuffer
    var addr C.__u64
    var length C.__u32
    var buf_flags C.__u16
    var next C.__u16
    index := C.__u16(n)

    vchannel.Debug(
        "vqueue#%d incoming slot [%d]",
        vchannel.Channel,
        index)

    for {
        // Read the entry.
        C.vring_get_index(
            &vchannel.vring,
            index,
            &addr,
            &length,
            &buf_flags,
            &next)

        // Decode the descriptor flags.
        has_next := (buf_flags & C.__u16(C.VirtioDescFNext)) != C.__u16(0)
        is_write := (buf_flags & C.__u16(C.VirtioDescFWrite)) != C.__u16(0)
        is_indirect := (buf_flags & C.__u16(C.VirtioDescFIndirect)) != C.__u16(0)

        // Do we have a buffer?
        if buf == nil {
            buf = NewVirtioBuffer(uint16(index), !is_write)
        }

        if is_indirect {
            // FIXME: Map all indirect buffers.
            log.Printf("WARNING: Indirect buffers not supported.")

        } else {
            // Map the given address.
            vchannel.Debug("vqueue#%d map [%x-%x]",
                vchannel.Channel,
                platform.Paddr(addr),
                uint64(addr)+uint64(length)-1)

            data, err := vchannel.VirtioDevice.mmap(
                platform.Paddr(addr),
                uint64(length))
            if err != nil {
                log.Printf(
                    "Unable to map [%x,%x]? Flags are %x, next is %x.",
                    addr,
                    addr+C.__u64(length)-1,
                    buf_flags,
                    next)
                return err
            }

            // Append this segment.
            buf.Append(data)
        }

        // Are we finished?
        if !has_next {
            // Send these buffers.
            vchannel.Debug(
                "vqueue#%d processing slot [%d]",
                vchannel.Channel,
                buf.index)

            // Mark this as outstanding.
            vchannel.Outstanding[uint16(buf.index)] = true
            vchannel.incoming <- buf
            break

        } else {
            // Keep chaining.
            index = next
            vchannel.Debug(
                "vqueue#%d next slot [%d]",
                vchannel.Channel,
                index)
            continue
        }
    }

    // We're good.
    return nil
}
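// For reference, the descriptor flag values tested above, per the virtio
// specification, restated as Go constants; the C.VirtioDescF* names used in
// processOne presumably wrap these same values.
const (
    virtioDescFNext     = 0x1 // Descriptor chains via the `next` field.
    virtioDescFWrite    = 0x2 // Buffer is device-writable (device -> driver).
    virtioDescFIndirect = 0x4 // Buffer holds a table of indirect descriptors.
)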
func NewAcpi(info *DeviceInfo) (Device, error) {
    acpi := new(Acpi)
    acpi.Addr = platform.Paddr(0xf0000)
    return acpi, acpi.init(info)
}
func LoadLinux(
    vcpu *platform.Vcpu,
    model *machine.Model,
    boot_params string,
    vmlinux string,
    initrd string,
    cmdline string,
    system_map string) (SystemMap, *Convention, error) {

    // Read the boot_params.
    log.Print("loader: Reading kernel image...")
    kernel_data, err := ioutil.ReadFile(boot_params)
    if err != nil {
        return nil, nil, err
    }
    log.Printf("loader: Kernel is %d bytes.", len(kernel_data))

    // They may have passed the entire vmlinuz image as the
    // parameter here. That's okay, but only the first page
    // holds the boot params, so truncate the visible slice.
    boot_params_data := kernel_data[0:platform.PageSize]

    // Load the kernel.
    log.Print("loader: Reading kernel binary...")
    vmlinux_data, err := ioutil.ReadFile(vmlinux)
    if err != nil {
        return nil, nil, err
    }
    log.Printf("loader: Kernel binary is %d bytes.", len(vmlinux_data))

    // Load the ramdisk.
    log.Print("loader: Reading ramdisk...")
    initrd_data, err := ioutil.ReadFile(initrd)
    if err != nil {
        return nil, nil, err
    }
    log.Printf("loader: Ramdisk is %d bytes.", len(initrd_data))

    // Load the system map.
    log.Print("loader: Loading system map...")
    sysmap, err := LoadLinuxSystemMap(system_map)
    if err != nil {
        return nil, nil, err
    }

    // Load the kernel into memory.
    log.Print("loader: Loading kernel...")
    entry_point, is_64bit, err := ElfLoad(vmlinux_data, model)
    if err != nil {
        return nil, nil, err
    }
    if is_64bit {
        log.Print("loader: 64-bit kernel found.")
    } else {
        log.Print("loader: 32-bit kernel found.")
    }
    log.Printf("loader: Entry point is %08x.", entry_point)

    // Set our calling convention.
    var convention *Convention
    if is_64bit {
        convention = &Linux64Convention
    } else {
        convention = &Linux32Convention
    }

    // Load the cmdline.
    // NOTE: Here we create a full page with
    // trailing zeros. This is the expected form
    // for the command line.
    full_cmdline := make([]byte, platform.PageSize)
    copy(full_cmdline, []byte(cmdline))

    cmdline_addr, err := model.MemoryMap.Load(
        platform.Paddr(0),
        model.Max(),
        full_cmdline,
        false)
    if err != nil {
        return nil, nil, err
    }
    log.Printf("loader: cmdline @ %08x: %s", cmdline_addr, cmdline)

    // Load the initrd.
    initrd_addr, err := model.MemoryMap.Load(
        platform.Paddr(0),
        model.Max(),
        initrd_data,
        true)
    if err != nil {
        return nil, nil, err
    }
    log.Printf("loader: initrd @ %08x.", initrd_addr)

    // Create our setup page,
    // and initialize the VCPU.
    err = SetupLinux(
        vcpu,
        model,
        boot_params_data,
        entry_point,
        is_64bit,
        initrd_addr,
        uint64(len(initrd_data)),
        cmdline_addr)

    // Everything is okay.
    return sysmap, convention, err
}
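// Usage sketch, with all paths and the bootExample name hypothetical: load a
// kernel and pick up the calling convention for the boot VCPU.
func bootExample(vcpu *platform.Vcpu, model *machine.Model) error {
    sysmap, convention, err := LoadLinux(
        vcpu,
        model,
        "/boot/bzImage",    // boot_params (passing the whole image is fine).
        "/boot/vmlinux",    // Uncompressed ELF kernel.
        "/boot/initrd.img", // Ramdisk.
        "console=ttyS0",    // Kernel command line.
        "/boot/System.map") // Symbol map.
    if err != nil {
        return err
    }
    _ = sysmap     // Used for symbol resolution (e.g. tracing).
    _ = convention // Drives register setup for the entry point.
    return nil
}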