Example #1
0
// Attach reserves the IOApic and LApic pages in the memory model
// and creates the in-kernel irqchip for the given VM.
func (apic *Apic) Attach(vm *platform.Vm, model *Model) error {

	// Reserve one page for each APIC region.
	for _, addr := range []platform.Paddr{apic.IOApic, apic.LApic} {
		err := model.Reserve(
			vm,
			apic,
			MemoryTypeReserved,
			addr,
			platform.PageSize,
			nil)
		if err != nil {
			return err
		}
	}

	// Create our irqchip; its error (or nil) is our result.
	return vm.CreateIrqChip()
}
Example #2
0
// Attach wires this device up to the model's PciBus:
// it locates the bus, rebuilds capabilities, installs the
// (hard-coded) legacy interrupt and registers with the bus.
func (pcidevice *PciDevice) Attach(vm *platform.Vm, model *Model) error {

	// Locate the PciBus among the model's devices.
	var pcibus *PciBus
	for _, device := range model.devices {
		if bus, ok := device.(*PciBus); ok && bus != nil {
			pcibus = bus
			break
		}
	}
	if pcibus == nil {
		return PciBusNotFound
	}

	// Rebuild our capabilities.
	pcidevice.RebuildCapabilities()

	// FIXME: Everything uses interrupt 1.
	// This is gross, but we hard-coded the line to 1
	// unless you're using MSI. This really should be
	// fixed (if we actually plan on using PCI devices).
	pcidevice.Config.Set8(PciConfigOffsetInterruptLine, 1)
	pcidevice.Config.Set8(PciConfigOffsetInterruptPin, 1)
	pcidevice.std_interrupt = func() error {
		// Pulse the line: assert, then deassert.
		// NOTE(review): Interrupt() return values are
		// deliberately ignored here, as in the original.
		vm.Interrupt(platform.Irq(1), true)
		vm.Interrupt(platform.Irq(1), false)
		return nil
	}

	// Register with the PciBus.
	return pcibus.AddDevice(pcidevice)
}
// save promotes a frequently-hit I/O address to eventfd-based
// handling: once an address has been hit at least 100 times, a
// bound eventfd is created so the kernel can complete the write
// without a vmexit round-trip, and a goroutine replays each
// signal into the handler's queue using a fixed fake event.
func (cache *IoCache) save(
	vm *platform.Vm,
	addr platform.Paddr,
	handler *IoHandler,
	ioevent IoEvent,
	offset uint64) error {

	// Do we have sufficient hits?
	// Below the threshold we do nothing (not an error).
	if cache.hits[addr] < 100 {
		return nil
	}

	// Bind an eventfd.
	// Note that we pass in the exact address here,
	// not the address associated with the IOHandler.
	// A nil boundfd with a nil error means the binding
	// was declined; we also return nil in that case.
	boundfd, err := vm.NewBoundEventFd(
		addr,
		ioevent.Size(),
		cache.is_pio,
		true,
		ioevent.GetData())
	if err != nil || boundfd == nil {
		return err
	}

	// Create a fake event.
	// This is because the real event will actually
	// reach into the vcpu registers to get the data.
	// The fake event carries a fixed size and data value,
	// captured from the triggering event.
	fake_event := &WriteIoEvent{ioevent.Size(), ioevent.GetData()}

	// Run our function.
	// The goroutine owns boundfd and closes it on exit;
	// it terminates when Wait() fails or when the handler
	// stops asking for eventfd treatment.
	go func(ioevent IoEvent) {

		for {
			// Wait for the next event.
			_, err := boundfd.Wait()
			if err != nil {
				break
			}

			// Call our function.
			// We keep handling this device the same
			// way until it tells us to stop by returning
			// anything other than the SaveIO error.
			err = handler.queue.Submit(ioevent, offset)
			if err != SaveIO {
				break
			}
		}

		// Finished with the eventfd.
		boundfd.Close()

	}(fake_event)

	// Success.
	return nil
}
Example #4
0
// Attach creates the in-kernel PIT for the given VM.
// The model argument is unused; it is present only to
// satisfy the device attachment interface.
func (pit *Pit) Attach(vm *platform.Vm, model *Model) error {
	// Creating the PIT is all that is required;
	// propagate its error (or nil) directly.
	return vm.CreatePit()
}
Example #5
0
// Save snapshots the in-kernel PIT state into this device
// so it can be serialized later.
func (pit *Pit) Save(vm *platform.Vm) error {

	// Fetch and store the PIT state in one step.
	var err error
	if pit.Pit, err = vm.GetPit(); err != nil {
		return err
	}

	return nil
}
Example #6
0
// Save snapshots the in-kernel clock state into this device
// so it can be serialized later.
func (clock *Clock) Save(vm *platform.Vm) error {

	// Fetch and store the clock state in one step.
	var err error
	if clock.Clock, err = vm.GetClock(); err != nil {
		return err
	}

	return nil
}
Example #7
0
// Save snapshots the in-kernel irqchip state into this device
// so it can be serialized later.
func (apic *Apic) Save(vm *platform.Vm) error {

	// Fetch and store the irqchip state in one step.
	var err error
	if apic.State, err = vm.GetIrqChip(); err != nil {
		return err
	}

	return nil
}
Example #8
0
// Attach restores entry back-links, installs the MSI delivery
// callback, and then attaches the underlying PCI device.
func (msix *MsiXDevice) Attach(vm *platform.Vm, model *Model) error {

	// Restore parent pointers on each entry.
	// These transient links are lost in serialization.
	for _, e := range msix.Entries {
		e.MsiXDevice = msix
	}

	// Route MSI interrupts through the VM.
	msix.msi_interrupt = func(addr platform.Paddr, data uint32) error {
		return vm.SignalMSI(addr, data, 0)
	}

	// Delegate the rest to the embedded PciDevice.
	return msix.PciDevice.Attach(vm, model)
}
Example #9
0
// Reserve carves out [start, start+size) in the memory map for
// the given device and establishes the corresponding VM mapping.
// It returns MemoryUnaligned if the range (or the backing buffer,
// when provided) is not page-aligned, or any error from adding
// the region or performing the mapping.
func (memory *MemoryMap) Reserve(
	vm *platform.Vm,
	device Device,
	memtype MemoryType,
	start platform.Paddr,
	size uint64,
	user []byte) error {

	// Verbose messages.
	device.Debug(
		"reserving (type: %d) of size %x in [%x,%x]",
		memtype,
		size,
		start,
		start.After(size-1))

	// Both the start address and the size must be page-aligned.
	if start.Align(platform.PageSize, false) != start ||
		size%platform.PageSize != 0 {
		return MemoryUnaligned
	}

	// A backing buffer must itself be page-aligned, since it
	// is mapped directly; misalignment is harder to detect later.
	if user != nil &&
		uintptr(unsafe.Pointer(&user[0]))%platform.PageSize != 0 {
		return MemoryUnaligned
	}

	// Record the region in the map.
	region := &TypedMemoryRegion{
		MemoryRegion: MemoryRegion{start, size},
		MemoryType:   memtype,
		Device:       device,
		user:         user,
		allocated:    make(map[uint64]uint64),
	}
	if err := memory.Add(region); err != nil {
		return err
	}

	// Establish the actual mapping.
	// User and ACPI regions are both backed by user memory.
	switch region.MemoryType {
	case MemoryTypeUser, MemoryTypeAcpi:
		return vm.MapUserMemory(region.Start, region.Size, region.user)
	case MemoryTypeReserved:
		return vm.MapReservedMemory(region.Start, region.Size)
	case MemoryTypeSpecial:
		return vm.MapSpecialMemory(region.Start)
	}

	// Unknown types map nothing (matching historical behavior).
	return nil
}
Example #10
0
// Attach reserves the low "BIOS" page, the special TSS region,
// and the remainder of the address space up to the 4GB boundary.
func (bios *Bios) Attach(vm *platform.Vm, model *Model) error {

	// Reserve the first page as a stand-in "BIOS" area.
	// This is done simply to match expectations; nothing
	// should ever be allocated in the first page.
	if err := model.Reserve(
		vm,
		bios,
		MemoryTypeReserved,
		platform.Paddr(0), // Start.
		platform.PageSize, // Size.
		nil); err != nil {
		return err
	}

	// Reserve the special TSS region.
	if err := model.Reserve(
		vm,
		bios,
		MemoryTypeSpecial,
		bios.TSSAddr,
		vm.SizeSpecialMemory(),
		nil); err != nil {
		return err
	}

	// Finish the region: reserve everything from the end of
	// the TSS up to the 4GB boundary.
	tssEnd := bios.TSSAddr.After(vm.SizeSpecialMemory())
	return model.Reserve(
		vm,
		bios,
		MemoryTypeReserved,
		tssEnd,
		uint64(platform.Paddr(0x100000000)-tssEnd),
		nil)
}
Example #11
0
// SaveState pauses the VM, captures all vcpu and device state,
// and returns it as a single State value. The VM is unpaused
// before returning, whether or not an error occurred.
func SaveState(vm *platform.Vm, model *machine.Model) (State, error) {

	// Stop the Vcpus first.
	// NOTE: model.DeviceInfo() below also stops the model,
	// but we pause explicitly so the Vcpus are completely
	// stopped before any device state is read.
	if err := vm.Pause(false); err != nil {
		return State{}, err
	}
	defer vm.Unpause(false)

	// Collect the vcpu states.
	vcpus, err := vm.VcpuInfo()
	if err != nil {
		return State{}, err
	}

	// Collect device state.
	// NOTE: This should block until devices have actually
	// quiesced (finished processing outstanding requests
	// generated by the VCPUs).
	devices, err := model.DeviceInfo(vm)
	if err != nil {
		return State{}, err
	}

	// Done.
	return State{Vcpus: vcpus, Devices: devices}, nil
}
Example #12
0
// Load pushes previously saved PIT state back into the VM.
func (pit *Pit) Load(vm *platform.Vm) error {
	// Restore the snapshot taken by Save().
	return vm.SetPit(pit.Pit)
}
Example #13
0
// Load pushes previously saved clock state back into the VM.
func (clock *Clock) Load(vm *platform.Vm) error {
	// Restore the snapshot taken by Save().
	return vm.SetClock(clock.Clock)
}
Example #14
0
// Load pushes previously saved irqchip state back into the VM.
func (apic *Apic) Load(vm *platform.Vm) error {
	// Restore the snapshot taken by Save().
	return vm.SetIrqChip(apic.State)
}
Example #15
0
// Attach maps the ACPI data page into the guest and, when the
// data was not restored from a snapshot, builds the ACPI tables
// (MADT, DSDT, XSDT, RSDT, RSDP) in that page via cgo helpers,
// laying them out back-to-back at 64-byte-aligned offsets.
func (acpi *Acpi) Attach(vm *platform.Vm, model *Model) error {

	// Do we already have data?
	// If so, it was restored from a snapshot and the tables
	// must not be rebuilt; we only need to re-map it.
	rebuild := true
	if acpi.Data == nil {
		// Create our data (one page).
		acpi.Data = make([]byte, platform.PageSize, platform.PageSize)
	} else {
		// Align our data.
		// This is necessary because we map this in
		// directly. It's possible that the data was
		// decoded and refers to the middle of some
		// larger array somewhere, and isn't aligned.
		acpi.Data = platform.AlignBytes(acpi.Data)
		rebuild = false
	}

	// Map our memory block at acpi.Addr,
	// backed directly by acpi.Data.
	err := model.Reserve(
		vm,
		acpi,
		MemoryTypeAcpi,
		acpi.Addr,
		platform.PageSize,
		acpi.Data)
	if err != nil {
		return err
	}

	// Already done (restored data case).
	if !rebuild {
		return nil
	}

	// Find our APIC information.
	// This will find the APIC device if it
	// is attached; otherwise both addresses stay zero
	// and the MADT table will unfortunately be a bit invalid.
	var IOApic platform.Paddr
	var LApic platform.Paddr
	for _, device := range model.Devices() {
		apic, ok := device.(*Apic)
		if ok {
			IOApic = apic.IOApic
			LApic = apic.LApic
			break
		}
	}

	// Load the MADT at the start of the page.
	// Each build_* helper writes its table at the given
	// pointer and returns the number of bytes written.
	madt_bytes := C.build_madt(
		unsafe.Pointer(&acpi.Data[0]),
		C.__u32(LApic),
		C.int(len(vm.Vcpus())),
		C.__u32(IOApic),
		C.__u32(0), // I/O APIC interrupt?
	)
	acpi.Debug("MADT %x @ %x", madt_bytes, acpi.Addr)

	// Align offset up to the next 64-byte boundary.
	offset := madt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the DSDT after the MADT.
	// NOTE(review): offsets are never checked against
	// platform.PageSize; the tables are assumed to fit in
	// one page — an overflow would panic on the index below.
	dsdt_address := uint64(acpi.Addr) + uint64(offset)
	dsdt_bytes := C.build_dsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
	)
	acpi.Debug("DSDT %x @ %x", dsdt_bytes, dsdt_address)

	// Align offset.
	offset += dsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the XSDT, pointing it at the MADT.
	xsdt_address := uint64(acpi.Addr) + uint64(offset)
	xsdt_bytes := C.build_xsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u64(acpi.Addr), // MADT address.
	)
	acpi.Debug("XSDT %x @ %x", xsdt_bytes, xsdt_address)

	// Align offset.
	offset += xsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the RSDT, pointing it at the MADT.
	rsdt_address := uint64(acpi.Addr) + uint64(offset)
	rsdt_bytes := C.build_rsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u32(acpi.Addr), // MADT address.
	)
	acpi.Debug("RSDT %x @ %x", rsdt_bytes, rsdt_address)

	// Align offset.
	offset += rsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the RSDP last; it points to the RSDT and XSDT
	// built above so the guest can discover everything.
	rsdp_address := uint64(acpi.Addr) + uint64(offset)
	rsdp_bytes := C.build_rsdp(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u32(rsdt_address), // RSDT address.
		C.__u64(xsdt_address), // XSDT address.
	)
	acpi.Debug("RSDP %x @ %x", rsdp_bytes, rsdp_address)

	// Everything went okay.
	return nil
}