func (vm *Vm) MapUserMemory(
	start Paddr,
	size uint64,
	mmap []byte) error {

	// See NOTE above about read-only memory.
	// As we will not support it for the moment,
	// we do not expose it through the interface.
	// Leveraging that feature will likely require
	// a small amount of re-architecting in any case.
	var region C.struct_kvm_userspace_memory_region
	region.slot = C.__u32(vm.mem_region)
	region.flags = C.__u32(0)
	region.guest_phys_addr = C.__u64(start)
	region.memory_size = C.__u64(size)
	region.userspace_addr = C.__u64(uintptr(unsafe.Pointer(&mmap[0])))

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetUserMemoryRegion),
		uintptr(unsafe.Pointer(&region)))
	if e != 0 {
		return e
	}

	// We're set, bump our slot.
	vm.mem_region += 1
	return nil
}
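
A minimal usage sketch, assuming a *Vm is already in hand: host memory is mmap'd anonymously and handed to KVM at guest physical address 0. The helper name and the 16 MiB size are illustrative, not part of the API above.

// Sketch: back 16 MiB of guest RAM at guest physical 0 with
// anonymous host memory. The slice must stay referenced for as
// long as the guest runs, since KVM keeps the host virtual address.
func mapGuestRam(vm *Vm) ([]byte, error) {
	mem, err := syscall.Mmap(
		-1, 0, 16<<20,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_PRIVATE|syscall.MAP_ANONYMOUS)
	if err != nil {
		return nil, err
	}
	return mem, vm.MapUserMemory(Paddr(0), uint64(len(mem)), mem)
}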
func (vm *Vm) Interrupt(
	irq Irq,
	level bool) error {

	// Prepare the IRQ.
	var irq_level C.struct_irq_level
	irq_level.irq = C.__u32(irq)
	if level {
		irq_level.level = C.__u32(1)
	} else {
		irq_level.level = C.__u32(0)
	}

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlIrqLine),
		uintptr(unsafe.Pointer(&irq_level)))
	if e != 0 {
		return e
	}

	return nil
}
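
A small usage sketch, assuming the line is an edge-triggered GSI: an edge is delivered by asserting and then deasserting the level. The helper name is illustrative.

// Sketch: pulse an edge-triggered interrupt line.
func pulseIrq(vm *Vm, irq Irq) error {
	// Raise the line...
	if err := vm.Interrupt(irq, true); err != nil {
		return err
	}
	// ...and lower it again to complete the edge.
	return vm.Interrupt(irq, false)
}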
// sched_setattr(2)
func SetAttr(pid int, attr SchedAttr) error {
	cAttr := C.struct_sched_attr{
		C.__u32(C.SCHED_ATTR_SIZE),
		C.__u32(attr.Policy),
		C.__u64(attr.Flags),
		C.__s32(attr.Nice),
		C.__u32(attr.Priority),
		C.__u64(attr.Runtime.Nanoseconds()),
		C.__u64(attr.Deadline.Nanoseconds()),
		C.__u64(attr.Period.Nanoseconds()),
	}
	_, err := C.sched_setattr(C.pid_t(pid), &cAttr, C.uint(0))
	return err
}
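
For illustration, a sketch that gives the calling thread a SCHED_DEADLINE reservation of 10ms runtime every 100ms. Policy number 6 is Linux's SCHED_DEADLINE, pid 0 means the calling thread per sched_setattr(2), and the time package is assumed imported.

// Sketch: request a deadline reservation for the calling thread.
func setDeadline() error {
	const schedDeadline = 6 // SCHED_DEADLINE policy number on Linux
	return SetAttr(0, SchedAttr{
		Policy:   schedDeadline,
		Runtime:  10 * time.Millisecond,  // guaranteed CPU time...
		Deadline: 100 * time.Millisecond, // ...delivered by this deadline
		Period:   100 * time.Millisecond, // in every period
	})
}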
Example #4
// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *QuotaCtl) GetQuota(targetPath string, quota *Quota) error {

	projectID, ok := q.quotas[targetPath]
	if !ok {
		return fmt.Errorf("quota not found for path : %s", targetPath)
	}

	//
	// get the quota limit for the container's project id
	//
	var d C.fs_disk_quota_t

	var cs = C.CString(q.backingFsBlockDev)
	defer C.free(unsafe.Pointer(cs))

	_, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA,
		uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
		uintptr(unsafe.Pointer(&d)), 0, 0)
	if errno != 0 {
		return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v",
			projectID, q.backingFsBlockDev, errno.Error())
	}
	quota.Size = uint64(d.d_blk_hardlimit) * 512

	return nil
}
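
A short usage sketch (helper name illustrative, fmt assumed imported). It only works for paths previously registered via SetQuota, since the project ID comes from the in-memory map.

// Sketch: read back the block limit previously set on a path.
func reportQuota(q *QuotaCtl, path string) error {
	var quota Quota
	if err := q.GetQuota(path, &quota); err != nil {
		return err
	}
	// Size is in bytes (d_blk_hardlimit counts 512-byte units).
	fmt.Printf("%s: hard limit %d bytes\n", path, quota.Size)
	return nil
}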
func (vcpu *Vcpu) SetStepping(step bool) error {

	var guest_debug C.struct_kvm_guest_debug

	if step == vcpu.is_stepping {
		// Already set.
		return nil
	} else if step {
		guest_debug.control = C.__u32(C.IoctlGuestDebugEnable)
	} else {
		guest_debug.control = 0
	}

	// Execute our debug ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetGuestDebug),
		uintptr(unsafe.Pointer(&guest_debug)))
	if e != 0 {
		return e
	}

	// We're okay.
	vcpu.is_stepping = step
	return nil
}
func (vcpu *Vcpu) SetFpuState(state Fpu) error {

	// Prepare our data.
	var kvm_fpu C.struct_kvm_fpu
	for i := 0; i < len(state.FPR); i += 1 {
		for j := 0; j < len(state.FPR[i]); j += 1 {
			kvm_fpu.fpr[i][j] = C.__u8(state.FPR[i][j])
		}
	}
	kvm_fpu.fcw = C.__u16(state.FCW)
	kvm_fpu.fsw = C.__u16(state.FSW)
	kvm_fpu.ftwx = C.__u8(state.FTWX)
	kvm_fpu.last_opcode = C.__u16(state.LastOpcode)
	kvm_fpu.last_ip = C.__u64(state.LastIp)
	kvm_fpu.last_dp = C.__u64(state.LastDp)
	for i := 0; i < len(state.XMM); i += 1 {
		for j := 0; j < len(state.XMM[i]); j += 1 {
			kvm_fpu.xmm[i][j] = C.__u8(state.XMM[i][j])
		}
	}
	kvm_fpu.mxcsr = C.__u32(state.MXCSR)

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetFpu),
		uintptr(unsafe.Pointer(&kvm_fpu)))
	if e != 0 {
		return e
	}

	return nil
}
func (vcpu *Vcpu) SetCpuid(cpuids []Cpuid) error {

	// Initialize our cpuid data.
	cpuidData := make([]byte, PageSize, PageSize)
	for i, cpuid := range cpuids {
		e := C.cpuid_set(
			unsafe.Pointer(&cpuidData[0]),
			C.int(PageSize),
			C.int(i),
			C.__u32(cpuid.Function),
			C.__u32(cpuid.Index),
			C.__u32(cpuid.Flags),
			C.__u32(cpuid.EAX),
			C.__u32(cpuid.EBX),
			C.__u32(cpuid.ECX),
			C.__u32(cpuid.EDX))
		if e != 0 {
			return syscall.Errno(e)
		}
	}

	// Set the vcpu's cpuid.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetCpuid),
		uintptr(unsafe.Pointer(&cpuidData[0])))
	if e != 0 {
		return e
	}

	// We're good.
	vcpu.cpuid = cpuids
	return nil
}
func (vcpu *Vcpu) SetXcrs(xcrs []Xcr) error {

	// Build our parameter.
	var kvm_xcrs C.struct_kvm_xcrs
	kvm_xcrs.nr_xcrs = C.__u32(len(xcrs))
	for i, xcr := range xcrs {
		kvm_xcrs.xcrs[i].xcr = C.__u32(xcr.Id)
		kvm_xcrs.xcrs[i].value = C.__u64(xcr.Value)
	}

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetXcrs),
		uintptr(unsafe.Pointer(&kvm_xcrs)))
	if e != 0 {
		return e
	}

	return nil
}
func (vm *Vm) SetEventFd(
	eventfd *EventFd,
	paddr Paddr,
	size uint,
	is_pio bool,
	unbind bool,
	has_value bool,
	value uint64) error {

	var ioeventfd C.struct_kvm_ioeventfd
	ioeventfd.addr = C.__u64(paddr)
	ioeventfd.len = C.__u32(size)
	ioeventfd.fd = C.__s32(eventfd.Fd())
	ioeventfd.datamatch = C.__u64(value)

	if is_pio {
		ioeventfd.flags |= C.__u32(C.IoctlIoEventFdFlagPio)
	}
	if unbind {
		ioeventfd.flags |= C.__u32(C.IoctlIoEventFdFlagDeassign)
	}
	if has_value {
		ioeventfd.flags |= C.__u32(C.IoctlIoEventFdFlagDatamatch)
	}

	// Bind / unbind the eventfd.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlIoEventFd),
		uintptr(unsafe.Pointer(&ioeventfd)))
	if e != 0 {
		return e
	}

	// Success.
	return nil
}
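
The boolean parameters are easiest to read through a pair of thin wrappers. In this sketch (names illustrative), a guest write of value to a 4-byte PIO doorbell at port signals the eventfd without a VM exit, and the second wrapper deassigns the same binding.

// Sketch: bind a datamatch PIO doorbell to an eventfd.
func bindPioDoorbell(vm *Vm, efd *EventFd, port Paddr, value uint64) error {
	// is_pio=true, unbind=false, has_value=true.
	return vm.SetEventFd(efd, port, 4, true, false, true, value)
}

// Sketch: remove the same binding (deassign flag set).
func unbindPioDoorbell(vm *Vm, efd *EventFd, port Paddr, value uint64) error {
	return vm.SetEventFd(efd, port, 4, true, true, true, value)
}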
func (vm *Vm) SignalMSI(
	addr Paddr,
	data uint32,
	flags uint32) error {

	// Prepare the MSI.
	var msi C.struct_kvm_msi
	msi.address_lo = C.__u32(addr & 0xffffffff)
	msi.address_hi = C.__u32(addr >> 32)
	msi.data = C.__u32(data)
	msi.flags = C.__u32(flags)

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSignalMsi),
		uintptr(unsafe.Pointer(&msi)))
	if e != 0 {
		return e
	}

	return nil
}
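
A usage sketch: the lo/hi split above means a full 64-bit MSI address fits in a Paddr. Here the conventional x86 MSI window at 0xfee00000 (LAPIC ID 0) carries vector 0x31; that encoding is architectural, not part of this API.

// Sketch: fire an MSI at LAPIC 0 with an arbitrary vector.
func fireMsi(vm *Vm) error {
	// Address selects the target LAPIC; data carries the vector.
	return vm.SignalMSI(Paddr(0xfee00000), 0x31, 0)
}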
func (vcpu *Vcpu) SetMpState(state MpState) error {

	// Execute the ioctl.
	var kvm_state C.struct_kvm_mp_state
	kvm_state.mp_state = C.__u32(state)
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetMpState),
		uintptr(unsafe.Pointer(&kvm_state)))
	if e != 0 {
		return e
	}

	return nil
}
func (vm *Vm) SetClock(clock Clock) error {

	// Execute the ioctl.
	var kvm_clock_data C.struct_kvm_clock_data
	kvm_clock_data.clock = C.__u64(clock.Time)
	kvm_clock_data.flags = C.__u32(clock.Flags)
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetClock),
		uintptr(unsafe.Pointer(&kvm_clock_data)))
	if e != 0 {
		return e
	}

	return nil
}
func nativeCpuid(function uint32) Cpuid {

	var eax C.__u32
	var ebx C.__u32
	var ecx C.__u32
	var edx C.__u32

	// Query our native function.
	C.cpuid_native(C.__u32(function), &eax, &ebx, &ecx, &edx)

	// Transform.
	return Cpuid{
		Function: function,
		EAX:      uint32(eax),
		EBX:      uint32(ebx),
		ECX:      uint32(ecx),
		EDX:      uint32(edx)}
}
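
Paired with SetCpuid from earlier in this listing, this enables a simple passthrough sketch, for experimentation only: a real VMM would mask feature bits rather than expose host leaves verbatim, and leaves with subleaves need Index set, which nativeCpuid does not do.

// Sketch: forward the first few basic host CPUID leaves unchanged.
func passthroughCpuid(vcpu *Vcpu) error {
	leaves := make([]Cpuid, 0, 3)
	for fn := uint32(0); fn <= 2; fn++ {
		// Copy the host's answer for each leaf verbatim.
		leaves = append(leaves, nativeCpuid(fn))
	}
	return vcpu.SetCpuid(leaves)
}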
func (vcpu *Vcpu) SetXSave(state XSave) error {

	// Execute the ioctl.
	var kvm_xsave C.struct_kvm_xsave
	for i := 0; i < len(state.Region); i += 1 {
		kvm_xsave.region[i] = C.__u32(state.Region[i])
	}
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetXSave),
		uintptr(unsafe.Pointer(&kvm_xsave)))
	if e != 0 {
		return e
	}

	return nil
}
Example #15
func (vchannel *VirtioChannel) ProcessOutgoing() error {

	for buf := range vchannel.outgoing {
		// The device is active.
		vchannel.VirtioDevice.Acquire()

		// Put in the virtqueue.
		vchannel.Debug(
			"vqueue#%d outgoing slot [%d]",
			vchannel.Channel,
			buf.index)

		var evt_interrupt C.int
		var no_interrupt C.int
		C.vring_put_buf(
			&vchannel.vring,
			C.__u16(buf.index),
			C.__u32(buf.length),
			&evt_interrupt,
			&no_interrupt)

		if vchannel.HasFeatures(VirtioRingFEventIdx) {
			// We are using the event index.
			if evt_interrupt != C.int(0) {
				// Interrupt the guest.
				vchannel.Interrupt(true)
			}
		} else {
			// We have no event index.
			if no_interrupt == C.int(0) {
				// Interrupt the guest.
				vchannel.Interrupt(true)
			}
		}

		// Remove from our outstanding list.
		delete(vchannel.Outstanding, uint16(buf.index))

		// We can release until the next buffer comes back.
		vchannel.VirtioDevice.Release()
	}

	return nil
}
Example #16
func (vm *Vm) CreatePit() error {
	// Prepare the PIT config.
	// The only flag supported at the time of writing
	// was KVM_PIT_SPEAKER_DUMMY, which I really have no
	// interest in supporting.
	var pit C.struct_kvm_pit_config
	pit.flags = C.__u32(0)

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlCreatePit2),
		uintptr(unsafe.Pointer(&pit)))
	if e != 0 {
		return e
	}

	return nil
}
func (vcpu *Vcpu) SetMsr(index uint32, value uint64) error {

	// Setup our structure.
	data := make([]byte, C.msr_size(), C.msr_size())

	// Set our index and value.
	C.msr_set(unsafe.Pointer(&data[0]), C.__u32(index), C.__u64(value))

	// Execute our ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetMsrs),
		uintptr(unsafe.Pointer(&data[0])))
	if e != 0 {
		return e
	}

	return nil
}
func (vcpu *Vcpu) GetMsr(index uint32) (uint64, error) {

	// Setup our structure.
	data := make([]byte, C.msr_size(), C.msr_size())

	// Set our index to retrieve.
	C.msr_set(unsafe.Pointer(&data[0]), C.__u32(index), C.__u64(0))

	// Execute our ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlGetMsrs),
		uintptr(unsafe.Pointer(&data[0])))
	if e != 0 {
		return 0, e
	}

	// Return our value.
	return uint64(C.msr_get(unsafe.Pointer(&data[0]))), nil
}
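
A round-trip sketch combining GetMsr and SetMsr above. MSR 0x10 is the architectural IA32_TIME_STAMP_COUNTER; the helper name is illustrative, and any writable index works the same way.

// Sketch: adjust the guest TSC by re-writing what we read.
func bumpTsc(vcpu *Vcpu, delta uint64) error {
	tsc, err := vcpu.GetMsr(0x10)
	if err != nil {
		return err
	}
	return vcpu.SetMsr(0x10, tsc+delta)
}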
Example #19
// setProjectQuota - set the quota for project id on xfs block device
func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
	var d C.fs_disk_quota_t
	d.d_version = C.FS_DQUOT_VERSION
	d.d_id = C.__u32(projectID)
	d.d_flags = C.XFS_PROJ_QUOTA

	d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
	d.d_blk_hardlimit = C.__u64(quota.Size / 512)
	d.d_blk_softlimit = d.d_blk_hardlimit

	var cs = C.CString(backingFsBlockDev)
	defer C.free(unsafe.Pointer(cs))

	_, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM,
		uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
		uintptr(unsafe.Pointer(&d)), 0, 0)
	if errno != 0 {
		return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
			projectID, backingFsBlockDev, errno.Error())
	}

	return nil
}
Example #20
// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
	dir, err := openDir(targetPath)
	if err != nil {
		return err
	}
	defer closeDir(dir)

	var fsx C.struct_fsxattr
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
		uintptr(unsafe.Pointer(&fsx)))
	if errno != 0 {
		return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
	}
	fsx.fsx_projid = C.__u32(projectID)
	fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
	_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
		uintptr(unsafe.Pointer(&fsx)))
	if errno != 0 {
		return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
	}

	return nil
}
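
The two quota halves combine as below, in a sketch with an illustrative name: first tag the directory tree with a project ID, then cap that project's blocks. The caller must pick project IDs that are unique per filesystem and pass the device node backing targetPath.

// Sketch: give a directory tree a 1 GiB XFS project quota.
func limitDir(backingFsBlockDev, targetPath string, projectID uint32) error {
	if err := setProjectID(targetPath, projectID); err != nil {
		return err
	}
	// Hard (and soft) block limit for everything under targetPath.
	return setProjectQuota(backingFsBlockDev, projectID, Quota{Size: 1 << 30})
}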
Example #21
func findRootGen(dir *C.DIR) (uint64, error) {
	fd := getDirFd(dir)

	var maxFound uint64 = 0
	var inoArgs C.struct_btrfs_ioctl_ino_lookup_args
	var args C.struct_btrfs_ioctl_search_args
	var sk *C.struct_btrfs_ioctl_search_key = &args.key
	var sh C.struct_btrfs_ioctl_search_header

	inoArgs.objectid = C.BTRFS_FIRST_FREE_OBJECTID

	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BTRFS_IOC_INO_LOOKUP, uintptr(unsafe.Pointer(&inoArgs)))
	if errno != 0 {
		return 0, fmt.Errorf("Failed to perform the inode lookup %v", errno.Error())
	}

	sk.tree_id = 1
	sk.min_objectid = inoArgs.treeid
	sk.max_objectid = inoArgs.treeid
	sk.max_type = C.BTRFS_ROOT_ITEM_KEY
	sk.min_type = C.BTRFS_ROOT_ITEM_KEY
	sk.max_offset = math.MaxUint64
	sk.max_transid = math.MaxUint64
	for {
		// The kernel rewrites nr_items to the number of items it
		// actually returned, so re-arm the request size on each pass.
		sk.nr_items = 4096

		_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BTRFS_IOC_TREE_SEARCH, uintptr(unsafe.Pointer(&args)))
		if errno != 0 {
			return 0, fmt.Errorf("Failed to perform the search %v", errno.Error())
		}

		if sk.nr_items == 0 {
			break
		}

		var off uintptr = 0

		for i := C.__u32(0); i < sk.nr_items; i++ {
			var item *C.struct_btrfs_root_item

			C.memcpy(unsafe.Pointer(&sh), addptr(unsafe.Pointer(&args.buf), off), C.sizeof_struct_btrfs_ioctl_search_header)
			off += C.sizeof_struct_btrfs_ioctl_search_header

			item = (*C.struct_btrfs_root_item)(unsafe.Pointer(&args.buf[off]))
			off += uintptr(sh.len)

			sk.min_objectid = sh.objectid
			sk.min_type = sh._type
			sk.min_offset = sh.offset

			if sh.objectid > inoArgs.treeid {
				break
			}

			if sh.objectid == inoArgs.treeid && sh._type == C.BTRFS_ROOT_ITEM_KEY {
				rootGeneration := item.generation
				if maxFound < uint64(rootGeneration) {
					maxFound = uint64(rootGeneration)
				}
			}
		}

		if sk.min_offset < math.MaxUint64 {
			sk.min_offset++
		} else {
			break
		}

		if sk.min_type != C.BTRFS_ROOT_ITEM_KEY {
			break
		}
		if sk.min_objectid != inoArgs.treeid {
			break
		}

	}

	return maxFound, nil
}
Example #22
func (vcpu *Vcpu) SetSegment(
	seg Segment,
	val SegmentValue,
	sync bool) error {

	err := vcpu.refreshSRegs(true)
	if err != nil {
		return err
	}

	// Every segment register in kvm_sregs shares the same
	// struct kvm_segment layout, so pick the target register
	// first and fill it in once below.
	var s *C.struct_kvm_segment
	switch seg {
	case CS:
		s = &vcpu.sregs.cs
	case DS:
		s = &vcpu.sregs.ds
	case ES:
		s = &vcpu.sregs.es
	case FS:
		s = &vcpu.sregs.fs
	case GS:
		s = &vcpu.sregs.gs
	case SS:
		s = &vcpu.sregs.ss
	case TR:
		s = &vcpu.sregs.tr
	case LDT:
		s = &vcpu.sregs.ldt
	default:
		return UnknownRegister
	}

	s.base = C.__u64(val.Base)
	s.limit = C.__u32(val.Limit)
	s.selector = C.__u16(val.Selector)
	s._type = C.__u8(val.Type)
	s.present = C.__u8(val.Present)
	s.dpl = C.__u8(val.Dpl)
	s.db = C.__u8(val.Db)
	s.s = C.__u8(val.S)
	s.l = C.__u8(val.L)
	s.g = C.__u8(val.G)
	s.avl = C.__u8(val.Avl)
	// A segment is "unusable" exactly when it is not present.
	s.unusable = C.__u8(^val.Present & 0x1)

	if sync {
		err = vcpu.flushSRegs()
		if err != nil {
			return err
		}
	}

	return nil
}
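
As a worked example of the fields above, a sketch of the flat 64-bit code segment a VMM typically installs when entering long mode directly. The descriptor bits follow the Intel SDM; the selector value and the trailing sync=true are illustrative.

// Sketch: load a flat 64-bit code segment and flush sregs.
func setFlatCode64(vcpu *Vcpu) error {
	return vcpu.SetSegment(CS, SegmentValue{
		Base:     0,
		Limit:    0xffffffff,
		Selector: 0x8,
		Type:     0xb, // execute/read, accessed
		Present:  1,
		Dpl:      0,
		Db:       0, // must be clear when L is set
		S:        1, // code/data segment
		L:        1, // 64-bit mode
		G:        1, // 4 KiB granularity
		Avl:      0,
	}, true)
}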
Example #23
func (acpi *Acpi) Attach(vm *platform.Vm, model *Model) error {

	// Do we already have data?
	rebuild := true
	if acpi.Data == nil {
		// Create our data.
		acpi.Data = make([]byte, platform.PageSize, platform.PageSize)
	} else {
		// Align our data.
		// This is necessary because we map this in
		// directly. It's possible that the data was
		// decoded and refers to the middle of some
		// larger array somewhere, and isn't aligned.
		acpi.Data = platform.AlignBytes(acpi.Data)
		rebuild = false
	}

	// Allocate our memory block.
	err := model.Reserve(
		vm,
		acpi,
		MemoryTypeAcpi,
		acpi.Addr,
		platform.PageSize,
		acpi.Data)
	if err != nil {
		return err
	}

	// Already done.
	if !rebuild {
		return nil
	}

	// Find our APIC information.
	// This will find the APIC device if it
	// is attached, otherwise the MADT table
	// will unfortunately have to be a bit invalid.
	var IOApic platform.Paddr
	var LApic platform.Paddr
	for _, device := range model.Devices() {
		apic, ok := device.(*Apic)
		if ok {
			IOApic = apic.IOApic
			LApic = apic.LApic
			break
		}
	}

	// Load the MADT.
	madt_bytes := C.build_madt(
		unsafe.Pointer(&acpi.Data[0]),
		C.__u32(LApic),
		C.int(len(vm.Vcpus())),
		C.__u32(IOApic),
		C.__u32(0), // I/O APIC interrupt?
	)
	acpi.Debug("MADT %x @ %x", madt_bytes, acpi.Addr)

	// Align offset.
	offset := madt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the DSDT.
	dsdt_address := uint64(acpi.Addr) + uint64(offset)
	dsdt_bytes := C.build_dsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
	)
	acpi.Debug("DSDT %x @ %x", dsdt_bytes, dsdt_address)

	// Align offset.
	offset += dsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the XSDT.
	xsdt_address := uint64(acpi.Addr) + uint64(offset)
	xsdt_bytes := C.build_xsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u64(acpi.Addr), // MADT address.
	)
	acpi.Debug("XSDT %x @ %x", xsdt_bytes, xsdt_address)

	// Align offset.
	offset += xsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the RSDT.
	rsdt_address := uint64(acpi.Addr) + uint64(offset)
	rsdt_bytes := C.build_rsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u32(acpi.Addr), // MADT address.
	)
	acpi.Debug("RSDT %x @ %x", rsdt_bytes, rsdt_address)

	// Align offset.
	offset += rsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the RSDP.
	rsdp_address := uint64(acpi.Addr) + uint64(offset)
	rsdp_bytes := C.build_rsdp(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u32(rsdt_address), // RSDT address.
		C.__u64(xsdt_address), // XSDT address.
	)
	acpi.Debug("RSDP %x @ %x", rsdp_bytes, rsdp_address)

	// Everything went okay.
	return nil
}
func (vcpu *Vcpu) SetEvents(events Events) error {

	// Prepare our state.
	var kvm_events C.struct_kvm_vcpu_events

	if events.NmiPending {
		kvm_events.nmi.pending = C.__u8(1)
	} else {
		kvm_events.nmi.pending = C.__u8(0)
	}
	if events.NmiMasked {
		kvm_events.nmi.masked = C.__u8(1)
	} else {
		kvm_events.nmi.masked = C.__u8(0)
	}

	kvm_events.sipi_vector = C.__u32(events.SipiVector)
	kvm_events.flags = C.__u32(events.Flags)

	if events.Exception != nil {
		kvm_events.exception.injected = C.__u8(1)
		kvm_events.exception.nr = C.__u8(events.Exception.Number)
		if events.Exception.ErrorCode != nil {
			kvm_events.exception.has_error_code = C.__u8(1)
			kvm_events.exception.error_code = C.__u32(*events.Exception.ErrorCode)
		} else {
			kvm_events.exception.has_error_code = C.__u8(0)
			kvm_events.exception.error_code = C.__u32(0)
		}
	} else {
		kvm_events.exception.injected = C.__u8(0)
		kvm_events.exception.nr = C.__u8(0)
		kvm_events.exception.has_error_code = C.__u8(0)
		kvm_events.exception.error_code = C.__u32(0)
	}
	if events.Interrupt != nil {
		kvm_events.interrupt.injected = C.__u8(1)
		kvm_events.interrupt.nr = C.__u8(events.Interrupt.Number)
		if events.Interrupt.Soft {
			kvm_events.interrupt.soft = C.__u8(1)
		} else {
			kvm_events.interrupt.soft = C.__u8(0)
		}
		if events.Interrupt.Shadow {
			kvm_events.interrupt.shadow = C.__u8(1)
		} else {
			kvm_events.interrupt.shadow = C.__u8(0)
		}
	} else {
		kvm_events.interrupt.injected = C.__u8(0)
		kvm_events.interrupt.nr = C.__u8(0)
		kvm_events.interrupt.soft = C.__u8(0)
		kvm_events.interrupt.shadow = C.__u8(0)
	}

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetVcpuEvents),
		uintptr(unsafe.Pointer(&kvm_events)))
	if e != 0 {
		return e
	}

	return nil
}
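
To close, a sketch of queuing a #GP(0) through SetEvents. The listing never shows the exception payload's type name, so ExceptionEvent and its field types (Number, plus an optional *uint32 ErrorCode) are assumptions inferred from the field accesses in SetEvents above.

// Sketch: inject a general-protection fault with error code 0.
// ExceptionEvent is an assumed type name; vector 13 is architectural.
func injectGP(vcpu *Vcpu) error {
	code := uint32(0)
	return vcpu.SetEvents(Events{
		Exception: &ExceptionEvent{
			Number:    13,
			ErrorCode: &code,
		},
	})
}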