Example No. 1
func (vcpu *Vcpu) SetFpuState(state Fpu) error {

	// Prepare our data.
	var kvm_fpu C.struct_kvm_fpu
	for i := 0; i < len(state.FPR); i += 1 {
		for j := 0; j < len(state.FPR[i]); j += 1 {
			kvm_fpu.fpr[i][j] = C.__u8(state.FPR[i][j])
		}
	}
	kvm_fpu.fcw = C.__u16(state.FCW)
	kvm_fpu.fsw = C.__u16(state.FSW)
	kvm_fpu.ftwx = C.__u8(state.FTWX)
	kvm_fpu.last_opcode = C.__u16(state.LastOpcode)
	kvm_fpu.last_ip = C.__u64(state.LastIp)
	kvm_fpu.last_dp = C.__u64(state.LastDp)
	for i := 0; i < len(state.XMM); i += 1 {
		for j := 0; j < len(state.XMM[i]); j += 1 {
			kvm_fpu.xmm[i][j] = C.__u8(state.XMM[i][j])
		}
	}
	kvm_fpu.mxcsr = C.__u32(state.MXCSR)

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetFpu),
		uintptr(unsafe.Pointer(&kvm_fpu)))
	if e != 0 {
		return e
	}

	return nil
}
Example No. 2
func (vm *Vm) MapUserMemory(
	start Paddr,
	size uint64,
	mmap []byte) error {

	// See NOTE above about read-only memory.
	// As we will not support it for the moment,
	// we do not expose it through the interface.
	// Leveraging that feature will likely require
	// a small amount of re-architecting in any case.
	var region C.struct_kvm_userspace_memory_region
	region.slot = C.__u32(vm.mem_region)
	region.flags = C.__u32(0)
	region.guest_phys_addr = C.__u64(start)
	region.memory_size = C.__u64(size)
	region.userspace_addr = C.__u64(uintptr(unsafe.Pointer(&mmap[0])))

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetUserMemoryRegion),
		uintptr(unsafe.Pointer(&region)))
	if e != 0 {
		return e
	}

	// We're set, bump our slot.
	vm.mem_region += 1
	return nil
}
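The mmap slice passed in becomes the backing store for guest physical memory, so it must stay page-aligned and alive for the lifetime of the mapping. A minimal usage sketch, assuming only the Vm and Paddr types above plus the standard syscall package (allocGuestMemory is a hypothetical helper, not part of the original API):

func allocGuestMemory(vm *Vm, start Paddr, size uint64) ([]byte, error) {
	// Allocate anonymous, page-aligned memory.
	mem, err := syscall.Mmap(
		-1, 0, int(size),
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_PRIVATE|syscall.MAP_ANONYMOUS)
	if err != nil {
		return nil, err
	}
	// Install it as guest RAM at the given physical address.
	if err := vm.MapUserMemory(start, size, mem); err != nil {
		syscall.Munmap(mem)
		return nil, err
	}
	return mem, nil
}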
Example No. 3
func (vcpu *Vcpu) SetDescriptor(
	desc Descriptor,
	val DescriptorValue,
	sync bool) error {

	err := vcpu.refreshSRegs(true)
	if err != nil {
		return err
	}

	switch desc {
	case GDT:
		vcpu.sregs.gdt.base = C.__u64(val.Base)
		vcpu.sregs.gdt.limit = C.__u16(val.Limit)
	case IDT:
		vcpu.sregs.idt.base = C.__u64(val.Base)
		vcpu.sregs.idt.limit = C.__u16(val.Limit)
	default:
		return UnknownRegister
	}

	if sync {
		err = vcpu.flushSRegs()
		if err != nil {
			return err
		}
	}

	return nil
}
Example No. 4
func ffiPropertySetReadonly(path string, readOnly bool) error {
	var flags C.__u64
	if readOnly {
		flags |= C.__u64(C.BTRFS_SUBVOL_RDONLY)
	} else {
		flags = flags &^ C.__u64(C.BTRFS_SUBVOL_RDONLY)
	}
	return ffiIoctl(path, C.BTRFS_IOC_SUBVOL_SETFLAGS, uintptr(unsafe.Pointer(&flags)))
}
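Note that flags starts at zero here, so the else branch clears a bit that was never set, and the ioctl overwrites the subvolume flags wholesale. If the intent is to preserve any other flag bits, a read-modify-write sketch (assuming the same ffiIoctl helper) would first fetch the current flags with BTRFS_IOC_SUBVOL_GETFLAGS:

func ffiPropertySetReadonlyRMW(path string, readOnly bool) error {
	var flags C.__u64
	// Fetch the current subvolume flags before modifying them.
	err := ffiIoctl(path, C.BTRFS_IOC_SUBVOL_GETFLAGS, uintptr(unsafe.Pointer(&flags)))
	if err != nil {
		return err
	}
	if readOnly {
		flags |= C.__u64(C.BTRFS_SUBVOL_RDONLY)
	} else {
		flags = flags &^ C.__u64(C.BTRFS_SUBVOL_RDONLY)
	}
	return ffiIoctl(path, C.BTRFS_IOC_SUBVOL_SETFLAGS, uintptr(unsafe.Pointer(&flags)))
}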
Example No. 5
func (ila *inodeLookupArgs) C() C.struct_btrfs_ioctl_ino_lookup_args {
	var args C.struct_btrfs_ioctl_ino_lookup_args
	args.objectid = C.__u64(ila.ObjectID)
	args.treeid = C.__u64(ila.TreeID)
	if ila.Name != "" {
		str := [C.BTRFS_INO_LOOKUP_PATH_MAX]C.char{}
		// Copy at most PATH_MAX-1 bytes so the name stays
		// NUL-terminated in the fixed-size array.
		for i := 0; i < len(ila.Name) && i < C.BTRFS_INO_LOOKUP_PATH_MAX-1; i++ {
			str[i] = C.char(ila.Name[i])
		}
		args.name = str
	}
	return args
}
Example No. 6
// sched_setattr(2)
func SetAttr(pid int, attr SchedAttr) error {
	cAttr := C.struct_sched_attr{
		C.__u32(C.SCHED_ATTR_SIZE),
		C.__u32(attr.Policy),
		C.__u64(attr.Flags),
		C.__s32(attr.Nice),
		C.__u32(attr.Priority),
		C.__u64(attr.Runtime.Nanoseconds()),
		C.__u64(attr.Deadline.Nanoseconds()),
		C.__u64(attr.Period.Nanoseconds()),
	}
	_, err := C.sched_setattr(C.pid_t(pid), &cAttr, C.uint(0))
	return err
}
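A hypothetical call site, assuming SchedAttr's fields are named in the order of the positional literal above (Policy, Flags, Nice, Priority, Runtime, Deadline, Period) and carry time.Duration values; policy 6 is SCHED_DEADLINE on Linux:

// Sketch: give the calling thread (pid 0) a 10ms runtime
// reservation out of every 30ms period under SCHED_DEADLINE.
// Field names here are assumptions, not confirmed by the source.
func setDeadline() error {
	return SetAttr(0, SchedAttr{
		Policy:   6, // SCHED_DEADLINE
		Runtime:  10 * time.Millisecond,
		Deadline: 30 * time.Millisecond,
		Period:   30 * time.Millisecond,
	})
}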
Example No. 7
func huurr(dirpath string) ([]string, error) {
	inoArgs, err := inodeLookup(dirpath)
	if err != nil {
		return nil, err
	}

	var searchKey C.struct_btrfs_ioctl_search_key
	searchKey.min_objectid = C.__u64(inoArgs.TreeID)
	searchKey.max_objectid = C.__u64(inoArgs.TreeID)
	searchKey.min_type = C.BTRFS_ROOT_ITEM_KEY
	searchKey.max_type = C.BTRFS_ROOT_ITEM_KEY
	searchKey.max_offset = (1<<48 - 1)
	searchKey.max_transid = (1<<48 - 1)

	// The key is prepared, but the BTRFS_IOC_TREE_SEARCH
	// call itself is missing here; see the sketch below.
	return nil, nil
}
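To actually run the search, the key would be embedded in a C.struct_btrfs_ioctl_search_args and handed to BTRFS_IOC_TREE_SEARCH, exactly as Example No. 19 does. A sketch of the missing tail, assuming the openDir/closeDir/getDirFd helpers used elsewhere in these examples and an fmt import:

	var args C.struct_btrfs_ioctl_search_args
	args.key = searchKey
	args.key.nr_items = 4096

	dir, err := openDir(dirpath)
	if err != nil {
		return nil, err
	}
	defer closeDir(dir)

	_, _, errno := syscall.Syscall(
		syscall.SYS_IOCTL, getDirFd(dir),
		C.BTRFS_IOC_TREE_SEARCH,
		uintptr(unsafe.Pointer(&args)))
	if errno != 0 {
		return nil, fmt.Errorf("tree search failed: %v", errno)
	}
	// Decode the results out of args.buf as in Example No. 19.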
Example No. 8
func (vm *Vm) SetEventFd(
	eventfd *EventFd,
	paddr Paddr,
	size uint,
	is_pio bool,
	unbind bool,
	has_value bool,
	value uint64) error {

	var ioeventfd C.struct_kvm_ioeventfd
	ioeventfd.addr = C.__u64(paddr)
	ioeventfd.len = C.__u32(size)
	ioeventfd.fd = C.__s32(eventfd.Fd())
	ioeventfd.datamatch = C.__u64(value)

	if is_pio {
		ioeventfd.flags |= C.__u32(C.IoctlIoEventFdFlagPio)
	}
	if unbind {
		ioeventfd.flags |= C.__u32(C.IoctlIoEventFdFlagDeassign)
	}
	if has_value {
		ioeventfd.flags |= C.__u32(C.IoctlIoEventFdFlagDatamatch)
	}

	// Bind / unbind the eventfd.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlIoEventFd),
		uintptr(unsafe.Pointer(&ioeventfd)))
	if e != 0 {
		return e
	}

	// Success.
	return nil
}
Example No. 9
func ffiSubvolumeSnapshot(src string, dest string, readOnly bool) error {
	srcDir, err := ffiOpenDir(src)
	if err != nil {
		return err
	}
	defer ffiCloseDir(srcDir)
	var args C.struct_btrfs_ioctl_vol_args_v2
	args.fd = C.__s64(ffiGetDirFd(srcDir))
	if readOnly {
		args.flags |= C.__u64(C.BTRFS_SUBVOL_RDONLY)
	}
	name := []byte(filepath.Base(dest))
	if len(name) > len(args.name)-1 {
		// Reject names that would overflow the fixed-size
		// array or leave no room for the trailing NUL.
		return fmt.Errorf("snapshot name too long: %q", dest)
	}
	for i, c := range name {
		args.name[i] = C.char(c)
	}
	return ffiIoctl(filepath.Dir(dest), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args)))
}
Example No. 10
func (vm *Vm) SetClock(clock Clock) error {

	// Execute the ioctl.
	var kvm_clock_data C.struct_kvm_clock_data
	kvm_clock_data.clock = C.__u64(clock.Time)
	kvm_clock_data.flags = C.__u32(clock.Flags)
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetClock),
		uintptr(unsafe.Pointer(&kvm_clock_data)))
	if e != 0 {
		return e
	}

	return nil
}
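The read direction is symmetric. A sketch of a hypothetical GetClock, assuming an IoctlGetClock constant (wrapping KVM_GET_CLOCK) exists alongside the set variant, and that Clock carries uint64/uint32 Time and Flags fields as the conversions above imply:

func (vm *Vm) GetClock() (Clock, error) {

	// Execute the ioctl.
	var kvm_clock_data C.struct_kvm_clock_data
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlGetClock), // Hypothetical constant for KVM_GET_CLOCK.
		uintptr(unsafe.Pointer(&kvm_clock_data)))
	if e != 0 {
		return Clock{}, e
	}

	return Clock{
		Time:  uint64(kvm_clock_data.clock),
		Flags: uint32(kvm_clock_data.flags),
	}, nil
}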
Example No. 11
func subvolLimitQgroup(path string, size uint64) error {
	dir, err := openDir(path)
	if err != nil {
		return err
	}
	defer closeDir(dir)

	var args C.struct_btrfs_ioctl_qgroup_limit_args
	args.lim.max_referenced = C.__u64(size)
	args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
		uintptr(unsafe.Pointer(&args)))
	if errno != 0 {
		return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
	}

	return nil
}
Example No. 12
func (vm *Vm) MapSpecialMemory(addr Paddr) error {

	// We require 1 page for the identity map.
	err := vm.MapReservedMemory(addr, PageSize)
	if err != nil {
		return err
	}

	// Set the EPT identity map.
	// (This requires a single page).
	ept_identity_addr := C.__u64(addr)
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetIdentityMapAddr),
		uintptr(unsafe.Pointer(&ept_identity_addr)))
	if e != 0 {
		log.Printf("Unable to set identity map to %08x!", addr)
		return e
	}

	// We require 3 pages for the TSS address.
	err = vm.MapReservedMemory(addr+PageSize, 3*PageSize)
	if err != nil {
		return err
	}

	// Set the TSS address to the range above.
	// (This requires three pages.) Note that this
	// ioctl takes the address itself as its argument,
	// not a pointer to it, unlike the identity map.
	_, _, e = syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vm.fd),
		uintptr(C.IoctlSetTssAddr),
		uintptr(addr+PageSize))
	if e != 0 {
		log.Printf("Unable to set TSS ADDR to %08x!", addr+PageSize)
		return e
	}

	// We're okay.
	return nil
}
Example No. 13
func (vcpu *Vcpu) SetMsr(index uint32, value uint64) error {

	// Setup our structure.
	data := make([]byte, C.msr_size(), C.msr_size())

	// Set our index and value.
	C.msr_set(unsafe.Pointer(&data[0]), C.__u32(index), C.__u64(value))

	// Execute our ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetMsrs),
		uintptr(unsafe.Pointer(&data[0])))
	if e != 0 {
		return e
	}

	return nil
}
Example No. 14
func (vcpu *Vcpu) GetMsr(index uint32) (uint64, error) {

	// Setup our structure.
	data := make([]byte, C.msr_size(), C.msr_size())

	// Set our index to retrieve.
	C.msr_set(unsafe.Pointer(&data[0]), C.__u32(index), C.__u64(0))

	// Execute our ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlGetMsrs),
		uintptr(unsafe.Pointer(&data[0])))
	if e != 0 {
		return 0, e
	}

	// Return our value.
	return uint64(C.msr_get(unsafe.Pointer(&data[0]))), nil
}
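msr_size, msr_set and msr_get are cgo helpers whose definitions are not shown in these examples. A plausible preamble sketch, assuming they wrap a single-entry struct kvm_msrs (the layout that KVM_SET_MSRS/KVM_GET_MSRS expect):

/*
#include <linux/kvm.h>

// Room for the kvm_msrs header plus exactly one entry.
static int msr_size(void) {
	return sizeof(struct kvm_msrs) + sizeof(struct kvm_msr_entry);
}
static void msr_set(void *data, __u32 index, __u64 value) {
	struct kvm_msrs *msrs = (struct kvm_msrs *)data;
	msrs->nmsrs = 1;
	msrs->entries[0].index = index;
	msrs->entries[0].data = value;
}
static __u64 msr_get(void *data) {
	struct kvm_msrs *msrs = (struct kvm_msrs *)data;
	return msrs->entries[0].data;
}
*/
import "C"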
Example No. 15
func (vcpu *Vcpu) Translate(
	vaddr Vaddr) (Paddr, bool, bool, bool, error) {

	// Perform the translation.
	var translation C.struct_kvm_translation
	translation.linear_address = C.__u64(vaddr)
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlTranslate),
		uintptr(unsafe.Pointer(&translation)))
	if e != 0 {
		return Paddr(0), false, false, false, e
	}

	paddr := Paddr(translation.physical_address)
	valid := translation.valid != C.__u8(0)
	writeable := translation.writeable != C.__u8(0)
	usermode := translation.usermode != C.__u8(0)

	return paddr, valid, writeable, usermode, nil
}
Example No. 16
func (vcpu *Vcpu) SetXcrs(xcrs []Xcr) error {

	// Build our parameter.
	// Note: callers must not pass more entries than the
	// fixed-size xcrs array can hold (KVM_MAX_XCRS).
	var kvm_xcrs C.struct_kvm_xcrs
	kvm_xcrs.nr_xcrs = C.__u32(len(xcrs))
	for i, xcr := range xcrs {
		kvm_xcrs.xcrs[i].xcr = C.__u32(xcr.Id)
		kvm_xcrs.xcrs[i].value = C.__u64(xcr.Value)
	}

	// Execute the ioctl.
	_, _, e := syscall.Syscall(
		syscall.SYS_IOCTL,
		uintptr(vcpu.fd),
		uintptr(C.IoctlSetXcrs),
		uintptr(unsafe.Pointer(&kvm_xcrs)))
	if e != 0 {
		return e
	}

	return nil
}
Example No. 17
// setProjectQuota - set the quota for project id on xfs block device
func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error {
	var d C.fs_disk_quota_t
	d.d_version = C.FS_DQUOT_VERSION
	d.d_id = C.__u32(projectID)
	d.d_flags = C.XFS_PROJ_QUOTA

	d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT
	d.d_blk_hardlimit = C.__u64(quota.Size / 512)
	d.d_blk_softlimit = d.d_blk_hardlimit

	var cs = C.CString(backingFsBlockDev)
	defer C.free(unsafe.Pointer(cs))

	_, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM,
		uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
		uintptr(unsafe.Pointer(&d)), 0, 0)
	if errno != 0 {
		return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v",
			projectID, backingFsBlockDev, errno.Error())
	}

	return nil
}
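Q_XSETPQLIM is not a macro the kernel headers provide under that name; projects typically define it in the cgo preamble by packing the XFS set-limit command with the project quota type. A plausible preamble sketch:

/*
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
*/
import "C"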
Example No. 18
func (vcpu *Vcpu) SetControlRegister(
	reg ControlRegister,
	val ControlRegisterValue,
	sync bool) error {

	err := vcpu.refreshSRegs(true)
	if err != nil {
		return err
	}

	switch reg {
	case CR0:
		vcpu.sregs.cr0 = C.__u64(val)
	case CR2:
		vcpu.sregs.cr2 = C.__u64(val)
	case CR3:
		vcpu.sregs.cr3 = C.__u64(val)
	case CR4:
		vcpu.sregs.cr4 = C.__u64(val)
	case CR8:
		vcpu.sregs.cr8 = C.__u64(val)
	case EFER:
		vcpu.sregs.efer = C.__u64(val)
	case APIC_BASE:
		vcpu.sregs.apic_base = C.__u64(val)
	default:
		return UnknownRegister
	}

	if sync {
		err = vcpu.flushSRegs()
		if err != nil {
			return err
		}
	}

	return nil
}
Example No. 19
File: ioctl.go Project: plar/btrfs
func findUpdatedFiles(dir *C.DIR, rootId, oldestGen uint64) (uint64, error) {
	var maxFound uint64 = 0

	var args C.struct_btrfs_ioctl_search_args
	var sk *C.struct_btrfs_ioctl_search_key = &args.key
	var sh C.struct_btrfs_ioctl_search_header
	var item *BtrfsFileExtentItem
	var backup BtrfsFileExtentItem

	var foundGen uint64 = 0

	sk.tree_id = C.__u64(rootId)
	sk.max_objectid = math.MaxUint64
	sk.max_offset = math.MaxUint64
	sk.max_transid = math.MaxUint64
	sk.max_type = C.BTRFS_EXTENT_DATA_KEY
	sk.min_transid = C.__u64(oldestGen)
	sk.nr_items = 4096

	fd := getDirFd(dir)

	maxFound, err := findRootGen(dir)
	if err != nil {
		return 0, err
	}

	for {
		_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, C.BTRFS_IOC_TREE_SEARCH, uintptr(unsafe.Pointer(&args)))
		if errno != 0 {
			return 0, fmt.Errorf("Failed to perform the search %v", errno.Error())
		}

		if sk.nr_items == 0 {
			break
		}

		var off uintptr = 0

		for i := uint32(0); i < uint32(sk.nr_items); i++ {
			C.memcpy(unsafe.Pointer(&sh), addptr(unsafe.Pointer(&args.buf), off), C.sizeof_struct_btrfs_ioctl_search_header)
			off += C.sizeof_struct_btrfs_ioctl_search_header

			if sh.len == 0 {
				item = &backup
			} else {
				rawItem := (*C.struct_btrfs_file_extent_item)(addptr(unsafe.Pointer(&args.buf), off))
				item, err = NewBtrfsFileExtentItem(rawItem)
				if err != nil {
					return 0, err
				}
			}

			foundGen = item.Generation
			if sh._type == C.BTRFS_EXTENT_DATA_KEY && foundGen >= oldestGen {
				// print...
			}

			off += uintptr(sh.len)

			sk.min_objectid = sh.objectid
			sk.min_offset = sh.offset
			sk.min_type = sh._type
		}

		sk.nr_items = 4096
		if sk.min_offset < math.MaxUint64 {
			sk.min_offset++
		} else if sk.min_objectid < math.MaxUint64 {
			sk.min_objectid++
			sk.min_offset = 0
			sk.min_type = 0

		} else {
			break
		}
	}

	return maxFound, nil
}
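addptr is another helper the snippet assumes; a one-line sketch of the usual unsafe pointer arithmetic:

// addptr advances an unsafe.Pointer by off bytes.
func addptr(p unsafe.Pointer, off uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + off)
}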
Example No. 20
func (vcpu *Vcpu) SetSegment(
	seg Segment,
	val SegmentValue,
	sync bool) error {

	err := vcpu.refreshSRegs(true)
	if err != nil {
		return err
	}

	switch seg {
	case CS:
		vcpu.sregs.cs.base = C.__u64(val.Base)
		vcpu.sregs.cs.limit = C.__u32(val.Limit)
		vcpu.sregs.cs.selector = C.__u16(val.Selector)
		vcpu.sregs.cs._type = C.__u8(val.Type)
		vcpu.sregs.cs.present = C.__u8(val.Present)
		vcpu.sregs.cs.dpl = C.__u8(val.Dpl)
		vcpu.sregs.cs.db = C.__u8(val.Db)
		vcpu.sregs.cs.s = C.__u8(val.S)
		vcpu.sregs.cs.l = C.__u8(val.L)
		vcpu.sregs.cs.g = C.__u8(val.G)
		vcpu.sregs.cs.avl = C.__u8(val.Avl)
		vcpu.sregs.cs.unusable = C.__u8(^val.Present & 0x1)
	case DS:
		vcpu.sregs.ds.base = C.__u64(val.Base)
		vcpu.sregs.ds.limit = C.__u32(val.Limit)
		vcpu.sregs.ds.selector = C.__u16(val.Selector)
		vcpu.sregs.ds._type = C.__u8(val.Type)
		vcpu.sregs.ds.present = C.__u8(val.Present)
		vcpu.sregs.ds.dpl = C.__u8(val.Dpl)
		vcpu.sregs.ds.db = C.__u8(val.Db)
		vcpu.sregs.ds.s = C.__u8(val.S)
		vcpu.sregs.ds.l = C.__u8(val.L)
		vcpu.sregs.ds.g = C.__u8(val.G)
		vcpu.sregs.ds.avl = C.__u8(val.Avl)
		vcpu.sregs.ds.unusable = C.__u8(^val.Present & 0x1)
	case ES:
		vcpu.sregs.es.base = C.__u64(val.Base)
		vcpu.sregs.es.limit = C.__u32(val.Limit)
		vcpu.sregs.es.selector = C.__u16(val.Selector)
		vcpu.sregs.es._type = C.__u8(val.Type)
		vcpu.sregs.es.present = C.__u8(val.Present)
		vcpu.sregs.es.dpl = C.__u8(val.Dpl)
		vcpu.sregs.es.db = C.__u8(val.Db)
		vcpu.sregs.es.s = C.__u8(val.S)
		vcpu.sregs.es.l = C.__u8(val.L)
		vcpu.sregs.es.g = C.__u8(val.G)
		vcpu.sregs.es.avl = C.__u8(val.Avl)
		vcpu.sregs.es.unusable = C.__u8(^val.Present & 0x1)
	case FS:
		vcpu.sregs.fs.base = C.__u64(val.Base)
		vcpu.sregs.fs.limit = C.__u32(val.Limit)
		vcpu.sregs.fs.selector = C.__u16(val.Selector)
		vcpu.sregs.fs._type = C.__u8(val.Type)
		vcpu.sregs.fs.present = C.__u8(val.Present)
		vcpu.sregs.fs.dpl = C.__u8(val.Dpl)
		vcpu.sregs.fs.db = C.__u8(val.Db)
		vcpu.sregs.fs.s = C.__u8(val.S)
		vcpu.sregs.fs.l = C.__u8(val.L)
		vcpu.sregs.fs.g = C.__u8(val.G)
		vcpu.sregs.fs.avl = C.__u8(val.Avl)
		vcpu.sregs.fs.unusable = C.__u8(^val.Present & 0x1)
	case GS:
		vcpu.sregs.gs.base = C.__u64(val.Base)
		vcpu.sregs.gs.limit = C.__u32(val.Limit)
		vcpu.sregs.gs.selector = C.__u16(val.Selector)
		vcpu.sregs.gs._type = C.__u8(val.Type)
		vcpu.sregs.gs.present = C.__u8(val.Present)
		vcpu.sregs.gs.dpl = C.__u8(val.Dpl)
		vcpu.sregs.gs.db = C.__u8(val.Db)
		vcpu.sregs.gs.s = C.__u8(val.S)
		vcpu.sregs.gs.l = C.__u8(val.L)
		vcpu.sregs.gs.g = C.__u8(val.G)
		vcpu.sregs.gs.avl = C.__u8(val.Avl)
		vcpu.sregs.gs.unusable = C.__u8(^val.Present & 0x1)
	case SS:
		vcpu.sregs.ss.base = C.__u64(val.Base)
		vcpu.sregs.ss.limit = C.__u32(val.Limit)
		vcpu.sregs.ss.selector = C.__u16(val.Selector)
		vcpu.sregs.ss._type = C.__u8(val.Type)
		vcpu.sregs.ss.present = C.__u8(val.Present)
		vcpu.sregs.ss.dpl = C.__u8(val.Dpl)
		vcpu.sregs.ss.db = C.__u8(val.Db)
		vcpu.sregs.ss.s = C.__u8(val.S)
		vcpu.sregs.ss.l = C.__u8(val.L)
		vcpu.sregs.ss.g = C.__u8(val.G)
		vcpu.sregs.ss.avl = C.__u8(val.Avl)
		vcpu.sregs.ss.unusable = C.__u8(^val.Present & 0x1)
	case TR:
		vcpu.sregs.tr.base = C.__u64(val.Base)
		vcpu.sregs.tr.limit = C.__u32(val.Limit)
		vcpu.sregs.tr.selector = C.__u16(val.Selector)
		vcpu.sregs.tr._type = C.__u8(val.Type)
		vcpu.sregs.tr.present = C.__u8(val.Present)
		vcpu.sregs.tr.dpl = C.__u8(val.Dpl)
		vcpu.sregs.tr.db = C.__u8(val.Db)
		vcpu.sregs.tr.s = C.__u8(val.S)
		vcpu.sregs.tr.l = C.__u8(val.L)
		vcpu.sregs.tr.g = C.__u8(val.G)
		vcpu.sregs.tr.avl = C.__u8(val.Avl)
		vcpu.sregs.tr.unusable = C.__u8(^val.Present & 0x1)
	case LDT:
		vcpu.sregs.ldt.base = C.__u64(val.Base)
		vcpu.sregs.ldt.limit = C.__u32(val.Limit)
		vcpu.sregs.ldt.selector = C.__u16(val.Selector)
		vcpu.sregs.ldt._type = C.__u8(val.Type)
		vcpu.sregs.ldt.present = C.__u8(val.Present)
		vcpu.sregs.ldt.dpl = C.__u8(val.Dpl)
		vcpu.sregs.ldt.db = C.__u8(val.Db)
		vcpu.sregs.ldt.s = C.__u8(val.S)
		vcpu.sregs.ldt.l = C.__u8(val.L)
		vcpu.sregs.ldt.g = C.__u8(val.G)
		vcpu.sregs.ldt.avl = C.__u8(val.Avl)
		vcpu.sregs.ldt.unusable = C.__u8(^val.Present & 0x1)
	default:
		return UnknownRegister
	}

	if sync {
		err = vcpu.flushSRegs()
		if err != nil {
			return err
		}
	}

	return nil
}
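The eight cases differ only in which kvm_segment they target, so the per-field copying can be factored into a helper; a sketch, assuming vcpu.sregs has the usual C.struct_kvm_sregs layout used above:

func fillSegment(s *C.struct_kvm_segment, val SegmentValue) {
	s.base = C.__u64(val.Base)
	s.limit = C.__u32(val.Limit)
	s.selector = C.__u16(val.Selector)
	s._type = C.__u8(val.Type)
	s.present = C.__u8(val.Present)
	s.dpl = C.__u8(val.Dpl)
	s.db = C.__u8(val.Db)
	s.s = C.__u8(val.S)
	s.l = C.__u8(val.L)
	s.g = C.__u8(val.G)
	s.avl = C.__u8(val.Avl)
	s.unusable = C.__u8(^val.Present & 0x1)
}

Each case of the switch then collapses to a single call, e.g. case CS: fillSegment(&vcpu.sregs.cs, val).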
Example No. 21
func (vcpu *Vcpu) SetRegister(reg Register, val RegisterValue) error {
	err := vcpu.refreshRegs(true)
	if err != nil {
		return err
	}

	switch reg {
	case RAX:
		vcpu.regs.rax = C.__u64(val)
	case RBX:
		vcpu.regs.rbx = C.__u64(val)
	case RCX:
		vcpu.regs.rcx = C.__u64(val)
	case RDX:
		vcpu.regs.rdx = C.__u64(val)
	case RSI:
		vcpu.regs.rsi = C.__u64(val)
	case RDI:
		vcpu.regs.rdi = C.__u64(val)
	case RSP:
		vcpu.regs.rsp = C.__u64(val)
	case RBP:
		vcpu.regs.rbp = C.__u64(val)
	case R8:
		vcpu.regs.r8 = C.__u64(val)
	case R9:
		vcpu.regs.r9 = C.__u64(val)
	case R10:
		vcpu.regs.r10 = C.__u64(val)
	case R11:
		vcpu.regs.r11 = C.__u64(val)
	case R12:
		vcpu.regs.r12 = C.__u64(val)
	case R13:
		vcpu.regs.r13 = C.__u64(val)
	case R14:
		vcpu.regs.r14 = C.__u64(val)
	case R15:
		vcpu.regs.r15 = C.__u64(val)
	case RIP:
		vcpu.regs.rip = C.__u64(val)
	case RFLAGS:
		vcpu.regs.rflags = C.__u64(val)
	default:
		return UnknownRegister
	}

	return nil
}
Example No. 22
func (acpi *Acpi) Attach(vm *platform.Vm, model *Model) error {

	// Do we already have data?
	rebuild := true
	if acpi.Data == nil {
		// Create our data.
		acpi.Data = make([]byte, platform.PageSize, platform.PageSize)
	} else {
		// Align our data.
		// This is necessary because we map this in
		// directly. It's possible that the data was
		// decoded and refers to the middle of some
		// larger array somewhere, and isn't aligned.
		acpi.Data = platform.AlignBytes(acpi.Data)
		rebuild = false
	}

	// Allocate our memory block.
	err := model.Reserve(
		vm,
		acpi,
		MemoryTypeAcpi,
		acpi.Addr,
		platform.PageSize,
		acpi.Data)
	if err != nil {
		return err
	}

	// Already done.
	if !rebuild {
		return nil
	}

	// Find our APIC information.
	// This will find the APIC device if it
	// is attached; otherwise the MADT table
	// will unfortunately be a bit invalid.
	var IOApic platform.Paddr
	var LApic platform.Paddr
	for _, device := range model.Devices() {
		apic, ok := device.(*Apic)
		if ok {
			IOApic = apic.IOApic
			LApic = apic.LApic
			break
		}
	}

	// Load the MADT.
	madt_bytes := C.build_madt(
		unsafe.Pointer(&acpi.Data[0]),
		C.__u32(LApic),
		C.int(len(vm.Vcpus())),
		C.__u32(IOApic),
		C.__u32(0), // I/O APIC interrupt?
	)
	acpi.Debug("MADT %x @ %x", madt_bytes, acpi.Addr)

	// Align offset.
	offset := madt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the DSDT.
	dsdt_address := uint64(acpi.Addr) + uint64(offset)
	dsdt_bytes := C.build_dsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
	)
	acpi.Debug("DSDT %x @ %x", dsdt_bytes, dsdt_address)

	// Align offset.
	offset += dsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the XSDT.
	xsdt_address := uint64(acpi.Addr) + uint64(offset)
	xsdt_bytes := C.build_xsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u64(acpi.Addr), // MADT address.
	)
	acpi.Debug("XSDT %x @ %x", xsdt_bytes, xsdt_address)

	// Align offset.
	offset += xsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the RSDT.
	rsdt_address := uint64(acpi.Addr) + uint64(offset)
	rsdt_bytes := C.build_rsdt(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u32(acpi.Addr), // MADT address.
	)
	acpi.Debug("RSDT %x @ %x", rsdt_bytes, rsdt_address)

	// Align offset.
	offset += rsdt_bytes
	if offset%64 != 0 {
		offset += 64 - (offset % 64)
	}

	// Load the RSDP.
	rsdp_address := uint64(acpi.Addr) + uint64(offset)
	rsdp_bytes := C.build_rsdp(
		unsafe.Pointer(&acpi.Data[int(offset)]),
		C.__u32(rsdt_address), // RSDT address.
		C.__u64(xsdt_address), // XSDT address.
	)
	acpi.Debug("RSDP %x @ %x", rsdp_bytes, rsdp_address)

	// Everything went okay.
	return nil
}
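The same round-up-to-64-bytes step appears after every table; it could be captured once. A sketch, assuming the byte counts returned by the C builders convert cleanly to int:

// alignUp rounds n up to the next multiple of align
// (align must be a power of two, here 64).
func alignUp(n, align int) int {
	if r := n % align; r != 0 {
		n += align - r
	}
	return n
}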
Example No. 23
func (vchannel *VirtioChannel) processOne(n uint16) error {

	var buf *VirtioBuffer
	var addr C.__u64
	var length C.__u32
	var buf_flags C.__u16
	var next C.__u16
	index := C.__u16(n)

	vchannel.Debug(
		"vqueue#%d incoming slot [%d]",
		vchannel.Channel,
		index)

	for {
		// Read the entry.
		C.vring_get_index(
			&vchannel.vring,
			index,
			&addr,
			&length,
			&buf_flags,
			&next)

		// Append our buffer.
		has_next := (buf_flags & C.__u16(C.VirtioDescFNext)) != C.__u16(0)
		is_write := (buf_flags & C.__u16(C.VirtioDescFWrite)) != C.__u16(0)
		is_indirect := (buf_flags & C.__u16(C.VirtioDescFIndirect)) != C.__u16(0)

		// Do we have a buffer?
		if buf == nil {
			buf = NewVirtioBuffer(uint16(index), !is_write)
		}

		if is_indirect {
			// FIXME: Map all indirect buffers.
			log.Printf("WARNING: Indirect buffers not supported.")

		} else {
			// Map the given address.
			vchannel.Debug("vqueue#%d map [%x-%x]",
				vchannel.Channel,
				platform.Paddr(addr),
				uint64(addr)+uint64(length)-1)

			data, err := vchannel.VirtioDevice.mmap(
				platform.Paddr(addr),
				uint64(length))

			if err != nil {
				log.Printf(
					"Unable to map [%x,%x]? Flags are %x, next is %x.",
					addr,
					addr+C.__u64(length)-1,
					buf_flags,
					next)
				return err
			}

			// Append this segment.
			buf.Append(data)
		}

		// Are we finished?
		if !has_next {
			// Send these buffers.
			vchannel.Debug(
				"vqueue#%d processing slot [%d]",
				vchannel.Channel,
				buf.index)

			// Mark this as outstanding.
			vchannel.Outstanding[uint16(buf.index)] = true
			vchannel.incoming <- buf
			break

		} else {
			// Keep chaining.
			index = next
			vchannel.Debug(
				"vqueue#%d next slot [%d]",
				vchannel.Channel,
				index)
			continue
		}
	}

	// We're good.
	return nil
}
Example No. 24
func SetupLinux(
	vcpu *platform.Vcpu,
	model *machine.Model,
	orig_boot_data []byte,
	entry_point uint64,
	is_64bit bool,
	initrd_addr platform.Paddr,
	initrd_len uint64,
	cmdline_addr platform.Paddr) error {

	// Copy in the GDT table.
	// These match the segments below.
	gdt_addr, gdt, err := model.Allocate(
		machine.MemoryTypeUser,
		0,                 // Start.
		model.Max(),       // End.
		platform.PageSize, // Size.
		false)             // From bottom.
	if err != nil {
		return err
	}
	if is_64bit {
		C.build_64bit_gdt(unsafe.Pointer(&gdt[0]))
	} else {
		C.build_32bit_gdt(unsafe.Pointer(&gdt[0]))
	}

	BootGdt := platform.DescriptorValue{
		Base:  uint64(gdt_addr),
		Limit: uint16(platform.PageSize),
	}
	err = vcpu.SetDescriptor(platform.GDT, BootGdt, true)
	if err != nil {
		return err
	}

	// Set a null IDT.
	BootIdt := platform.DescriptorValue{
		Base:  0,
		Limit: 0,
	}
	err = vcpu.SetDescriptor(platform.IDT, BootIdt, true)
	if err != nil {
		return err
	}

	// Enable protected-mode.
	// This does not set any flags (e.g. paging) beyond the
	// protected mode flag. This is according to the Linux
	// entry protocol for 32-bit protected mode.
	cr0, err := vcpu.GetControlRegister(platform.CR0)
	if err != nil {
		return err
	}
	cr0 = cr0 | (1 << 0) // Protected mode.
	err = vcpu.SetControlRegister(platform.CR0, cr0, true)
	if err != nil {
		return err
	}

	// Always have the PAE bit set.
	cr4, err := vcpu.GetControlRegister(platform.CR4)
	if err != nil {
		return err
	}
	cr4 = cr4 | (1 << 5) // PAE enabled.
	err = vcpu.SetControlRegister(platform.CR4, cr4, true)
	if err != nil {
		return err
	}

	// For 64-bit kernels, we need to enable long mode,
	// and load an identity page table. Since we map with
	// huge pages, this needs only a single page of
	// entries at each level.
	if is_64bit {
		// Create our page tables.
		pde_addr, pde, err := model.Allocate(
			machine.MemoryTypeUser,
			0,                 // Start.
			model.Max(),       // End.
			platform.PageSize, // Size.
			false)             // From bottom.
		if err != nil {
			return err
		}
		pgd_addr, pgd, err := model.Allocate(
			machine.MemoryTypeUser,
			0,                 // Start.
			model.Max(),       // End.
			platform.PageSize, // Size.
			false)             // From bottom.
		if err != nil {
			return err
		}
		pml4_addr, pml4, err := model.Allocate(
			machine.MemoryTypeUser,
			0,                 // Start.
			model.Max(),       // End.
			platform.PageSize, // Size.
			false)             // From bottom.
		if err != nil {
			return err
		}

		C.build_pde(unsafe.Pointer(&pde[0]), platform.PageSize)
		C.build_pgd(unsafe.Pointer(&pgd[0]), C.__u64(pde_addr), platform.PageSize)
		C.build_pml4(unsafe.Pointer(&pml4[0]), C.__u64(pgd_addr), platform.PageSize)

		log.Printf("loader: Created PDE @ %08x.", pde_addr)
		log.Printf("loader: Created PGD @ %08x.", pgd_addr)
		log.Printf("loader: Created PML4 @ %08x.", pml4_addr)

		// Set our newly built page table.
		err = vcpu.SetControlRegister(
			platform.CR3,
			platform.ControlRegisterValue(pml4_addr),
			true)
		if err != nil {
			return err
		}

		// Enable long mode.
		efer, err := vcpu.GetControlRegister(platform.EFER)
		if err != nil {
			return err
		}
		efer = efer | (1 << 8) // Long-mode enable.
		err = vcpu.SetControlRegister(platform.EFER, efer, true)
		if err != nil {
			return err
		}

		// Enable paging.
		cr0, err = vcpu.GetControlRegister(platform.CR0)
		if err != nil {
			return err
		}
		cr0 = cr0 | (1 << 31) // Paging enable.
		err = vcpu.SetControlRegister(platform.CR0, cr0, true)
		if err != nil {
			return err
		}
	}

	// NOTE: For 64-bit kernels, we need to enable
	// real 64-bit mode. This means that the L bit in
	// the segments must be one, the Db bit must be
	// zero, and we set the LME bit in EFER (above).
	var lVal uint8
	var dVal uint8
	if is_64bit {
		lVal = 1
		dVal = 0
	} else {
		lVal = 0
		dVal = 1
	}

	// Load the VMCS segments.
	//
	// NOTE: These values are loaded into the VMCS
	// registers and are expected to match the descriptors
	// we've used above. Unfortunately the API format doesn't
	// match, so we need to duplicate some work here. Ah, well
	// at least the below serves as an explanation for what
	// the magic numbers in GDT_ENTRY() above mean.
	BootCs := platform.SegmentValue{
		Base:     0,
		Limit:    0xffffffff,
		Selector: uint16(C.BootCsSelector), // @ 0x10
		Dpl:      0,                        // Privilege level (kernel).
		Db:       dVal,                     // 32-bit segment?
		G:        1,                        // Granularity (page).
		S:        1,                        // As per BOOT_CS (code/data).
		L:        lVal,                     // 64-bit extension.
		Type:     0xb,                      // As per BOOT_CS (access must be set).
		Present:  1,
	}
	BootDs := platform.SegmentValue{
		Base:     0,
		Limit:    0xffffffff,
		Selector: uint16(C.BootDsSelector), // @ 0x18
		Dpl:      0,                        // Privilege level (kernel).
		Db:       1,                        // 32-bit segment?
		G:        1,                        // Granularity (page).
		S:        1,                        // As per BOOT_DS (code/data).
		L:        0,                        // 64-bit extension.
		Type:     0x3,                      // As per BOOT_DS (access must be set).
		Present:  1,
	}
	BootTr := platform.SegmentValue{
		Base:     0,
		Limit:    0xffffffff,
		Selector: uint16(C.BootTrSelector), // @ 0x20
		Dpl:      0,                        // Privilege level (kernel).
		Db:       1,                        // 32-bit segment?
		G:        1,                        // Granularity (page).
		S:        0,                        // As per BOOT_TR (system).
		L:        0,                        // 64-bit extension.
		Type:     0xb,                      // As per BOOT_TR.
		Present:  1,
	}

	err = vcpu.SetSegment(platform.CS, BootCs, true)
	if err != nil {
		return err
	}

	err = vcpu.SetSegment(platform.DS, BootDs, true)
	if err != nil {
		return err
	}

	err = vcpu.SetSegment(platform.ES, BootDs, true)
	if err != nil {
		return err
	}

	err = vcpu.SetSegment(platform.FS, BootDs, true)
	if err != nil {
		return err
	}

	err = vcpu.SetSegment(platform.GS, BootDs, true)
	if err != nil {
		return err
	}

	err = vcpu.SetSegment(platform.SS, BootDs, true)
	if err != nil {
		return err
	}

	err = vcpu.SetSegment(platform.TR, BootTr, true)
	if err != nil {
		return err
	}

	// Create our boot parameters.
	boot_addr, boot_data, err := model.Allocate(
		machine.MemoryTypeUser,
		0,                 // Start.
		model.Max(),       // End.
		platform.PageSize, // Size.
		false)             // From bottom.
	if err != nil {
		return err
	}
	err = SetupLinuxBootParams(
		model,
		boot_data,
		orig_boot_data,
		cmdline_addr,
		initrd_addr,
		initrd_len)
	if err != nil {
		return err
	}

	// Set our registers.
	// This is according to the Linux 32-bit boot protocol.
	log.Printf("loader: boot_params @ %08x.", boot_addr)
	err = vcpu.SetRegister(platform.RSI, platform.RegisterValue(boot_addr))
	if err != nil {
		return err
	}

	err = vcpu.SetRegister(platform.RBP, 0)
	if err != nil {
		return err
	}

	err = vcpu.SetRegister(platform.RDI, 0)
	if err != nil {
		return err
	}

	err = vcpu.SetRegister(platform.RBX, 0)
	if err != nil {
		return err
	}

	// Jump to our entry point.
	err = vcpu.SetRegister(platform.RIP, platform.RegisterValue(entry_point))
	if err != nil {
		return err
	}

	// Make sure interrupts are disabled.
	// This will actually clear out all other flags.
	rflags, err := vcpu.GetRegister(platform.RFLAGS)
	if err != nil {
		return err
	}
	rflags = rflags &^ (1 << 9) // Interrupts off.
	rflags = rflags | (1 << 1)  // Reserved 1.
	err = vcpu.SetRegister(
		platform.RFLAGS,
		platform.RegisterValue(rflags))
	if err != nil {
		return err
	}

	// We're done.
	return nil
}
Example No. 25
func SetupLinuxBootParams(
	model *machine.Model,
	boot_params_data []byte,
	orig_boot_params_data []byte,
	cmdline_addr platform.Paddr,
	initrd_addr platform.Paddr,
	initrd_len uint64) error {

	// Grab a reference to our boot params struct.
	boot_params := (*C.struct_boot_params)(unsafe.Pointer(&boot_params_data[0]))

	// The setup header.
	// First step is to copy the existing setup_header
	// out of the given kernel image. We copy only the
	// header, and not the rest of the setup page.
	setup_start := 0x01f1
	setup_end := 0x0202 + int(orig_boot_params_data[0x0201])
	if setup_end > platform.PageSize {
		return InvalidSetupHeader
	}
	C.memcpy(
		unsafe.Pointer(&boot_params_data[setup_start]),
		unsafe.Pointer(&orig_boot_params_data[setup_start]),
		C.size_t(setup_end-setup_start))

	// Setup our BIOS memory map.
	// NOTE: We have to do this via C bindings. This is really
	// annoying, but basically because of the unaligned structures
	// in the struct_boot_params, the Go code generated here is
	// actually *incompatible* with the actual C layout.

	// First, the count.
	C.e820_set_count(boot_params, C.int(len(model.MemoryMap)))

	// Then, fill out the region information.
	for index, region := range model.MemoryMap {

		var memtype C.int
		switch region.MemoryType {
		case machine.MemoryTypeUser:
			memtype = C.E820Ram
		case machine.MemoryTypeReserved:
			memtype = C.E820Reserved
		case machine.MemoryTypeSpecial:
			memtype = C.E820Reserved
		case machine.MemoryTypeAcpi:
			memtype = C.E820Acpi
		}

		C.e820_set_region(
			boot_params,
			C.int(index),
			C.__u64(region.Start),
			C.__u64(region.Size),
			C.__u8(memtype))
	}

	// Set necessary setup header bits.
	C.set_header(
		boot_params,
		C.__u64(initrd_addr),
		C.__u64(initrd_len),
		C.__u64(cmdline_addr))

	// All done!
	return nil
}