func (vcpu *Vcpu) SetFpuState(state Fpu) error {

    // Prepare our data.
    var kvm_fpu C.struct_kvm_fpu
    for i := 0; i < len(state.FPR); i += 1 {
        for j := 0; j < len(state.FPR[i]); j += 1 {
            kvm_fpu.fpr[i][j] = C.__u8(state.FPR[i][j])
        }
    }
    kvm_fpu.fcw = C.__u16(state.FCW)
    kvm_fpu.fsw = C.__u16(state.FSW)
    kvm_fpu.ftwx = C.__u8(state.FTWX)
    kvm_fpu.last_opcode = C.__u16(state.LastOpcode)
    kvm_fpu.last_ip = C.__u64(state.LastIp)
    kvm_fpu.last_dp = C.__u64(state.LastDp)
    for i := 0; i < len(state.XMM); i += 1 {
        for j := 0; j < len(state.XMM[i]); j += 1 {
            kvm_fpu.xmm[i][j] = C.__u8(state.XMM[i][j])
        }
    }
    kvm_fpu.mxcsr = C.__u32(state.MXCSR)

    // Execute the ioctl.
    _, _, e := syscall.Syscall(
        syscall.SYS_IOCTL,
        uintptr(vcpu.fd),
        uintptr(C.IoctlSetFpu),
        uintptr(unsafe.Pointer(&kvm_fpu)))
    if e != 0 {
        return e
    }

    return nil
}
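// Illustrative sketch, not part of the original source: reset a vcpu's FPU
// to the usual x86 power-on defaults. It assumes the zero value of Fpu is
// acceptable for the register arrays and that FCW/MXCSR are plain integer
// fields, as the conversions in SetFpuState suggest.
func resetFpuDefaults(vcpu *Vcpu) error {
    var state Fpu
    state.FCW = 0x37f    // x87 control word after FINIT.
    state.MXCSR = 0x1f80 // SSE control/status default.
    return vcpu.SetFpuState(state)
}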
func (vcpu *Vcpu) SetDescriptor(
    desc Descriptor,
    val DescriptorValue,
    sync bool) error {

    err := vcpu.refreshSRegs(true)
    if err != nil {
        return err
    }

    switch desc {
    case GDT:
        vcpu.sregs.gdt.base = C.__u64(val.Base)
        vcpu.sregs.gdt.limit = C.__u16(val.Limit)
    case IDT:
        vcpu.sregs.idt.base = C.__u64(val.Base)
        vcpu.sregs.idt.limit = C.__u16(val.Limit)
    default:
        return UnknownRegister
    }

    if sync {
        err = vcpu.flushSRegs()
        if err != nil {
            return err
        }
    }

    return nil
}
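// Minimal usage sketch, not from the original source: point the guest's GDT
// at a table previously written into guest memory and flush sregs right away.
// The base address and limit below are placeholders for illustration only.
func pointGdt(vcpu *Vcpu) error {
    val := DescriptorValue{Base: 0x1000, Limit: 0x7f}
    return vcpu.SetDescriptor(GDT, val, true)
}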
func (vchannel *VirtioChannel) consumeOne() (bool, error) {

    var flags C.__u16
    var index C.__u16
    var used_event C.__u16

    // Fetch the next buffer.
    // FIXME: We are currently not using the flags or the
    // used_event on the incoming queue. We will need to
    // support this eventually (notifying when we are short).
    if C.vring_get_buf(
        &vchannel.vring,
        C.__u16(vchannel.Consumed),
        &flags,
        &index,
        &used_event) != 0 {

        // We're up a buffer.
        vchannel.Consumed += 1

        // Process the buffer.
        err := vchannel.processOne(uint16(index))
        if err != nil {
            return false, err
        }

        return true, nil
    }

    return false, nil
}
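// Sketch, not in the original source, of how consumeOne would typically be
// driven after a guest notification: keep pulling buffers off the available
// ring until it reports that nothing is pending. The method name is
// hypothetical.
func (vchannel *VirtioChannel) drainIncoming() error {
    for {
        found, err := vchannel.consumeOne()
        if err != nil {
            return err
        }
        if !found {
            // Nothing left in the available ring.
            return nil
        }
    }
}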
// This command selects a device register (through the cmd byte), sends
// 16 bits of data to it, and reads 16 bits of data in return.
func (smb SMBus) Process_call(cmd byte, value uint16) (uint16, error) {
    smb.Set_addr(smb.addr)
    ret, err := C.i2c_smbus_process_call(C.int(smb.bus.Fd()),
        C.__u8(cmd), C.__u16(value))
    if err != nil {
        ret = 0
    }
    return uint16(ret & 0xFFFF), err
}
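// Usage sketch, not from the original source: perform a process call against
// a hypothetical device register 0x03, sending a value and reading the
// device's 16-bit reply in a single transaction.
func exampleProcessCall(smb SMBus) (uint16, error) {
    // Register 0x03 and value 0x0001 are placeholders for illustration.
    return smb.Process_call(0x03, 0x0001)
}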
func upKey(key int) error {
    ev := input_event{}
    ev._type = _EV_KEY
    ev.code = C.__u16(key)
    ev.value = 0

    err := binary.Write(fd, binary.LittleEndian, &ev)
    if err != nil {
        return err
    }

    return nil
}
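// Companion sketch, not part of the original source (the package may already
// provide an equivalent): the press side of a key event mirrors upKey
// exactly, except that the value field is 1. In the Linux input protocol,
// an EV_KEY value of 1 means press and 0 means release; a SYN_REPORT event
// is normally sent afterwards to flush the queue, presumably handled
// elsewhere just as it is for upKey.
func downKey(key int) error {
    ev := input_event{}
    ev._type = _EV_KEY
    ev.code = C.__u16(key)
    ev.value = 1
    return binary.Write(fd, binary.LittleEndian, &ev)
}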
func (vchannel *VirtioChannel) ProcessOutgoing() error {

    for buf := range vchannel.outgoing {

        // The device is active.
        vchannel.VirtioDevice.Acquire()

        // Put in the virtqueue.
        vchannel.Debug(
            "vqueue#%d outgoing slot [%d]",
            vchannel.Channel,
            buf.index)

        var evt_interrupt C.int
        var no_interrupt C.int
        C.vring_put_buf(
            &vchannel.vring,
            C.__u16(buf.index),
            C.__u32(buf.length),
            &evt_interrupt,
            &no_interrupt)

        if vchannel.HasFeatures(VirtioRingFEventIdx) {
            // We are using the event index.
            if evt_interrupt != C.int(0) {
                // Interrupt the guest.
                vchannel.Interrupt(true)
            }
        } else {
            // We have no event index.
            if no_interrupt == C.int(0) {
                // Interrupt the guest.
                vchannel.Interrupt(true)
            }
        }

        // Remove from our outstanding list.
        delete(vchannel.Outstanding, uint16(buf.index))

        // We can release until the next buffer comes back.
        vchannel.VirtioDevice.Release()
    }

    return nil
}
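// Usage sketch, not part of the original source: ProcessOutgoing blocks
// ranging over the outgoing channel until that channel is closed, so it is
// presumably launched once per virtqueue as a goroutine during device start.
// The helper name below is hypothetical.
func (vchannel *VirtioChannel) startOutgoing() {
    go func() {
        if err := vchannel.ProcessOutgoing(); err != nil {
            log.Printf("vqueue#%d outgoing error: %v", vchannel.Channel, err)
        }
    }()
}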
func (vcpu *Vcpu) SetSegment(
    seg Segment,
    val SegmentValue,
    sync bool) error {

    err := vcpu.refreshSRegs(true)
    if err != nil {
        return err
    }

    // Select the segment register to update.
    var target *C.struct_kvm_segment
    switch seg {
    case CS:
        target = &vcpu.sregs.cs
    case DS:
        target = &vcpu.sregs.ds
    case ES:
        target = &vcpu.sregs.es
    case FS:
        target = &vcpu.sregs.fs
    case GS:
        target = &vcpu.sregs.gs
    case SS:
        target = &vcpu.sregs.ss
    case TR:
        target = &vcpu.sregs.tr
    case LDT:
        target = &vcpu.sregs.ldt
    default:
        return UnknownRegister
    }

    // Copy the segment descriptor fields.
    target.base = C.__u64(val.Base)
    target.limit = C.__u32(val.Limit)
    target.selector = C.__u16(val.Selector)
    target._type = C.__u8(val.Type)
    target.present = C.__u8(val.Present)
    target.dpl = C.__u8(val.Dpl)
    target.db = C.__u8(val.Db)
    target.s = C.__u8(val.S)
    target.l = C.__u8(val.L)
    target.g = C.__u8(val.G)
    target.avl = C.__u8(val.Avl)
    target.unusable = C.__u8(^val.Present & 0x1)

    if sync {
        err = vcpu.flushSRegs()
        if err != nil {
            return err
        }
    }

    return nil
}
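// Usage sketch, not from the original source: load CS with a flat 64-bit
// code segment of the kind typically used when entering a guest directly in
// long mode. The selector 0x8 is a placeholder for the first GDT entry, and
// the literal assumes SegmentValue's fields are plain integers wide enough
// for these values, as the conversions above suggest.
func setFlatCode64(vcpu *Vcpu) error {
    return vcpu.SetSegment(CS, SegmentValue{
        Base:     0,
        Limit:    0xffffffff,
        Selector: 0x8,
        Type:     0xb, // Execute/read, accessed.
        Present:  1,
        Dpl:      0,
        Db:       0,
        S:        1, // Code/data segment.
        L:        1, // 64-bit mode.
        G:        1, // 4KiB granularity.
        Avl:      0,
    }, true)
}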
func (vchannel *VirtioChannel) processOne(n uint16) error {

    var buf *VirtioBuffer
    var addr C.__u64
    var length C.__u32
    var buf_flags C.__u16
    var next C.__u16
    index := C.__u16(n)

    vchannel.Debug(
        "vqueue#%d incoming slot [%d]",
        vchannel.Channel,
        index)

    for {
        // Read the entry.
        C.vring_get_index(
            &vchannel.vring,
            index,
            &addr,
            &length,
            &buf_flags,
            &next)

        // Decode the descriptor flags.
        has_next := (buf_flags & C.__u16(C.VirtioDescFNext)) != C.__u16(0)
        is_write := (buf_flags & C.__u16(C.VirtioDescFWrite)) != C.__u16(0)
        is_indirect := (buf_flags & C.__u16(C.VirtioDescFIndirect)) != C.__u16(0)

        // Do we have a buffer?
        if buf == nil {
            buf = NewVirtioBuffer(uint16(index), !is_write)
        }

        if is_indirect {
            // FIXME: Map all indirect buffers.
            log.Printf("WARNING: Indirect buffers not supported.")

        } else {
            // Map the given address.
            vchannel.Debug("vqueue#%d map [%x-%x]",
                vchannel.Channel,
                platform.Paddr(addr),
                uint64(addr)+uint64(length)-1)

            data, err := vchannel.VirtioDevice.mmap(
                platform.Paddr(addr),
                uint64(length))
            if err != nil {
                log.Printf(
                    "Unable to map [%x,%x]? Flags are %x, next is %x.",
                    addr,
                    addr+C.__u64(length)-1,
                    buf_flags,
                    next)
                return err
            }

            // Append this segment to our buffer.
            buf.Append(data)
        }

        // Are we finished?
        if !has_next {
            // Send these buffers.
            vchannel.Debug(
                "vqueue#%d processing slot [%d]",
                vchannel.Channel,
                buf.index)

            // Mark this as outstanding.
            vchannel.Outstanding[uint16(buf.index)] = true
            vchannel.incoming <- buf
            break

        } else {
            // Keep chaining.
            index = next
            vchannel.Debug(
                "vqueue#%d next slot [%d]",
                vchannel.Channel,
                index)
            continue
        }
    }

    // We're good.
    return nil
}
// This is the opposite of the Read Word operation. 16 bits of data are
// written to the device register designated by the cmd byte.
func (smb SMBus) Write_word_data(cmd byte, value uint16) error {
    smb.Set_addr(smb.addr)
    _, err := C.i2c_smbus_write_word_data(C.int(smb.bus.Fd()),
        C.__u8(cmd), C.__u16(value))
    return err
}
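// Usage sketch, not from the original source: write a 16-bit value to a
// hypothetical configuration register 0x10 on the currently addressed device.
func exampleWriteWord(smb SMBus) error {
    return smb.Write_word_data(0x10, 0xBEEF)
}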