// gobDecodeOpFor returns the op for a type that is known to implement
// GobDecoder.
func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) {
	// rcvrType becomes the type of the GobDecode receiver.
	// NOTE(review): decIndir == -1 appears to mean the receiver is a
	// pointer to the stored value, while positive values mean extra
	// indirections to strip — confirm against userTypeInfo.
	rcvrType := ut.user
	if ut.decIndir == -1 {
		rcvrType = reflect.PtrTo(rcvrType)
	} else if ut.decIndir > 0 {
		for i := int8(0); i < ut.decIndir; i++ {
			rcvrType = rcvrType.Elem()
		}
	}
	var op decOp
	op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
		// Caller has gotten us to within one indirection of our value.
		if i.indir > 0 {
			if *(*unsafe.Pointer)(p) == nil {
				// Allocate the base value so there is something to decode into.
				*(*unsafe.Pointer)(p) = unsafe.New(ut.base)
			}
		}
		// Now p is a pointer to the base type. Do we need to climb out to
		// get to the receiver type?
		var v reflect.Value
		if ut.decIndir == -1 {
			// Box &p so the interface holds a pointer to the value.
			v = reflect.ValueOf(unsafe.Unreflect(rcvrType, unsafe.Pointer(&p)))
		} else {
			v = reflect.ValueOf(unsafe.Unreflect(rcvrType, p))
		}
		// Delegate to the GobDecoder-aware decode path.
		state.dec.decodeGobDecoder(state, v)
	}
	return &op, int(ut.indir)
}
// gobEncodeOpFor returns the op for a type that is known to implement
// GobEncoder.
func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) {
	// rt becomes the type of the GobEncode receiver.
	// NOTE(review): encIndir == -1 appears to mean the receiver is a
	// pointer to the addressed value; positive values mean indirections
	// to strip — confirm against userTypeInfo.
	rt := ut.user
	if ut.encIndir == -1 {
		rt = reflect.PtrTo(rt)
	} else if ut.encIndir > 0 {
		for i := int8(0); i < ut.encIndir; i++ {
			rt = rt.Elem()
		}
	}
	var op encOp
	op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
		var v reflect.Value
		if ut.encIndir == -1 {
			// Need to climb up one level to turn value into pointer.
			v = reflect.ValueOf(unsafe.Unreflect(rt, unsafe.Pointer(&p)))
		} else {
			v = reflect.ValueOf(unsafe.Unreflect(rt, p))
		}
		// Zero values are skipped unless the state requests otherwise.
		if !state.sendZero && isZero(v) {
			return
		}
		state.update(i)
		state.enc.encodeGobEncoder(state.b, v)
	}
	return &op, int(ut.encIndir) // encIndir: op will get called with p == address of receiver.
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base uintptr, sbase uintptr) (err error) {
	// The embedded message is length-delimited; read its raw bytes first.
	raw, e := o.DecodeRawBytes(false)
	if e != nil {
		return e
	}

	// Allocate a fresh struct and store its pointer into the field at
	// base+p.offset (the field has type *T, treated generically here).
	ptr := (**struct{})(unsafe.Pointer(base + p.offset))
	typ := p.stype.Elem()
	structv := unsafe.New(typ)
	bas := uintptr(structv)
	*ptr = (*struct{})(structv)

	// If the object can unmarshal itself, let it.
	iv := unsafe.Unreflect(p.stype, unsafe.Pointer(ptr))
	if u, ok := iv.(Unmarshaler); ok {
		return u.Unmarshal(raw)
	}

	// Otherwise decode the raw bytes by temporarily swapping in a buffer
	// over them, then restoring the original buffer and read position.
	obuf := o.buf
	oi := o.index
	o.buf = raw
	o.index = 0
	err = o.unmarshalType(p.stype, false, bas)
	o.buf = obuf
	o.index = oi
	return err
}
// Usage: // given: foo_array *Foo := native array of Foo // foo_len int := Number of Foos in foo_array // // res := sliceify(foo_array, []Foo(nil), foo_len).([]Foo) func sliceify(arg interface{}, sample interface{}, length int) interface{} { val := reflect.ValueOf(arg) target := unsafe.Pointer(&reflect.SliceHeader{ Data: val.Pointer(), Len: length, Cap: length, }) return unsafe.Unreflect(reflect.TypeOf(sample), target) }
// Usage: // given: foo_array *Foo := native array of Foo // foo_len int := Number of Foos in foo_array // // res := sliceify(foo_array, []Foo(nil), foo_len).([]Foo) func sliceify(arg interface{}, sample interface{}, length int) interface{} { val := reflect.NewValue(arg).(*reflect.PtrValue) target := unsafe.Pointer(&reflect.SliceHeader{ Data: val.Get(), Len: length, Cap: length, }) return unsafe.Unreflect(reflect.Typeof(sample), target) }
// Encode a slice of message structs ([]*struct).
func (o *Buffer) enc_slice_struct_message(p *Properties, base uintptr) error {
	// The field at base+p.offset is a slice of struct pointers.
	s := *(*[]*struct{})(unsafe.Pointer(base + p.offset))
	l := len(s)
	typ := p.stype.Elem()

	for i := 0; i < l; i++ {
		v := s[i]
		if v == nil {
			return ErrRepeatedHasNil
		}

		// Can the object marshal itself?
		iv := unsafe.Unreflect(p.stype, unsafe.Pointer(&s[i]))
		if m, ok := iv.(Marshaler); ok {
			if isNil(reflect.ValueOf(iv)) {
				return ErrNil
			}
			data, err := m.Marshal()
			if err != nil {
				return err
			}
			o.buf = append(o.buf, p.tagcode...)
			o.EncodeRawBytes(data)
			continue
		}

		// Encode into a scratch buffer first: the length prefix must be
		// written before the message bytes themselves.
		obuf := o.buf
		o.buf = o.bufalloc()
		b := uintptr(unsafe.Pointer(v))
		err := o.enc_struct(typ, b)
		nbuf := o.buf
		o.buf = obuf
		if err != nil {
			o.buffree(nbuf)
			if err == ErrNil {
				// A nil inside a repeated field is reported specially.
				return ErrRepeatedHasNil
			}
			return err
		}
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeRawBytes(nbuf)
		o.buffree(nbuf)
	}
	return nil
}
// Interface returns the value held at v.addr boxed as an interface{}.
func (v *value) Interface() interface{} {
	if typ, ok := v.typ.(*InterfaceType); ok {
		// There are two different representations of interface values,
		// one if the interface type has methods and one if it doesn't.
		// These two representations require different expressions
		// to extract correctly.
		if typ.NumMethod() == 0 {
			// Extract as interface value without methods.
			return *(*interface{})(v.addr)
		}
		// Extract from v.addr as interface value with methods.
		return *(*interface {
			m()
		})(v.addr)
	}
	// Non-interface value: box the data at v.addr with its type.
	return unsafe.Unreflect(v.typ, unsafe.Pointer(v.addr))
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base uintptr, sbase uintptr) error {
	// The field at base+p.offset is a slice of struct pointers.
	x := (*[]*struct{})(unsafe.Pointer(base + p.offset))
	y := *x
	if cap(y) == 0 {
		// Lazily initialize the slice from the scratch area.
		initSlice(unsafe.Pointer(x), sbase+p.scratch)
		y = *x
	}

	// Allocate a new struct and append its pointer to the slice.
	typ := p.stype.Elem()
	structv := unsafe.New(typ)
	bas := uintptr(structv)
	y = append(y, (*struct{})(structv))
	*x = y

	if is_group {
		// Groups decode in place from the enclosing stream; no length prefix.
		err := o.unmarshalType(p.stype, is_group, bas)
		return err
	}

	raw, err := o.DecodeRawBytes(true)
	if err != nil {
		return err
	}

	// If the object can unmarshal itself, let it.
	iv := unsafe.Unreflect(p.stype, unsafe.Pointer(&y[len(y)-1]))
	if u, ok := iv.(Unmarshaler); ok {
		return u.Unmarshal(raw)
	}

	// Decode the embedded bytes by temporarily swapping the buffer and
	// read position, then restoring them.
	obuf := o.buf
	oi := o.index
	o.buf = raw
	o.index = 0
	err = o.unmarshalType(p.stype, is_group, bas)
	o.buf = obuf
	o.index = oi
	return err
}
// decodeMap decodes a map and stores its header through p.
// Maps are encoded as a length followed by key:value pairs.
// Because the internals of maps are not visible to us, we must
// use reflection rather than pointer magic.
func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl error) {
	if indir > 0 {
		p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect
	}
	up := unsafe.Pointer(p)
	if *(*unsafe.Pointer)(up) == nil { // maps are represented as a pointer in the runtime
		// Allocate map.
		*(*unsafe.Pointer)(up) = unsafe.Pointer(reflect.MakeMap(mtyp).Pointer())
	}
	// Maps cannot be accessed by moving addresses around the way
	// that slices etc. can. We must recover a full reflection value for
	// the iteration.
	v := reflect.ValueOf(unsafe.Unreflect(mtyp, unsafe.Pointer(p)))
	n := int(state.decodeUint())
	for i := 0; i < n; i++ {
		// Decode each pair into freshly allocated key/elem values before
		// storing them in the map.
		key := decodeIntoValue(state, keyOp, keyIndir, allocValue(mtyp.Key()), ovfl)
		elem := decodeIntoValue(state, elemOp, elemIndir, allocValue(mtyp.Elem()), ovfl)
		v.SetMapIndex(key, elem)
	}
}
func encodeMap(b *bytes.Buffer, rt reflect.Type, p uintptr, keyOp, elemOp encOp, keyIndir, elemIndir int) os.Error { state := new(encoderState) state.b = b state.fieldnum = -1 state.inArray = true // Maps cannot be accessed by moving addresses around the way // that slices etc. can. We must recover a full reflection value for // the iteration. v := reflect.NewValue(unsafe.Unreflect(rt, unsafe.Pointer((p)))) mv := reflect.Indirect(v).(*reflect.MapValue) keys := mv.Keys() encodeUint(state, uint64(len(keys))) for _, key := range keys { if state.err != nil { break } encodeReflectValue(state, key, keyOp, keyIndir) encodeReflectValue(state, mv.Elem(key), elemOp, elemIndir) } return state.err }
func (ctx *Context) GetDeviceList() (dev []*Device, err *UsbError) { var ( baseptr **C.struct_libusb_device devlist []*C.struct_libusb_device ) count, err := decodeUsbError(C.int(C.libusb_get_device_list(ctx.ctx, &baseptr))) if err != nil { dev = nil return } hdr := &reflect.SliceHeader{Data: uintptr(unsafe.Pointer(baseptr)), Len: count, Cap: count} devlist = unsafe.Unreflect(unsafe.Typeof(devlist), unsafe.Pointer(&hdr)).([]*C.struct_libusb_device) dev = make([]*Device, count) for i := 0; i < count; i++ { dev[i] = ctx.wrapDevice(devlist[i]) } devlist = nil C.libusb_free_device_list(baseptr, 1) return dev, nil }
// Encode a message struct.
func (o *Buffer) enc_struct_message(p *Properties, base uintptr) error {
	// Can the object marshal itself?
	iv := unsafe.Unreflect(p.stype, unsafe.Pointer(base+p.offset))
	if m, ok := iv.(Marshaler); ok {
		if isNil(reflect.ValueOf(iv)) {
			return ErrNil
		}
		data, err := m.Marshal()
		if err != nil {
			return err
		}
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeRawBytes(data)
		return nil
	}

	// The field at base+p.offset is a struct pointer; nil means unset.
	v := *(**struct{})(unsafe.Pointer(base + p.offset))
	if v == nil {
		return ErrNil
	}

	// need the length before we can write out the message itself,
	// so marshal into a separate byte buffer first.
	obuf := o.buf
	o.buf = o.bufalloc()
	b := uintptr(unsafe.Pointer(v))
	typ := p.stype.Elem()
	err := o.enc_struct(typ, b)
	nbuf := o.buf
	o.buf = obuf
	if err != nil {
		o.buffree(nbuf)
		return err
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeRawBytes(nbuf)
	o.buffree(nbuf)
	return nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
	// Discard wire type and field number varint. It isn't needed.
	_, n := DecodeVarint(b)
	o := NewBuffer(b[n:])

	t := reflect.TypeOf(extension.ExtensionType)
	props := &Properties{}
	props.Init(t, "irrelevant_name", extension.Tag, 0)

	// Allocate storage for the decoded value; base holds a *T.
	base := unsafe.New(t)
	var sbase uintptr
	if t.Elem().Kind() == reflect.Struct {
		// props.dec will be dec_struct_message, which does not refer to sbase.
		*(*unsafe.Pointer)(base) = unsafe.New(t.Elem())
	} else {
		// Scalar extensions need a scratch area for the decoded item.
		sbase = uintptr(unsafe.New(t.Elem()))
	}

	if err := props.dec(o, props, uintptr(base), sbase); err != nil {
		return nil, err
	}
	// Box the decoded storage back into an interface of the extension type.
	return unsafe.Unreflect(t, base), nil
}
// AsByteSlice reinterprets b's backing store as a []byte.
func AsByteSlice(b Resliceable) []byte {
	header := unsafe.Pointer(b.SliceHeader(1))
	return unsafe.Unreflect(_BYTE_SLICE, header).([]byte)
}
// encEngineRunners maps an engine kind to the function that executes it
// on a value at address p.
// NOTE(review): this definition is truncated in the visible source — the
// sliceEngine runner (and possibly further entries) is cut off mid-body.
var encEngineRunners = map[int]func(*Encoder, *encEngine, uintptr){
	indirEngine: func(enc *Encoder, engine *encEngine, p uintptr) {
		// Follow engine.i levels of pointer indirection; a nil pointer
		// anywhere along the chain is an error.
		indir := engine.i
		up := unsafe.Pointer(p)
		for indir > 0 {
			up = *(*unsafe.Pointer)(up)
			if up == nil {
				raise(PointerError)
			}
			indir--
		}
		// Run the element engine on the final address.
		elemEngine := engine.elems[0].engine
		elemEngine.runner(enc, elemEngine, uintptr(up))
	},
	reflectEngine: func(enc *Encoder, engine *encEngine, p uintptr) {
		// Box the value at p and fall back to reflection-based encoding.
		enc.encode(unsafe.Unreflect(engine.rt, unsafe.Pointer(p)))
	},
	sliceEngine: func(enc *Encoder, engine *encEngine, p uintptr) {
		enc.b.Write(engine.typeId)
		// p addresses the slice header; walk its elements by offset.
		header := (*reflect.SliceHeader)(unsafe.Pointer(p))
		mark := header.Data
		offset := engine.elems[0].offset
		elemEngine := engine.elems[0].engine
		if elemEngine.typ == indirEngine {
			subEngine := elemEngine.elems[0].engine
			// Large slices of pointer-to-struct get a batched fast path.
			if subEngine.typ == structEngine && header.Len > 100 {
				fieldElems := subEngine.elems
				engines := make([]*encEngine, len(fieldElems))
				offsets := make([]uintptr, len(fieldElems))
				for i, field := range fieldElems {
					engines[i] = field.engine
// AsUintBuffer reinterprets b's backing store as a []uint.
func AsUintBuffer(b Resliceable) []uint {
	header := unsafe.Pointer(b.SliceHeader(_UINT_SIZE))
	return unsafe.Unreflect(_UINT_BUFFER, header).([]uint)
}
// AsFloatBuffer reinterprets b's backing store as a FloatBuffer.
func AsFloatBuffer(b Resliceable) FloatBuffer {
	header := unsafe.Pointer(b.SliceHeader(_FLOAT_SIZE))
	return unsafe.Unreflect(_FLOAT_BUFFER, header).(FloatBuffer)
}
// encOpFor returns (a pointer to) the encoding op for the base type under rt and
// the indirection count to reach it.
func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) {
	ut := userType(rt)
	// If the type implements GobEncoder, we handle it without further processing.
	if ut.isGobEncoder {
		return enc.gobEncodeOpFor(ut)
	}
	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
	// Return the pointer to the op we're already building.
	if opPtr := inProgress[rt]; opPtr != nil {
		return opPtr, ut.indir
	}
	typ := ut.base
	indir := ut.indir
	k := typ.Kind()
	var op encOp
	// Simple kinds are looked up in a table; everything else is built below.
	if int(k) < len(encOpTable) {
		op = encOpTable[k]
	}
	if op == nil {
		// Register the op before recursing so recursive types find it.
		inProgress[rt] = &op
		// Special cases
		switch t := typ; t.Kind() {
		case reflect.Slice:
			if t.Elem().Kind() == reflect.Uint8 {
				op = encUint8Array
				break
			}
			// Slices have a header; we decode it to find the underlying array.
			elemOp, indir := enc.encOpFor(t.Elem(), inProgress)
			op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
				slice := (*reflect.SliceHeader)(p)
				if !state.sendZero && slice.Len == 0 {
					return
				}
				state.update(i)
				state.enc.encodeArray(state.b, slice.Data, *elemOp, t.Elem().Size(), indir, int(slice.Len))
			}
		case reflect.Array:
			// True arrays have size in the type.
			elemOp, indir := enc.encOpFor(t.Elem(), inProgress)
			op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
				state.update(i)
				state.enc.encodeArray(state.b, uintptr(p), *elemOp, t.Elem().Size(), indir, t.Len())
			}
		case reflect.Map:
			keyOp, keyIndir := enc.encOpFor(t.Key(), inProgress)
			elemOp, elemIndir := enc.encOpFor(t.Elem(), inProgress)
			op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
				// Maps cannot be accessed by moving addresses around the way
				// that slices etc. can. We must recover a full reflection value for
				// the iteration.
				v := reflect.ValueOf(unsafe.Unreflect(t, unsafe.Pointer(p)))
				mv := reflect.Indirect(v)
				// We send zero-length (but non-nil) maps because the
				// receiver might want to use the map. (Maps don't use append.)
				if !state.sendZero && mv.IsNil() {
					return
				}
				state.update(i)
				state.enc.encodeMap(state.b, mv, *keyOp, *elemOp, keyIndir, elemIndir)
			}
		case reflect.Struct:
			// Generate a closure that calls out to the engine for the nested type.
			enc.getEncEngine(userType(typ))
			info := mustGetTypeInfo(typ)
			op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
				state.update(i)
				// indirect through info to delay evaluation for recursive structs
				state.enc.encodeStruct(state.b, info.encoder, uintptr(p))
			}
		case reflect.Interface:
			op = func(i *encInstr, state *encoderState, p unsafe.Pointer) {
				// Interfaces transmit the name and contents of the concrete
				// value they contain.
				v := reflect.ValueOf(unsafe.Unreflect(t, unsafe.Pointer(p)))
				iv := reflect.Indirect(v)
				if !state.sendZero && (!iv.IsValid() || iv.IsNil()) {
					return
				}
				state.update(i)
				state.enc.encodeInterface(state.b, iv)
			}
		}
	}
	if op == nil {
		errorf("can't happen: encode type %s", rt.String())
	}
	return &op, indir
}
// AsPointerBuffer reinterprets b's backing store as a []uintptr.
func AsPointerBuffer(b Resliceable) []uintptr {
	header := unsafe.Pointer(b.SliceHeader(_PTR_SIZE))
	return unsafe.Unreflect(_PTR_BUFFER, header).([]uintptr)
}
// AsIntBuffer reinterprets b's backing store as an IntBuffer.
func AsIntBuffer(b Resliceable) IntBuffer {
	header := unsafe.Pointer(b.SliceHeader(_INT_SIZE))
	return unsafe.Unreflect(_INT_BUFFER, header).(IntBuffer)
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(t reflect.Type, is_group bool, base uintptr) error {
	st := t.Elem()
	prop := GetProperties(st)
	// required counts down as required fields are seen; reqFields is a
	// bitmap of required tags 1-64 already decoded (guards against reuse).
	required, reqFields := prop.reqCount, uint64(0)
	sbase := getsbase(prop) // scratch area for data items

	var err error
	for err == nil && o.index < len(o.buf) {
		oi := o.index
		var u uint64
		u, err = o.DecodeVarint()
		if err != nil {
			break
		}
		// Key varint packs wire type (low 3 bits) and field tag.
		wire := int(u & 0x7)
		if wire == WireEndGroup {
			if is_group {
				return nil // input is satisfied
			}
			return ErrWrongType
		}
		tag := int(u >> 3)
		fieldnum, ok := prop.tags[tag]
		if !ok {
			// Maybe it's an extension?
			o.ptr = base
			iv := unsafe.Unreflect(t, unsafe.Pointer(&o.ptr))
			if e, ok := iv.(extendableProto); ok && isExtensionField(e, int32(tag)) {
				// Stash the raw bytes of the extension for later decoding.
				if err = o.skip(st, tag, wire); err == nil {
					e.ExtensionMap()[int32(tag)] = Extension{enc: append([]byte(nil), o.buf[oi:o.index]...)}
				}
				continue
			}
			// Unknown field: skip it, preserving its bytes if configured to.
			err = o.skipAndSave(st, tag, wire, base)
			continue
		}
		p := prop.Prop[fieldnum]

		if p.dec == nil {
			fmt.Fprintf(os.Stderr, "no protobuf decoder for %s.%s\n", t, st.Field(fieldnum).Name)
			continue
		}
		dec := p.dec
		if wire != WireStartGroup && wire != p.WireType {
			if wire == WireBytes && p.packedDec != nil {
				// a packable field
				dec = p.packedDec
			} else {
				err = ErrWrongType
				continue
			}
		}
		err = dec(o, p, base, sbase)
		if err == nil && p.Required {
			// Successfully decoded a required field.
			if tag <= 64 {
				// use bitmap for fields 1-64 to catch field reuse.
				var mask uint64 = 1 << uint64(tag-1)
				if reqFields&mask == 0 {
					// new required field
					reqFields |= mask
					required--
				}
			} else {
				// This is imprecise. It can be fooled by a required field
				// with a tag > 64 that is encoded twice; that's very rare.
				// A fully correct implementation would require allocating
				// a data structure, which we would like to avoid.
				required--
			}
		}
	}
	if err == nil {
		if is_group {
			// A group must be terminated by WireEndGroup, not by EOF.
			return io.ErrUnexpectedEOF
		}
		if required > 0 {
			return &ErrRequiredNotSet{st}
		}
	}
	return err
}