Example 1: store
// store stores value v of type T into *addr.
func store(T types.Type, addr *value, v value) {
	switch T := T.Underlying().(type) {
	case *types.Struct:
		lhs := (*addr).(structure)
		rhs := v.(structure)
		for i := range lhs {
			store(T.Field(i).Type(), &lhs[i], rhs[i])
		}
	case *types.Array:
		lhs := (*addr).(array)
		rhs := v.(array)
		for i := range lhs {
			store(T.Elem(), &lhs[i], rhs[i])
		}
	default:
		*addr = v
	}
}
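The recursion in store can be exercised on hand-built types. A minimal standalone sketch (countLeaves is a hypothetical helper, not part of the interpreter) that counts the scalar slots such a store would ultimately write:

package main

import (
	"fmt"
	"go/types"
)

// countLeaves mirrors store's case analysis: aggregates are decomposed
// field by field / element by element via Underlying(); anything else
// counts as a single scalar slot.
func countLeaves(T types.Type) int {
	switch T := T.Underlying().(type) {
	case *types.Struct:
		n := 0
		for i := 0; i < T.NumFields(); i++ {
			n += countLeaves(T.Field(i).Type())
		}
		return n
	case *types.Array:
		return int(T.Len()) * countLeaves(T.Elem())
	default:
		return 1
	}
}

func main() {
	// struct{ x int; pts [3]struct{ a, b float64 } } has 1 + 3*2 = 7 slots.
	point := types.NewStruct([]*types.Var{
		types.NewField(0, nil, "a", types.Typ[types.Float64], false),
		types.NewField(0, nil, "b", types.Typ[types.Float64], false),
	}, nil)
	outer := types.NewStruct([]*types.Var{
		types.NewField(0, nil, "x", types.Typ[types.Int], false),
		types.NewField(0, nil, "pts", types.NewArray(point, 3), false),
	}, nil)
	fmt.Println(countLeaves(outer)) // 7
}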
Example 2: load
// load returns the value of type T in *addr.
func load(T types.Type, addr *value) value {
	switch T := T.Underlying().(type) {
	case *types.Struct:
		v := (*addr).(structure)
		a := make(structure, len(v))
		for i := range a {
			a[i] = load(T.Field(i).Type(), &v[i])
		}
		return a
	case *types.Array:
		v := (*addr).(array)
		a := make(array, len(v))
		for i := range a {
			a[i] = load(T.Elem(), &v[i])
		}
		return a
	default:
		return *addr
	}
}
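load allocates a fresh structure or array at every level because a shallow copy would alias element storage with *addr. A toy demonstration, using assumed stand-ins for the interpreter's representation:

package main

import "fmt"

// Stand-ins mimicking the interpreter's representation.
type (
	value     = interface{}
	structure = []value
)

func main() {
	addr := structure{int(1), structure{int(2), int(3)}}

	shallow := addr // copies only the slice header; elements are shared
	shallow[1].(structure)[0] = int(99)
	fmt.Println(addr[1]) // [99 3]: mutating the "copy" changed storage

	// load avoids this by building a new structure/array and recursing,
	// so the returned value shares no element storage with *addr.
}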
Example 3: flatten
// flatten returns a list of directly contained fields in the preorder
// traversal of the type tree of t.  The resulting elements are all
// scalars (basic types or pointerlike types), except for struct/array
// "identity" nodes, whose type is that of the aggregate.
//
// reflect.Value is considered pointerlike, similar to interface{}.
//
// Callers must not mutate the result.
//
func (a *analysis) flatten(t types.Type) []*fieldInfo {
	fl, ok := a.flattenMemo[t]
	if !ok {
		switch t := t.(type) {
		case *types.Named:
			u := t.Underlying()
			if isInterface(u) {
				// Debuggability hack: don't remove
				// the named type from interfaces as
				// they're very verbose.
				fl = append(fl, &fieldInfo{typ: t})
			} else {
				fl = a.flatten(u)
			}

		case *types.Basic,
			*types.Signature,
			*types.Chan,
			*types.Map,
			*types.Interface,
			*types.Slice,
			*types.Pointer:
			fl = append(fl, &fieldInfo{typ: t})

		case *types.Array:
			fl = append(fl, &fieldInfo{typ: t}) // identity node
			for _, fi := range a.flatten(t.Elem()) {
				fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi})
			}

		case *types.Struct:
			fl = append(fl, &fieldInfo{typ: t}) // identity node
			for i, n := 0, t.NumFields(); i < n; i++ {
				f := t.Field(i)
				for _, fi := range a.flatten(f.Type()) {
					fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi})
				}
			}

		case *types.Tuple:
			// No identity node: tuples are never address-taken.
			n := t.Len()
			if n == 1 {
				// Don't add a fieldInfo link for singletons,
				// e.g. in params/results.
				fl = append(fl, a.flatten(t.At(0).Type())...)
			} else {
				for i := 0; i < n; i++ {
					f := t.At(i)
					for _, fi := range a.flatten(f.Type()) {
						fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi})
					}
				}
			}

		default:
			panic(t)
		}

		a.flattenMemo[t] = fl
	}

	return fl
}
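A dependency-free sketch of the same preorder traversal (leaves is hypothetical; it drops the memo table, fieldInfo links, identity nodes, and the named-interface special case):

package main

import (
	"fmt"
	"go/types"
)

// leaves returns the scalar component types of t in preorder. Like
// flatten, it looks through named types and, for arrays, flattens the
// element type once (the analysis models all array elements as one).
func leaves(t types.Type) []types.Type {
	switch t := t.(type) {
	case *types.Named:
		return leaves(t.Underlying())
	case *types.Array:
		return leaves(t.Elem())
	case *types.Struct:
		var fl []types.Type
		for i := 0; i < t.NumFields(); i++ {
			fl = append(fl, leaves(t.Field(i).Type())...)
		}
		return fl
	default:
		return []types.Type{t}
	}
}

func main() {
	t := types.NewStruct([]*types.Var{
		types.NewField(0, nil, "p", types.NewPointer(types.Typ[types.Int]), false),
		types.NewField(0, nil, "xy", types.NewArray(types.Typ[types.Float64], 2), false),
	}, nil)
	fmt.Println(leaves(t)) // [*int float64]
}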
Example 4: getBackendType
func (tm *llvmTypeMap) getBackendType(t types.Type) backendType {
	switch t := t.(type) {
	case *types.Named:
		return tm.getBackendType(t.Underlying())

	case *types.Basic:
		switch t.Kind() {
		case types.Bool, types.Uint8:
			return &intBType{1, false}
		case types.Int8:
			return &intBType{1, true}
		case types.Uint16:
			return &intBType{2, false}
		case types.Int16:
			return &intBType{2, true}
		case types.Uint32:
			return &intBType{4, false}
		case types.Int32:
			return &intBType{4, true}
		case types.Uint64:
			return &intBType{8, false}
		case types.Int64:
			return &intBType{8, true}
		case types.Uint, types.Uintptr:
			return &intBType{tm.target.PointerSize(), false}
		case types.Int:
			return &intBType{tm.target.PointerSize(), true}
		case types.Float32:
			return &floatBType{false}
		case types.Float64:
			return &floatBType{true}
		case types.UnsafePointer:
			return &ptrBType{}
		case types.Complex64:
			f32 := &floatBType{false}
			return &structBType{[]backendType{f32, f32}}
		case types.Complex128:
			f64 := &floatBType{true}
			return &structBType{[]backendType{f64, f64}}
		case types.String:
			return &structBType{[]backendType{&ptrBType{}, &intBType{tm.target.PointerSize(), false}}}
		}

	case *types.Struct:
		var fields []backendType
		for i := 0; i != t.NumFields(); i++ {
			f := t.Field(i)
			fields = append(fields, tm.getBackendType(f.Type()))
		}
		return &structBType{fields}

	case *types.Pointer, *types.Signature, *types.Map, *types.Chan:
		return &ptrBType{}

	case *types.Interface:
		i8ptr := &ptrBType{}
		return &structBType{[]backendType{i8ptr, i8ptr}}

	case *types.Slice:
		return tm.sliceBackendType()

	case *types.Array:
		return &arrayBType{uint64(t.Len()), tm.getBackendType(t.Elem())}
	}

	panic("unhandled type: " + t.String())
}
Example 5: zero
// zero returns a new "zero" value of the specified type.
func zero(t types.Type) value {
	switch t := t.(type) {
	case *types.Basic:
		if t.Kind() == types.UntypedNil {
			panic("untyped nil has no zero value")
		}
		if t.Info()&types.IsUntyped != 0 {
			// TODO(adonovan): make it an invariant that
			// this is unreachable.  Currently some
			// constants have 'untyped' types when they
			// should be defaulted by the typechecker.
			t = ssa.DefaultType(t).(*types.Basic)
		}
		switch t.Kind() {
		case types.Bool:
			return false
		case types.Int:
			return int(0)
		case types.Int8:
			return int8(0)
		case types.Int16:
			return int16(0)
		case types.Int32:
			return int32(0)
		case types.Int64:
			return int64(0)
		case types.Uint:
			return uint(0)
		case types.Uint8:
			return uint8(0)
		case types.Uint16:
			return uint16(0)
		case types.Uint32:
			return uint32(0)
		case types.Uint64:
			return uint64(0)
		case types.Uintptr:
			return uintptr(0)
		case types.Float32:
			return float32(0)
		case types.Float64:
			return float64(0)
		case types.Complex64:
			return complex64(0)
		case types.Complex128:
			return complex128(0)
		case types.String:
			return ""
		case types.UnsafePointer:
			return unsafe.Pointer(nil)
		default:
			panic(fmt.Sprint("zero for unexpected type:", t))
		}
	case *types.Pointer:
		return (*value)(nil)
	case *types.Array:
		a := make(array, t.Len())
		for i := range a {
			a[i] = zero(t.Elem())
		}
		return a
	case *types.Named:
		return zero(t.Underlying())
	case *types.Interface:
		return iface{} // nil type, methodset and value
	case *types.Slice:
		return []value(nil)
	case *types.Struct:
		s := make(structure, t.NumFields())
		for i := range s {
			s[i] = zero(t.Field(i).Type())
		}
		return s
	case *types.Tuple:
		if t.Len() == 1 {
			return zero(t.At(0).Type())
		}
		s := make(tuple, t.Len())
		for i := range s {
			s[i] = zero(t.At(i).Type())
		}
		return s
	case *types.Chan:
		return chan value(nil)
	case *types.Map:
		if usesBuiltinMap(t.Key()) {
			return map[value]value(nil)
		}
		return (*hashmap)(nil)
	case *types.Signature:
		return (*ssa.Function)(nil)
	}
	panic(fmt.Sprint("zero: unexpected ", t))
}
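The same dispatch can be sketched without the interpreter's value representation. zeroDesc below is hypothetical and merely describes the zero value as text:

package main

import (
	"fmt"
	"go/types"
)

// zeroDesc describes the zero value of t, using the same dispatch shape
// as zero but without building interpreter values.
func zeroDesc(t types.Type) string {
	switch t := t.(type) {
	case *types.Basic:
		switch {
		case t.Info()&types.IsBoolean != 0:
			return "false"
		case t.Info()&types.IsNumeric != 0:
			return "0"
		case t.Info()&types.IsString != 0:
			return `""`
		}
	case *types.Named:
		return zeroDesc(t.Underlying())
	case *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Signature:
		return "nil"
	}
	return "<aggregate or other>"
}

func main() {
	fmt.Println(zeroDesc(types.Typ[types.Float64]))             // 0
	fmt.Println(zeroDesc(types.NewSlice(types.Typ[types.Int]))) // nil
	fmt.Println(zeroDesc(types.Typ[types.String]))              // ""
}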
Example 6: addRuntimeType
// addRuntimeType is called for each concrete type that can be the
// dynamic type of some interface or reflect.Value.
// Adapted from needMethods in go/ssa/builder.go
//
func (r *rta) addRuntimeType(T types.Type, skip bool) {
	if prev, ok := r.result.RuntimeTypes.At(T).(bool); ok {
		if skip && !prev {
			r.result.RuntimeTypes.Set(T, skip)
		}
		return
	}
	r.result.RuntimeTypes.Set(T, skip)

	mset := r.prog.MethodSets.MethodSet(T)

	if _, ok := T.Underlying().(*types.Interface); !ok {
		// T is a new concrete type.
		for i, n := 0, mset.Len(); i < n; i++ {
			sel := mset.At(i)
			m := sel.Obj()

			if m.Exported() {
				// Exported methods are always potentially callable via reflection.
				r.addReachable(r.prog.Method(sel), true)
			}
		}

		// Add callgraph edge for each existing dynamic
		// "invoke"-mode call via that interface.
		for _, I := range r.interfaces(T) {
			sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction)
			for _, site := range sites {
				r.addInvokeEdge(site, T)
			}
		}
	}

	// Precondition: T is not a method signature (*Signature with Recv()!=nil).
	// Recursive case: skip => don't call makeMethods(T).
	// Each package maintains its own set of types it has visited.

	var n *types.Named
	switch T := T.(type) {
	case *types.Named:
		n = T
	case *types.Pointer:
		n, _ = T.Elem().(*types.Named)
	}
	if n != nil {
		owner := n.Obj().Pkg()
		if owner == nil {
			return // built-in error type
		}
	}

	// Recursion over signatures of each exported method.
	for i := 0; i < mset.Len(); i++ {
		if mset.At(i).Obj().Exported() {
			sig := mset.At(i).Type().(*types.Signature)
			r.addRuntimeType(sig.Params(), true)  // skip the Tuple itself
			r.addRuntimeType(sig.Results(), true) // skip the Tuple itself
		}
	}

	switch t := T.(type) {
	case *types.Basic:
		// nop

	case *types.Interface:
		// nop---handled by recursion over method set.

	case *types.Pointer:
		r.addRuntimeType(t.Elem(), false)

	case *types.Slice:
		r.addRuntimeType(t.Elem(), false)

	case *types.Chan:
		r.addRuntimeType(t.Elem(), false)

	case *types.Map:
		r.addRuntimeType(t.Key(), false)
		r.addRuntimeType(t.Elem(), false)

	case *types.Signature:
		if t.Recv() != nil {
			panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
		}
		r.addRuntimeType(t.Params(), true)  // skip the Tuple itself
		r.addRuntimeType(t.Results(), true) // skip the Tuple itself

	case *types.Named:
		// A pointer-to-named type can be derived from a named
		// type via reflection.  It may have methods too.
		r.addRuntimeType(types.NewPointer(T), false)

		// Consider 'type T struct{S}' where S has methods.
		// Reflection provides no way to get from T to struct{S},
		// only to S, so the method set of struct{S} is unwanted,
		// so set 'skip' flag during recursion.
		r.addRuntimeType(t.Underlying(), true)

	case *types.Array:
		r.addRuntimeType(t.Elem(), false)

	case *types.Struct:
		for i, n := 0, t.NumFields(); i < n; i++ {
			r.addRuntimeType(t.Field(i).Type(), false)
		}

	case *types.Tuple:
		for i, n := 0, t.Len(); i < n; i++ {
			r.addRuntimeType(t.At(i).Type(), false)
		}

	default:
		panic(T)
	}
}
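The prev/skip bookkeeping at the top depends on r.result.RuntimeTypes being keyed by type identity rather than object identity. A small sketch of the same upgrade-only rule using typeutil.Map (the add closure is illustrative, not the rta API):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var seen typeutil.Map // types.Type -> bool ("skip" flag)

	add := func(T types.Type, skip bool) {
		if prev, ok := seen.At(T).(bool); ok {
			if skip && !prev {
				seen.Set(T, skip) // upgrade false -> true only
			}
			return
		}
		seen.Set(T, skip)
	}

	T := types.NewSlice(types.Typ[types.Int])
	add(T, false)
	add(types.NewSlice(types.Typ[types.Int]), true) // identical type, new object
	fmt.Println(seen.At(T))                         // true
}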
Example 7: hashFor
// hashFor computes the hash of t.
func (h Hasher) hashFor(t types.Type) uint32 {
	// See Identical for rationale.
	switch t := t.(type) {
	case *types.Basic:
		return uint32(t.Kind())

	case *types.Array:
		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())

	case *types.Slice:
		return 9049 + 2*h.Hash(t.Elem())

	case *types.Struct:
		var hash uint32 = 9059
		for i, n := 0, t.NumFields(); i < n; i++ {
			f := t.Field(i)
			if f.Anonymous() {
				hash += 8861
			}
			hash += hashString(t.Tag(i))
			hash += hashString(f.Name()) // (ignore f.Pkg)
			hash += h.Hash(f.Type())
		}
		return hash

	case *types.Pointer:
		return 9067 + 2*h.Hash(t.Elem())

	case *types.Signature:
		var hash uint32 = 9091
		if t.Variadic() {
			hash *= 8863
		}
		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())

	case *types.Interface:
		var hash uint32 = 9103
		for i, n := 0, t.NumMethods(); i < n; i++ {
			// See go/types.identicalMethods for rationale.
			// Method order is not significant.
			// Ignore m.Pkg().
			m := t.Method(i)
			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
		}
		return hash

	case *types.Map:
		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())

	case *types.Chan:
		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())

	case *types.Named:
		// Not safe with a copying GC; objects may move.
		return uint32(reflect.ValueOf(t.Obj()).Pointer())

	case *types.Tuple:
		return h.hashTuple(t)
	}
	panic(t)
}
Example 8: convert
func (fr *frame) convert(v *govalue, dsttyp types.Type) *govalue {
	b := fr.builder

	// If it's a stack allocated value, we'll want to compare the
	// value type, not the pointer type.
	srctyp := v.typ

	// Get the underlying type, if any.
	origdsttyp := dsttyp
	dsttyp = dsttyp.Underlying()
	srctyp = srctyp.Underlying()

	// Identical (underlying) types? Just swap in the destination type.
	if types.Identical(srctyp, dsttyp) {
		return newValue(v.value, origdsttyp)
	}

	// Both pointer types with identical underlying types? Same as above.
	if srctyp, ok := srctyp.(*types.Pointer); ok {
		if dsttyp, ok := dsttyp.(*types.Pointer); ok {
			srctyp := srctyp.Elem().Underlying()
			dsttyp := dsttyp.Elem().Underlying()
			if types.Identical(srctyp, dsttyp) {
				return newValue(v.value, origdsttyp)
			}
		}
	}

	// string ->
	if isString(srctyp) {
		// (untyped) string -> string
		// XXX should untyped strings be able to escape go/types?
		if isString(dsttyp) {
			return newValue(v.value, origdsttyp)
		}

		// string -> []byte
		if isSlice(dsttyp, types.Byte) {
			value := v.value
			strdata := fr.builder.CreateExtractValue(value, 0, "")
			strlen := fr.builder.CreateExtractValue(value, 1, "")

			// Data must be copied, to prevent changes in
			// the byte slice from mutating the string.
			newdata := fr.createMalloc(strlen)
			fr.memcpy(newdata, strdata, strlen)

			struct_ := llvm.Undef(fr.types.ToLLVM(dsttyp))
			struct_ = fr.builder.CreateInsertValue(struct_, newdata, 0, "")
			struct_ = fr.builder.CreateInsertValue(struct_, strlen, 1, "")
			struct_ = fr.builder.CreateInsertValue(struct_, strlen, 2, "")
			return newValue(struct_, origdsttyp)
		}

		// string -> []rune
		if isSlice(dsttyp, types.Rune) {
			return fr.stringToRuneSlice(v)
		}
	}

	// []byte -> string
	if isSlice(srctyp, types.Byte) && isString(dsttyp) {
		value := v.value
		data := fr.builder.CreateExtractValue(value, 0, "")
		len := fr.builder.CreateExtractValue(value, 1, "")

		// Data must be copied, to prevent changes in
		// the byte slice from mutating the string.
		newdata := fr.createMalloc(len)
		fr.memcpy(newdata, data, len)

		struct_ := llvm.Undef(fr.types.ToLLVM(types.Typ[types.String]))
		struct_ = fr.builder.CreateInsertValue(struct_, newdata, 0, "")
		struct_ = fr.builder.CreateInsertValue(struct_, len, 1, "")
		return newValue(struct_, types.Typ[types.String])
	}

	// []rune -> string
	if isSlice(srctyp, types.Rune) && isString(dsttyp) {
		return fr.runeSliceToString(v)
	}

	// rune -> string
	if isString(dsttyp) && isInteger(srctyp) {
		return fr.runeToString(v)
	}

	// Unsafe pointer conversions.
	llvm_type := fr.types.ToLLVM(dsttyp)
	if dsttyp == types.Typ[types.UnsafePointer] { // X -> unsafe.Pointer
		if _, isptr := srctyp.(*types.Pointer); isptr {
			return newValue(v.value, origdsttyp)
		} else if srctyp == types.Typ[types.Uintptr] {
			value := b.CreateIntToPtr(v.value, llvm_type, "")
			return newValue(value, origdsttyp)
		}
	} else if srctyp == types.Typ[types.UnsafePointer] { // unsafe.Pointer -> X
		if _, isptr := dsttyp.(*types.Pointer); isptr {
			return newValue(v.value, origdsttyp)
		} else if dsttyp == types.Typ[types.Uintptr] {
			value := b.CreatePtrToInt(v.value, llvm_type, "")
			return newValue(value, origdsttyp)
		}
	}

	lv := v.value
	srcType := lv.Type()
	switch srcType.TypeKind() {
	case llvm.IntegerTypeKind:
		switch llvm_type.TypeKind() {
		case llvm.IntegerTypeKind:
			srcBits := srcType.IntTypeWidth()
			dstBits := llvm_type.IntTypeWidth()
			delta := srcBits - dstBits
			switch {
			case delta < 0:
				if !isUnsigned(srctyp) {
					lv = b.CreateSExt(lv, llvm_type, "")
				} else {
					lv = b.CreateZExt(lv, llvm_type, "")
				}
			case delta > 0:
				lv = b.CreateTrunc(lv, llvm_type, "")
			}
			return newValue(lv, origdsttyp)
		case llvm.FloatTypeKind, llvm.DoubleTypeKind:
			if !isUnsigned(v.Type()) {
				lv = b.CreateSIToFP(lv, llvm_type, "")
			} else {
				lv = b.CreateUIToFP(lv, llvm_type, "")
			}
			return newValue(lv, origdsttyp)
		}
	case llvm.DoubleTypeKind:
		switch llvm_type.TypeKind() {
		case llvm.FloatTypeKind:
			lv = b.CreateFPTrunc(lv, llvm_type, "")
			return newValue(lv, origdsttyp)
		case llvm.IntegerTypeKind:
			if !isUnsigned(dsttyp) {
				lv = b.CreateFPToSI(lv, llvm_type, "")
			} else {
				lv = b.CreateFPToUI(lv, llvm_type, "")
			}
			return newValue(lv, origdsttyp)
		}
	case llvm.FloatTypeKind:
		switch llvm_type.TypeKind() {
		case llvm.DoubleTypeKind:
			lv = b.CreateFPExt(lv, llvm_type, "")
			return newValue(lv, origdsttyp)
		case llvm.IntegerTypeKind:
			if !isUnsigned(dsttyp) {
				lv = b.CreateFPToSI(lv, llvm_type, "")
			} else {
				lv = b.CreateFPToUI(lv, llvm_type, "")
			}
			return newValue(lv, origdsttyp)
		}
	}

	// Complex -> complex. Complexes are only convertible to other
	// complexes, constant conversions aside. So we can just check the
	// source type here; given that the types are not identical
	// (checked above), we can assume the destination type is the other
	// complex type.
	if isComplex(srctyp) {
		var fpcast func(llvm.Builder, llvm.Value, llvm.Type, string) llvm.Value
		var fptype llvm.Type
		if srctyp == types.Typ[types.Complex64] {
			fpcast = (llvm.Builder).CreateFPExt
			fptype = llvm.DoubleType()
		} else {
			fpcast = (llvm.Builder).CreateFPTrunc
			fptype = llvm.FloatType()
		}
		realv := b.CreateExtractValue(lv, 0, "")
		imagv := b.CreateExtractValue(lv, 1, "")
		realv = fpcast(b, realv, fptype, "")
		imagv = fpcast(b, imagv, fptype, "")
		lv = llvm.Undef(fr.types.ToLLVM(dsttyp))
		lv = b.CreateInsertValue(lv, realv, 0, "")
		lv = b.CreateInsertValue(lv, imagv, 1, "")
		return newValue(lv, origdsttyp)
	}
	panic(fmt.Sprintf("unimplemented conversion: %s (%s) -> %s", v.typ, lv.Type(), origdsttyp))
}
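For reference, the Go-level semantics that the string <-> []byte branches implement: the malloc-and-memcpy exists because the conversion must yield a slice that cannot mutate the string's bytes.

package main

import "fmt"

func main() {
	s := "abc"
	b := []byte(s) // conversion copies: fresh backing array
	b[0] = 'x'
	fmt.Println(s, string(b)) // abc xbc: the original string is unaffected
}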