// extractEffect separates out any effects that the expression may
// have, returning a function that will perform those effects and a
// new expression that is guaranteed to be side-effect free.  These
// are the moral equivalents of "temp := expr" and "temp" (or
// "temp := &expr" and "*temp" for addressable exprs).  Because this
// creates a temporary variable, the caller should create a temporary
// block for the compilation of this expression and the evaluation of
// the results.
func (a *expr) extractEffect(b *block, errOp string) (func(*Thread), *expr) {
	// Create "&a" if a is addressable
	rhs := a
	if a.evalAddr != nil {
		rhs = a.compileUnaryExpr(token.AND, rhs)
	}

	// Create temp
	ac, ok := a.checkAssign(a.pos, []*expr{rhs}, errOp, "")
	if !ok {
		return nil, nil
	}
	if len(ac.rmt.Elems) != 1 {
		a.diag("multi-valued expression not allowed in %s", errOp)
		return nil, nil
	}
	tempType := ac.rmt.Elems[0]
	if tempType.isIdeal() {
		// It's too bad we have to duplicate this rule.
		switch {
		case tempType.isInteger():
			tempType = IntType
		case tempType.isFloat():
			tempType = FloatType
		default:
			log.Crashf("unexpected ideal type %v", tempType)
		}
	}
	temp := b.DefineTemp(tempType)
	tempIdx := temp.Index

	// Create "temp := rhs"
	assign := ac.compile(b, tempType)
	if assign == nil {
		log.Crashf("compileAssign type check failed")
	}

	effect := func(t *Thread) {
		tempVal := tempType.Zero()
		t.f.Vars[tempIdx] = tempVal
		assign(tempVal, t)
	}

	// Generate "temp" or "*temp"
	getTemp := a.compileVariable(0, temp)
	if a.evalAddr == nil {
		return effect, getTemp
	}

	deref := a.compileStarExpr(getTemp)
	if deref == nil {
		return nil, nil
	}
	return effect, deref
}
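// Illustrative usage sketch (not part of the original source): callers such
// as the operator-assignment and inc/dec statement compilers wrap
// extractEffect in a temporary child block, run the returned effect exactly
// once at execution time, and then reuse the side-effect-free expression
// freely, roughly:
//
//	bc := a.enterChild()
//	defer bc.exit()
//	effect, l := l.extractEffect(bc.block, "operator-assignment")
//	// ... compile further code in terms of l, which no longer
//	// re-evaluates (or re-addresses) the original operand ...
//	a.push(func(t *Thread) {
//		effect(t) // evaluate the operand (or its address) into the temporary
//		// ... run code that uses l's evaluators ...
//	})
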
func (a *stmtCompiler) compileIncDecStmt(s *ast.IncDecStmt) {
	// Create temporary block for extractEffect
	bc := a.enterChild()
	defer bc.exit()

	l := a.compileExpr(bc.block, false, s.X)
	if l == nil {
		return
	}

	if l.evalAddr == nil {
		l.diag("cannot assign to %s", l.desc)
		return
	}
	if !(l.t.isInteger() || l.t.isFloat()) {
		l.diagOpType(s.Tok, l.t)
		return
	}

	var op token.Token
	var desc string
	switch s.Tok {
	case token.INC:
		op = token.ADD
		desc = "increment statement"
	case token.DEC:
		op = token.SUB
		desc = "decrement statement"
	default:
		log.Crashf("Unexpected IncDec token %v", s.Tok)
	}

	effect, l := l.extractEffect(bc.block, desc)

	one := l.newExpr(IdealIntType, "constant")
	one.pos = s.Pos()
	one.eval = func() *bignum.Integer { return bignum.Int(1) }

	binop := l.compileBinaryExpr(op, l, one)
	if binop == nil {
		return
	}

	assign := a.compileAssign(s.Pos(), bc.block, l.t, []*expr{binop}, "", "")
	if assign == nil {
		log.Crashf("compileAssign type check failed")
	}

	lf := l.evalAddr
	a.push(func(v *Thread) {
		effect(v)
		assign(lf(v), v)
	})
}
// TODO(austin) This is a hack to eliminate a circular dependency
// between type.go and expr.go
func (a *compiler) compileArrayLen(b *block, expr ast.Expr) (int64, bool) {
	lenExpr := a.compileExpr(b, true, expr)
	if lenExpr == nil {
		return 0, false
	}

	// XXX(Spec) Are ideal floats with no fractional part okay?
	if lenExpr.t.isIdeal() {
		lenExpr = lenExpr.convertTo(IntType)
		if lenExpr == nil {
			return 0, false
		}
	}

	if !lenExpr.t.isInteger() {
		a.diagAt(expr, "array size must be an integer")
		return 0, false
	}

	switch lenExpr.t.lit().(type) {
	case *intType:
		return lenExpr.asInt()(nil), true
	case *uintType:
		return int64(lenExpr.asUint()(nil)), true
	}
	log.Crashf("unexpected integer type %T", lenExpr.t)
	return 0, false
}
func (a *expr) genConstant(v Value) {
	switch a.t.lit().(type) {
	case *boolType:
		a.eval = func(t *Thread) bool { return v.(BoolValue).Get(t) }
	case *uintType:
		a.eval = func(t *Thread) uint64 { return v.(UintValue).Get(t) }
	case *intType:
		a.eval = func(t *Thread) int64 { return v.(IntValue).Get(t) }
	case *idealIntType:
		val := v.(IdealIntValue).Get()
		a.eval = func() *bignum.Integer { return val }
	case *floatType:
		a.eval = func(t *Thread) float64 { return v.(FloatValue).Get(t) }
	case *idealFloatType:
		val := v.(IdealFloatValue).Get()
		a.eval = func() *bignum.Rational { return val }
	case *stringType:
		a.eval = func(t *Thread) string { return v.(StringValue).Get(t) }
	case *ArrayType:
		a.eval = func(t *Thread) ArrayValue { return v.(ArrayValue).Get(t) }
	case *StructType:
		a.eval = func(t *Thread) StructValue { return v.(StructValue).Get(t) }
	case *PtrType:
		a.eval = func(t *Thread) Value { return v.(PtrValue).Get(t) }
	case *FuncType:
		a.eval = func(t *Thread) Func { return v.(FuncValue).Get(t) }
	case *SliceType:
		a.eval = func(t *Thread) Slice { return v.(SliceValue).Get(t) }
	case *MapType:
		a.eval = func(t *Thread) Map { return v.(MapValue).Get(t) }
	default:
		log.Crashf("unexpected constant type %v at %v", a.t, a.pos)
	}
}
func (a *expr) genUnaryOpNeg(v *expr) {
	switch a.t.lit().(type) {
	case *uintType:
		vf := v.asUint()
		a.eval = func(t *Thread) uint64 { v := vf(t); return -v }
	case *intType:
		vf := v.asInt()
		a.eval = func(t *Thread) int64 { v := vf(t); return -v }
	case *idealIntType:
		v := v.asIdealInt()()
		val := v.Neg()
		a.eval = func() *bignum.Integer { return val }
	case *floatType:
		vf := v.asFloat()
		a.eval = func(t *Thread) float64 { v := vf(t); return -v }
	case *idealFloatType:
		v := v.asIdealFloat()()
		val := v.Neg()
		a.eval = func() *bignum.Rational { return val }
	default:
		log.Crashf("unexpected type %v at %v", a.t, a.pos)
	}
}
func (a *expr) genIdentOp(level, index int) {
	a.evalAddr = func(t *Thread) Value { return t.f.Get(level, index) }
	switch a.t.lit().(type) {
	case *boolType:
		a.eval = func(t *Thread) bool { return t.f.Get(level, index).(BoolValue).Get(t) }
	case *uintType:
		a.eval = func(t *Thread) uint64 { return t.f.Get(level, index).(UintValue).Get(t) }
	case *intType:
		a.eval = func(t *Thread) int64 { return t.f.Get(level, index).(IntValue).Get(t) }
	case *floatType:
		a.eval = func(t *Thread) float64 { return t.f.Get(level, index).(FloatValue).Get(t) }
	case *stringType:
		a.eval = func(t *Thread) string { return t.f.Get(level, index).(StringValue).Get(t) }
	case *ArrayType:
		a.eval = func(t *Thread) ArrayValue { return t.f.Get(level, index).(ArrayValue).Get(t) }
	case *StructType:
		a.eval = func(t *Thread) StructValue { return t.f.Get(level, index).(StructValue).Get(t) }
	case *PtrType:
		a.eval = func(t *Thread) Value { return t.f.Get(level, index).(PtrValue).Get(t) }
	case *FuncType:
		a.eval = func(t *Thread) Func { return t.f.Get(level, index).(FuncValue).Get(t) }
	case *SliceType:
		a.eval = func(t *Thread) Slice { return t.f.Get(level, index).(SliceValue).Get(t) }
	case *MapType:
		a.eval = func(t *Thread) Map { return t.f.Get(level, index).(MapValue).Get(t) }
	default:
		log.Crashf("unexpected identifier type %v at %v", a.t, a.pos)
	}
}
// processEvent processes a single event, without manipulating the
// event queues.  It returns either EAStop or EAContinue and possibly
// an error.
func (p *Process) processEvent(ev Event) (EventAction, os.Error) {
	p.event = ev

	var action EventAction
	var err os.Error
	switch ev := p.event.(type) {
	case *Breakpoint:
		hook, ok := p.breakpointHooks[ev.pc]
		if !ok {
			break
		}
		p.curGoroutine = ev.Goroutine()
		action, err = hook.handle(ev)

	case *GoroutineCreate:
		p.curGoroutine = ev.Goroutine()
		action, err = p.goroutineCreateHook.handle(ev)

	case *GoroutineExit:
		action, err = p.goroutineExitHook.handle(ev)

	default:
		log.Crashf("Unknown event type %T in queue", p.event)
	}

	if err != nil {
		return EAStop, err
	} else if action == EAStop {
		return EAStop, nil
	}
	return EAContinue, nil
}
func (a *typeCompiler) compileIdent(x *ast.Ident, allowRec bool) Type {
	_, _, def := a.block.Lookup(x.Value)
	if def == nil {
		a.diagAt(x, "%s: undefined", x.Value)
		return nil
	}
	switch def := def.(type) {
	case *Constant:
		a.diagAt(x, "constant %v used as type", x.Value)
		return nil
	case *Variable:
		a.diagAt(x, "variable %v used as type", x.Value)
		return nil
	case *NamedType:
		if !allowRec && def.incomplete {
			a.diagAt(x, "illegal recursive type")
			return nil
		}
		if !def.incomplete && def.Def == nil {
			// Placeholder type from an earlier error
			return nil
		}
		return def
	case Type:
		return def
	}
	log.Crashf("name %s has unknown type %T", x.Value, def)
	return nil
}
// put creates a flow control point for the next PC in the code buffer.
// This should be done before pushing the instruction into the code buffer.
func (f *flowBuf) put(cond bool, term bool, jumps []*uint) {
	pc := f.cb.nextPC()
	if ent, ok := f.ents[pc]; ok {
		log.Crashf("Flow entry already exists at PC %d: %+v", pc, ent)
	}
	f.ents[pc] = &flowEnt{cond, term, jumps, false}
}
func (a *expr) genValue(vf func(*Thread) Value) {
	a.evalAddr = vf
	switch a.t.lit().(type) {
	case *boolType:
		a.eval = func(t *Thread) bool { return vf(t).(BoolValue).Get(t) }
	case *uintType:
		a.eval = func(t *Thread) uint64 { return vf(t).(UintValue).Get(t) }
	case *intType:
		a.eval = func(t *Thread) int64 { return vf(t).(IntValue).Get(t) }
	case *floatType:
		a.eval = func(t *Thread) float64 { return vf(t).(FloatValue).Get(t) }
	case *stringType:
		a.eval = func(t *Thread) string { return vf(t).(StringValue).Get(t) }
	case *ArrayType:
		a.eval = func(t *Thread) ArrayValue { return vf(t).(ArrayValue).Get(t) }
	case *StructType:
		a.eval = func(t *Thread) StructValue { return vf(t).(StructValue).Get(t) }
	case *PtrType:
		a.eval = func(t *Thread) Value { return vf(t).(PtrValue).Get(t) }
	case *FuncType:
		a.eval = func(t *Thread) Func { return vf(t).(FuncValue).Get(t) }
	case *SliceType:
		a.eval = func(t *Thread) Slice { return vf(t).(SliceValue).Get(t) }
	case *MapType:
		a.eval = func(t *Thread) Map { return vf(t).(MapValue).Get(t) }
	default:
		log.Crashf("unexpected result type %v at %v", a.t, a.pos)
	}
}
func (a *expr) asInterface() func(*Thread) interface{} {
	switch sf := a.eval.(type) {
	case func(t *Thread) bool:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) uint64:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) int64:
		return func(t *Thread) interface{} { return sf(t) }
	case func() *bignum.Integer:
		return func(*Thread) interface{} { return sf() }
	case func(t *Thread) float64:
		return func(t *Thread) interface{} { return sf(t) }
	case func() *bignum.Rational:
		return func(*Thread) interface{} { return sf() }
	case func(t *Thread) string:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) ArrayValue:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) StructValue:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) Value:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) Func:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) Slice:
		return func(t *Thread) interface{} { return sf(t) }
	case func(t *Thread) Map:
		return func(t *Thread) interface{} { return sf(t) }
	default:
		log.Crashf("unexpected expression node type %T at %v", a.eval, a.pos)
	}
	panic("unreachable")
}
func (a *stmtCompiler) compileDecl(decl ast.Decl) {
	switch d := decl.(type) {
	case *ast.BadDecl:
		// Do nothing.  Already reported by parser.
		a.silentErrors++

	case *ast.FuncDecl:
		decl := a.compileFuncType(a.block, d.Type)
		if decl == nil {
			return
		}
		// Declare and initialize v before compiling func
		// so that body can refer to itself.
		c, prev := a.block.DefineConst(d.Name.Name(), a.pos, decl.Type, decl.Type.Zero())
		if prev != nil {
			pos := prev.Pos()
			if pos.IsValid() {
				a.diagAt(d.Name, "identifier %s redeclared in this block\n\tprevious declaration at %s", d.Name.Name(), &pos)
			} else {
				a.diagAt(d.Name, "identifier %s redeclared in this block", d.Name.Name())
			}
		}
		fn := a.compileFunc(a.block, decl, d.Body)
		if c == nil || fn == nil {
			return
		}
		var zeroThread Thread
		c.Value.(FuncValue).Set(nil, fn(&zeroThread))

	case *ast.GenDecl:
		switch d.Tok {
		case token.IMPORT:
			log.Crashf("%v not implemented", d.Tok)
		case token.CONST:
			log.Crashf("%v not implemented", d.Tok)
		case token.TYPE:
			a.compileTypeDecl(a.block, d)
		case token.VAR:
			a.compileVarDecl(d)
		}

	default:
		log.Crashf("Unexpected Decl type %T", decl)
	}
}
func (a *expr) genUnaryOpNot(v *expr) {
	switch a.t.lit().(type) {
	case *boolType:
		vf := v.asBool()
		a.eval = func(t *Thread) bool { v := vf(t); return !v }
	default:
		log.Crashf("unexpected type %v at %v", a.t, a.pos)
	}
}
func (a *compiler) compileExpr(b *block, constant bool, expr ast.Expr) *expr {
	ec := &exprCompiler{a, b, constant}
	nerr := a.numError()
	e := ec.compile(expr, false)
	if e == nil && nerr == a.numError() {
		log.Crashf("expression compilation failed without reporting errors")
	}
	return e
}
func (a *exprInfo) compileFloatLit(lit string) *expr {
	f, _, n := bignum.RatFromString(lit, 0)
	if n != len(lit) {
		log.Crashf("malformed float literal %s at %v passed parser", lit, a.pos)
	}
	expr := a.newExpr(IdealFloatType, "float literal")
	expr.eval = func() *bignum.Rational { return f }
	return expr
}
func (t *NamedType) Complete(def Type) {
	if !t.incomplete {
		log.Crashf("cannot complete already completed NamedType %+v", *t)
	}
	// We strip the name from def because multiple levels of
	// naming are useless.
	if ndef, ok := def.(*NamedType); ok {
		def = ndef.Def
	}
	t.Def = def
	t.incomplete = false
}
// derefArray returns an expression of array type if the given
// expression is a *array type.  Otherwise, returns the given
// expression.
func (a *expr) derefArray() *expr {
	if pt, ok := a.t.lit().(*PtrType); ok {
		if _, ok := pt.Elem.lit().(*ArrayType); ok {
			deref := a.compileStarExpr(a)
			if deref == nil {
				log.Crashf("failed to dereference *array")
			}
			return deref
		}
	}
	return a
}
func (t *floatType) maxVal() *bignum.Rational {
	bits := t.Bits
	if bits == 0 {
		bits = uint(8 * unsafe.Sizeof(float(0)))
	}
	switch bits {
	case 32:
		return maxFloat32Val
	case 64:
		return maxFloat64Val
	}
	log.Crashf("unexpected floating point bit count: %d", bits)
	panic("unreachable")
}
// goFiles returns a list of the *.go source files in dir,
// excluding those in package main or ending in _test.go.
// It also returns a map giving the packages imported
// by those files.  The map keys are the imported paths;
// each map value is one file that imports that path.
func goFiles(dir string) (files []string, imports map[string]string, err os.Error) {
	f, err := os.Open(dir, os.O_RDONLY, 0)
	if err != nil {
		return nil, nil, err
	}
	dirs, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return nil, nil, err
	}

	files = make([]string, 0, len(dirs))
	imports = make(map[string]string)
	pkgName := ""
	for i := range dirs {
		d := &dirs[i]
		if !strings.HasSuffix(d.Name, ".go") || strings.HasSuffix(d.Name, "_test.go") {
			continue
		}
		filename := path.Join(dir, d.Name)
		pf, err := parser.ParseFile(filename, nil, nil, parser.ImportsOnly)
		if err != nil {
			return nil, nil, err
		}
		s := string(pf.Name.Name())
		if s == "main" {
			continue
		}
		if pkgName == "" {
			pkgName = s
		} else if pkgName != s {
			return nil, nil, os.ErrorString("multiple package names in " + dir)
		}
		n := len(files)
		files = files[0 : n+1]
		files[n] = filename
		for _, decl := range pf.Decls {
			for _, spec := range decl.(*ast.GenDecl).Specs {
				quoted := string(spec.(*ast.ImportSpec).Path.Value)
				unquoted, err := strconv.Unquote(quoted)
				if err != nil {
					log.Crashf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
				}
				imports[unquoted] = filename
			}
		}
	}
	return files, imports, nil
}
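// Hypothetical usage sketch (not part of the original source): a caller such
// as a package-installation tool might use goFiles roughly like this; the
// directory name below is made up for illustration.
//
//	files, imports, err := goFiles("src/pkg/fmt")
//	if err != nil {
//		log.Exitf("scanning sources: %v", err)
//	}
//	for path, file := range imports {
//		fmt.Printf("%s imports %q\n", file, path)
//	}
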
func (a *expr) genUnaryOpXor(v *expr) {
	switch a.t.lit().(type) {
	case *uintType:
		vf := v.asUint()
		a.eval = func(t *Thread) uint64 { v := vf(t); return ^v }
	case *intType:
		vf := v.asInt()
		a.eval = func(t *Thread) int64 { v := vf(t); return ^v }
	case *idealIntType:
		v := v.asIdealInt()()
		val := v.Neg().Sub(bignum.Int(1))
		a.eval = func() *bignum.Integer { return val }
	default:
		log.Crashf("unexpected type %v at %v", a.t, a.pos)
	}
}
func toValue(val interface{}) Value {
	switch val := val.(type) {
	case bool:
		r := boolV(val)
		return &r
	case uint8:
		r := uint8V(val)
		return &r
	case uint:
		r := uintV(val)
		return &r
	case int:
		r := intV(val)
		return &r
	case *bignum.Integer:
		return &idealIntV{val}
	case float:
		r := floatV(val)
		return &r
	case *bignum.Rational:
		return &idealFloatV{val}
	case string:
		r := stringV(val)
		return &r
	case vstruct:
		elems := make([]Value, len(val))
		for i, e := range val {
			elems[i] = toValue(e)
		}
		r := structV(elems)
		return &r
	case varray:
		elems := make([]Value, len(val))
		for i, e := range val {
			elems[i] = toValue(e)
		}
		r := arrayV(elems)
		return &r
	case vslice:
		return &sliceV{Slice{toValue(val.arr).(ArrayValue), int64(val.len), int64(val.cap)}}
	case Func:
		return &funcV{val}
	}
	log.Crashf("toValue(%T) not implemented", val)
	panic("unreachable")
}
func (a *expr) genBinOpGeq(l, r *expr) {
	switch l.t.lit().(type) {
	case *uintType:
		lf := l.asUint()
		rf := r.asUint()
		a.eval = func(t *Thread) bool {
			l, r := lf(t), rf(t)
			return l >= r
		}
	case *intType:
		lf := l.asInt()
		rf := r.asInt()
		a.eval = func(t *Thread) bool {
			l, r := lf(t), rf(t)
			return l >= r
		}
	case *idealIntType:
		l := l.asIdealInt()()
		r := r.asIdealInt()()
		val := l.Cmp(r) >= 0
		a.eval = func(t *Thread) bool { return val }
	case *floatType:
		lf := l.asFloat()
		rf := r.asFloat()
		a.eval = func(t *Thread) bool {
			l, r := lf(t), rf(t)
			return l >= r
		}
	case *idealFloatType:
		l := l.asIdealFloat()()
		r := r.asIdealFloat()()
		val := l.Cmp(r) >= 0
		a.eval = func(t *Thread) bool { return val }
	case *stringType:
		lf := l.asString()
		rf := r.asString()
		a.eval = func(t *Thread) bool {
			l, r := lf(t), rf(t)
			return l >= r
		}
	default:
		log.Crashf("unexpected type %v at %v", l.t, a.pos)
	}
}
func (a *exprInfo) compileIdent(b *block, constant bool, callCtx bool, name string) *expr {
	bl, level, def := b.Lookup(name)
	if def == nil {
		a.diag("%s: undefined", name)
		return nil
	}
	switch def := def.(type) {
	case *Constant:
		expr := a.newExpr(def.Type, "constant")
		if ft, ok := def.Type.(*FuncType); ok && ft.builtin != "" {
			// XXX(Spec) I don't think anything says that
			// built-in functions can't be used as values.
			if !callCtx {
				a.diag("built-in function %s cannot be used as a value", ft.builtin)
				return nil
			}
			// Otherwise, we leave the evaluators empty
			// because this is handled specially
		} else {
			expr.genConstant(def.Value)
		}
		return expr
	case *Variable:
		if constant {
			a.diag("variable %s used in constant expression", name)
			return nil
		}
		if bl.global {
			return a.compileGlobalVariable(def)
		}
		return a.compileVariable(level, def)
	case Type:
		if callCtx {
			return a.exprFromType(def)
		}
		a.diag("type %v used as expression", name)
		return nil
	}
	log.Crashf("name %s has unknown type %T", name, def)
	panic("unreachable")
}
func (a *stmtCompiler) compileDeclStmt(s *ast.DeclStmt) {
	switch decl := s.Decl.(type) {
	case *ast.BadDecl:
		// Do nothing.  Already reported by parser.
		a.silentErrors++

	case *ast.FuncDecl:
		if !a.block.global {
			log.Crash("FuncDecl at statement level")
		}

	case *ast.GenDecl:
		if decl.Tok == token.IMPORT && !a.block.global {
			log.Crash("import at statement level")
		}

	default:
		log.Crashf("Unexpected Decl type %T", s.Decl)
	}
	a.compileDecl(s.Decl)
}
func genAssign(lt Type, r *expr) func(lv Value, t *Thread) {
	switch lt.lit().(type) {
	case *boolType:
		rf := r.asBool()
		return func(lv Value, t *Thread) { lv.(BoolValue).Set(t, rf(t)) }
	case *uintType:
		rf := r.asUint()
		return func(lv Value, t *Thread) { lv.(UintValue).Set(t, rf(t)) }
	case *intType:
		rf := r.asInt()
		return func(lv Value, t *Thread) { lv.(IntValue).Set(t, rf(t)) }
	case *floatType:
		rf := r.asFloat()
		return func(lv Value, t *Thread) { lv.(FloatValue).Set(t, rf(t)) }
	case *stringType:
		rf := r.asString()
		return func(lv Value, t *Thread) { lv.(StringValue).Set(t, rf(t)) }
	case *ArrayType:
		rf := r.asArray()
		return func(lv Value, t *Thread) { lv.Assign(t, rf(t)) }
	case *StructType:
		rf := r.asStruct()
		return func(lv Value, t *Thread) { lv.Assign(t, rf(t)) }
	case *PtrType:
		rf := r.asPtr()
		return func(lv Value, t *Thread) { lv.(PtrValue).Set(t, rf(t)) }
	case *FuncType:
		rf := r.asFunc()
		return func(lv Value, t *Thread) { lv.(FuncValue).Set(t, rf(t)) }
	case *SliceType:
		rf := r.asSlice()
		return func(lv Value, t *Thread) { lv.(SliceValue).Set(t, rf(t)) }
	case *MapType:
		rf := r.asMap()
		return func(lv Value, t *Thread) { lv.(MapValue).Set(t, rf(t)) }
	default:
		log.Crashf("unexpected left operand type %v at %v", lt, r.pos)
	}
	panic("unreachable")
}
func (a *stmtCompiler) doAssignOp(s *ast.AssignStmt) {
	if len(s.Lhs) != 1 || len(s.Rhs) != 1 {
		a.diag("tuple assignment cannot be combined with an arithmetic operation")
		return
	}

	// Create temporary block for extractEffect
	bc := a.enterChild()
	defer bc.exit()

	l := a.compileExpr(bc.block, false, s.Lhs[0])
	r := a.compileExpr(bc.block, false, s.Rhs[0])
	if l == nil || r == nil {
		return
	}

	if l.evalAddr == nil {
		l.diag("cannot assign to %s", l.desc)
		return
	}

	effect, l := l.extractEffect(bc.block, "operator-assignment")

	binop := r.compileBinaryExpr(assignOpToOp[s.Tok], l, r)
	if binop == nil {
		return
	}

	assign := a.compileAssign(s.Pos(), bc.block, l.t, []*expr{binop}, "assignment", "value")
	if assign == nil {
		log.Crashf("compileAssign type check failed")
	}

	lf := l.evalAddr
	a.push(func(t *Thread) {
		effect(t)
		assign(lf(t), t)
	})
}
// reachesEnd returns true if the end of f's code buffer can be
// reached from the given program counter.  Error reporting is the
// caller's responsibility.
func (f *flowBuf) reachesEnd(pc uint) bool {
	endPC := f.cb.nextPC()
	if pc > endPC {
		log.Crashf("Reached bad PC %d past end PC %d", pc, endPC)
	}

	for ; pc < endPC; pc++ {
		ent, ok := f.ents[pc]
		if !ok {
			continue
		}

		if ent.visited {
			return false
		}
		ent.visited = true

		if ent.term {
			return false
		}

		// If anything can reach the end, we can reach the end
		// from pc.
		for _, j := range ent.jumps {
			if f.reachesEnd(*j) {
				return true
			}
		}

		// If the jump was conditional, we can reach the next
		// PC, so try reaching the end from it.
		if ent.cond {
			continue
		}
		return false
	}
	return true
}
// newManualType constructs a remote type from an interpreter Type
// using the size and alignment properties of the given architecture.
// Most types are parsed directly out of the remote process, but to do
// so we need to lay out the structures that describe those types
// ourselves.
func newManualType(t eval.Type, arch Arch) *remoteType {
	if nt, ok := t.(*eval.NamedType); ok {
		t = nt.Def
	}

	// Get the type map for this architecture
	typeMap, _ := manualTypes[arch]
	if typeMap == nil {
		typeMap = make(map[eval.Type]*remoteType)
		manualTypes[arch] = typeMap

		// Construct basic types for this architecture
		basicType := func(t eval.Type, mk maker, size int, fieldAlign int) {
			t = t.(*eval.NamedType).Def
			if fieldAlign == 0 {
				fieldAlign = size
			}
			typeMap[t] = &remoteType{t, size, fieldAlign, mk}
		}
		basicType(eval.Uint8Type, mkUint8, 1, 0)
		basicType(eval.Uint32Type, mkUint32, 4, 0)
		basicType(eval.UintptrType, mkUintptr, arch.PtrSize(), 0)
		basicType(eval.Int16Type, mkInt16, 2, 0)
		basicType(eval.Int32Type, mkInt32, 4, 0)
		basicType(eval.IntType, mkInt, arch.IntSize(), 0)
		basicType(eval.StringType, mkString, arch.PtrSize()+arch.IntSize(), arch.PtrSize())
	}

	if rt, ok := typeMap[t]; ok {
		return rt
	}

	var rt *remoteType
	switch t := t.(type) {
	case *eval.PtrType:
		var elem *remoteType
		mk := func(r remote) eval.Value { return remotePtr{r, elem} }
		rt = &remoteType{t, arch.PtrSize(), arch.PtrSize(), mk}
		// Construct the element type after registering the
		// type to break cycles.
		typeMap[eval.Type(t)] = rt
		elem = newManualType(t.Elem, arch)

	case *eval.ArrayType:
		elem := newManualType(t.Elem, arch)
		mk := func(r remote) eval.Value { return remoteArray{r, t.Len, elem} }
		rt = &remoteType{t, elem.size * int(t.Len), elem.fieldAlign, mk}

	case *eval.SliceType:
		elem := newManualType(t.Elem, arch)
		mk := func(r remote) eval.Value { return remoteSlice{r, elem} }
		rt = &remoteType{t, arch.PtrSize() + 2*arch.IntSize(), arch.PtrSize(), mk}

	case *eval.StructType:
		layout := make([]remoteStructField, len(t.Elems))
		offset := 0
		fieldAlign := 0
		for i, f := range t.Elems {
			elem := newManualType(f.Type, arch)
			if fieldAlign == 0 {
				fieldAlign = elem.fieldAlign
			}
			offset = arch.Align(offset, elem.fieldAlign)
			layout[i].offset = offset
			layout[i].fieldType = elem
			offset += elem.size
		}
		mk := func(r remote) eval.Value { return remoteStruct{r, layout} }
		rt = &remoteType{t, offset, fieldAlign, mk}

	default:
		log.Crashf("cannot manually construct type %T", t)
	}

	typeMap[t] = rt
	return rt
}
func (a *expr) genBinOpRem(l, r *expr) {
	switch t := l.t.lit().(type) {
	case *uintType:
		lf := l.asUint()
		rf := r.asUint()
		switch t.Bits {
		case 8:
			a.eval = func(t *Thread) uint64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return uint64(uint8(l % r))
			}
		case 16:
			a.eval = func(t *Thread) uint64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return uint64(uint16(l % r))
			}
		case 32:
			a.eval = func(t *Thread) uint64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return uint64(uint32(l % r))
			}
		case 64:
			a.eval = func(t *Thread) uint64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return l % r
			}
		case 0:
			a.eval = func(t *Thread) uint64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return uint64(uint(l % r))
			}
		default:
			log.Crashf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
		}
	case *intType:
		lf := l.asInt()
		rf := r.asInt()
		switch t.Bits {
		case 8:
			a.eval = func(t *Thread) int64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return int64(int8(l % r))
			}
		case 16:
			a.eval = func(t *Thread) int64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return int64(int16(l % r))
			}
		case 32:
			a.eval = func(t *Thread) int64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return int64(int32(l % r))
			}
		case 64:
			a.eval = func(t *Thread) int64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return l % r
			}
		case 0:
			a.eval = func(t *Thread) int64 {
				l, r := lf(t), rf(t)
				if r == 0 {
					t.Abort(DivByZeroError{})
				}
				return int64(int(l % r))
			}
		default:
			log.Crashf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
		}
	case *idealIntType:
		l := l.asIdealInt()()
		r := r.asIdealInt()()
		val := l.Rem(r)
		a.eval = func() *bignum.Integer { return val }
	default:
		log.Crashf("unexpected type %v at %v", l.t, a.pos)
	}
}
func (a *expr) genBinOpMul(l, r *expr) {
	switch t := l.t.lit().(type) {
	case *uintType:
		lf := l.asUint()
		rf := r.asUint()
		switch t.Bits {
		case 8:
			a.eval = func(t *Thread) uint64 { return uint64(uint8(lf(t) * rf(t))) }
		case 16:
			a.eval = func(t *Thread) uint64 { return uint64(uint16(lf(t) * rf(t))) }
		case 32:
			a.eval = func(t *Thread) uint64 { return uint64(uint32(lf(t) * rf(t))) }
		case 64:
			a.eval = func(t *Thread) uint64 { return lf(t) * rf(t) }
		case 0:
			a.eval = func(t *Thread) uint64 { return uint64(uint(lf(t) * rf(t))) }
		default:
			log.Crashf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
		}
	case *intType:
		lf := l.asInt()
		rf := r.asInt()
		switch t.Bits {
		case 8:
			a.eval = func(t *Thread) int64 { return int64(int8(lf(t) * rf(t))) }
		case 16:
			a.eval = func(t *Thread) int64 { return int64(int16(lf(t) * rf(t))) }
		case 32:
			a.eval = func(t *Thread) int64 { return int64(int32(lf(t) * rf(t))) }
		case 64:
			a.eval = func(t *Thread) int64 { return lf(t) * rf(t) }
		case 0:
			a.eval = func(t *Thread) int64 { return int64(int(lf(t) * rf(t))) }
		default:
			log.Crashf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
		}
	case *idealIntType:
		l := l.asIdealInt()()
		r := r.asIdealInt()()
		val := l.Mul(r)
		a.eval = func() *bignum.Integer { return val }
	case *floatType:
		lf := l.asFloat()
		rf := r.asFloat()
		switch t.Bits {
		case 32:
			a.eval = func(t *Thread) float64 { return float64(float32(lf(t) * rf(t))) }
		case 64:
			a.eval = func(t *Thread) float64 { return lf(t) * rf(t) }
		case 0:
			a.eval = func(t *Thread) float64 { return float64(float(lf(t) * rf(t))) }
		default:
			log.Crashf("unexpected size %d in type %v at %v", t.Bits, t, a.pos)
		}
	case *idealFloatType:
		l := l.asIdealFloat()()
		r := r.asIdealFloat()()
		val := l.Mul(r)
		a.eval = func() *bignum.Rational { return val }
	default:
		log.Crashf("unexpected type %v at %v", l.t, a.pos)
	}
}