// nacladdr rewrites the address a in p for NaCl.
// Memory operands must be based on BP, SP, or R15 (the NaCl
// sandbox base register); any other base register is moved into
// the index slot and the operand is rebased on R15.
func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if p.As == ALEAL || p.As == ALEAQ {
		return
	}

	if a.Reg == REG_BP {
		ctxt.Diag("invalid address: %v", p)
		return
	}

	if a.Reg == REG_TLS {
		a.Reg = REG_BP
	}
	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
		switch a.Reg {
		// all ok
		case REG_BP, REG_SP, REG_R15:
			break

		default:
			if a.Index != REG_NONE {
				ctxt.Diag("invalid address %v", p)
			}
			a.Index = a.Reg
			if a.Index != REG_NONE {
				a.Scale = 1
			}
			a.Reg = REG_R15
		}
	}
}
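// datagostring sets a to refer to the string header (pointer and
// length) of the symbol holding the Go string constant sval.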
func datagostring(sval string, a *obj.Addr) {
	sym := stringsym(sval)
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_EXTERN
	a.Sym = Linksym(sym)
	a.Node = sym.Def
	a.Offset = 0 // header
	a.Etype = TSTRING
}
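// Datastring sets a to refer to the raw bytes of the string
// constant s, just past the string header in the same symbol.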
func Datastring(s string, a *obj.Addr) {
	sym := stringsym(s)
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_EXTERN
	a.Sym = Linksym(sym)
	a.Node = sym.Def
	a.Offset = int64(Widthptr) + int64(Widthint) // skip header
	a.Etype = Simtype[TINT]
}
/*
 * substitute s for v in a.
 * return 1 on failure to substitute, 0 otherwise.
 */
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
	if copyas(a, v) {
		reg := int(s.Reg)
		if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
			if f != 0 {
				a.Reg = int16(reg)
			}
		}

		return 0
	}

	if regtyp(v) {
		reg := int(v.Reg)
		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
			if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
				return 1 /* can't use BP-base with index */
			}
			if f != 0 {
				a.Reg = s.Reg
			}
		}

		if int(a.Index) == reg {
			if f != 0 {
				a.Index = s.Reg
			}
			return 0
		}

		return 0
	}

	return 0
}
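// addreg rewrites a to refer directly to the register rn,
// clearing any symbol, node, and offset it carried.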
func addreg(a *obj.Addr, rn int) {
	a.Sym = nil
	a.Node = nil
	a.Offset = 0
	a.Type = obj.TYPE_REG
	a.Reg = int16(rn)
	a.Name = 0
	Ostats.Ncvtreg++
}
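// mkvar returns the bit vector for the variable or register
// referenced by a, recording register uses in f along the way.
// Named autos and parameters get Var entries, allocated on first
// use; any address that cannot be registerized yields zbits.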
func mkvar(f *Flow, a *obj.Addr) Bits {
	/*
	 * mark registers used
	 */
	if a.Type == obj.TYPE_NONE {
		return zbits
	}

	r := f.Data.(*Reg)
	r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB

	var n int
	switch a.Type {
	default:
		regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
		if regu == 0 {
			return zbits
		}
		bit := zbits
		bit.b[0] = regu
		return bit

		// TODO(rsc): Remove special case here.
	case obj.TYPE_ADDR:
		var bit Bits
		if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
			goto memcase
		}
		a.Type = obj.TYPE_MEM
		bit = mkvar(f, a)
		setaddrs(bit)
		a.Type = obj.TYPE_ADDR
		Ostats.Naddr++
		return zbits

	memcase:
		fallthrough

	case obj.TYPE_MEM:
		if r != nil {
			r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
		}

		/* NOTE: 5g did
		if(r->f.prog->scond & (C_PBIT|C_WBIT))
			r->set.b[0] |= RtoB(a->reg);
		*/
		switch a.Name {
		default:
			// Note: This case handles NAME_EXTERN and NAME_STATIC.
			// We treat these as requiring eager writes to memory, due to
			// the possibility of a fault handler looking at them, so there is
			// not much point in registerizing the loads.
			// If we later choose the set of candidate variables from a
			// larger list, these cases could be deprioritized instead of
			// removed entirely.
			return zbits

		case obj.NAME_PARAM,
			obj.NAME_AUTO:
			n = int(a.Name)
		}
	}

	node, _ := a.Node.(*Node)
	if node == nil || node.Op != ONAME || node.Orig == nil {
		return zbits
	}
	node = node.Orig
	if node.Orig != node {
		Fatal("%v: bad node", Ctxt.Dconv(a))
	}
	if node.Sym == nil || node.Sym.Name[0] == '.' {
		return zbits
	}
	et := int(a.Etype)
	o := a.Offset
	w := a.Width
	if w < 0 {
		Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
	}

	flag := 0
	var v *Var
	for i := 0; i < nvar; i++ {
		v = &vars[i]
		if v.node == node && int(v.name) == n {
			if v.offset == o {
				if int(v.etype) == et {
					if int64(v.width) == w {
						// TODO(rsc): Remove special case for arm here.
						if flag == 0 || Thearch.Thechar != '5' {
							return blsh(uint(i))
						}
					}
				}
			}

			// if they overlap, disable both
			if overlap_reg(v.offset, v.width, o, int(w)) {
				// print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
				v.addr = 1

				flag = 1
			}
		}
	}

	switch et {
	case 0, TFUNC:
		return zbits
	}

	if nvar >= NVAR {
		if Debug['w'] > 1 && node != nil {
			Fatal("variable not optimized: %v", Nconv(node, obj.FmtSharp))
		}
		if Debug['v'] > 0 {
			Warn("variable not optimized: %v", Nconv(node, obj.FmtSharp))
		}

		// If we're not tracking a word in a variable, mark the rest as
		// having its address taken, so that we keep the whole thing
		// live at all calls. otherwise we might optimize away part of
		// a variable but not all of it.
		var v *Var
		for i := 0; i < nvar; i++ {
			v = &vars[i]
			if v.node == node {
				v.addr = 1
			}
		}

		return zbits
	}

	i := nvar
	nvar++
	v = &vars[i]
	v.id = i
	v.offset = o
	v.name = int8(n)
	v.etype = int8(et)
	v.width = int(w)
	v.addr = int8(flag) // funny punning
	v.node = node

	// node->opt is the head of a linked list
	// of Vars within the given Node, so that
	// we can start at a Var and find all the other
	// Vars in the same Go variable.
	v.nextinnode, _ = node.Opt.(*Var)

	node.Opt = v

	bit := blsh(uint(i))
	if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
		for z := 0; z < BITS; z++ {
			externs.b[z] |= bit.b[z]
		}
	}
	if n == obj.NAME_PARAM {
		for z := 0; z < BITS; z++ {
			params.b[z] |= bit.b[z]
		}
	}

	if node.Class == PPARAM {
		for z := 0; z < BITS; z++ {
			ivar.b[z] |= bit.b[z]
		}
	}
	if node.Class == PPARAMOUT {
		for z := 0; z < BITS; z++ {
			ovar.b[z] |= bit.b[z]
		}
	}

	// Treat values with their address taken as live at calls,
	// because the garbage collector's liveness analysis in ../gc/plive.c does.
	// These must be consistent or else we will elide stores and the garbage
	// collector will see uninitialized data.
	// The typical case where our own analysis is out of sync is when the
	// node appears to have its address taken but that code doesn't actually
	// get generated and therefore doesn't show up as an address being
	// taken when we analyze the instruction stream.
	// One instance of this case is when a closure uses the same name as
	// an outer variable for one of its own variables declared with :=.
	// The parser flags the outer variable as possibly shared, and therefore
	// sets addrtaken, even though it ends up not being actually shared.
	// If we were better about _ elision, _ = &x would suffice too.
	// The broader := in a closure problem is mentioned in a comment in
	// closure.c:/^typecheckclosure and dcl.c:/^oldname.
	if node.Addrtaken {
		v.addr = 1
	}

	// Disable registerization for globals, because:
	// (1) we might panic at any time and we want the recovery code
	// to see the latest values (issue 1304).
	// (2) we don't know what pointers might point at them and we want
	// loads via those pointers to see updated values and vice versa (issue 7995).
	//
	// Disable registerization for results if using defer, because the deferred func
	// might recover and return, causing the current values to be used.
	if node.Class == PEXTERN || (Hasdefer != 0 && node.Class == PPARAMOUT) {
		v.addr = 1
	}

	if Debug['R'] != 0 {
		fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
	}

	Ostats.Nvar++

	return bit
}
// Naddr rewrites a to refer to n.
// It assumes that a is zeroed on entry.
func Naddr(a *obj.Addr, n *Node) {
	if n == nil {
		return
	}

	if n.Type != nil && n.Type.Etype != TIDEAL {
		// TODO(rsc): This is undone by the selective clearing of width below,
		// to match architectures that were not as aggressive in setting width
		// during naddr. Those widths must be cleared to avoid triggering
		// failures in gins when it detects real but heretofore latent (and one
		// hopes innocuous) type mismatches.
		// The type mismatches should be fixed and the clearing below removed.
		dowidth(n.Type)

		a.Width = n.Type.Width
	}

	switch n.Op {
	default:
		a := a // copy to let escape into Ctxt.Dconv
		Debug['h'] = 1
		Dump("naddr", n)
		Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))

	case OREGISTER:
		a.Type = obj.TYPE_REG
		a.Reg = n.Reg
		a.Sym = nil
		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OINDREG:
		a.Type = obj.TYPE_MEM
		a.Reg = n.Reg
		a.Sym = Linksym(n.Sym)
		a.Offset = n.Xoffset
		if a.Offset != int64(int32(a.Offset)) {
			Yyerror("offset %d too large for OINDREG", a.Offset)
		}
		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

		// n->left is PHEAP ONAME for stack parameter.
		// compute address of actual parameter on stack.
	case OPARAM:
		a.Etype = Simtype[n.Left.Type.Etype]

		a.Width = n.Left.Type.Width
		a.Offset = n.Xoffset
		a.Sym = Linksym(n.Left.Sym)
		a.Type = obj.TYPE_MEM
		a.Name = obj.NAME_PARAM
		a.Node = n.Left.Orig

	case OCLOSUREVAR:
		if !Curfn.Func.Needctxt {
			Fatal("closurevar without needctxt")
		}
		a.Type = obj.TYPE_MEM
		a.Reg = int16(Thearch.REGCTXT)
		a.Sym = nil
		a.Offset = n.Xoffset

	case OCFUNC:
		Naddr(a, n.Left)
		a.Sym = Linksym(n.Left.Sym)

	case ONAME:
		a.Etype = 0
		if n.Type != nil {
			a.Etype = Simtype[n.Type.Etype]
		}
		a.Offset = n.Xoffset
		s := n.Sym
		a.Node = n.Orig

		//if(a->node >= (Node*)&n)
		//	fatal("stack node");
		if s == nil {
			s = Lookup(".noname")
		}
		if n.Method {
			if n.Type != nil {
				if n.Type.Sym != nil {
					if n.Type.Sym.Pkg != nil {
						s = Pkglookup(s.Name, n.Type.Sym.Pkg)
					}
				}
			}
		}

		a.Type = obj.TYPE_MEM
		switch n.Class {
		default:
			Fatal("naddr: ONAME class %v %d\n", n.Sym, n.Class)

		case PEXTERN:
			a.Name = obj.NAME_EXTERN

		case PAUTO:
			a.Name = obj.NAME_AUTO

		case PPARAM, PPARAMOUT:
			a.Name = obj.NAME_PARAM

		case PFUNC:
			a.Name = obj.NAME_EXTERN
			a.Type = obj.TYPE_ADDR
			a.Width = int64(Widthptr)
			s = funcsym(s)
		}

		a.Sym = Linksym(s)

	case OLITERAL:
		if Thearch.Thechar == '8' {
			a.Width = 0
		}
		switch n.Val.Ctype {
		default:
			Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))

		case CTFLT:
			a.Type = obj.TYPE_FCONST
			a.Val = mpgetflt(n.Val.U.Fval)

		case CTINT, CTRUNE:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = Mpgetfix(n.Val.U.Xval)

		case CTSTR:
			datagostring(n.Val.U.Sval, a)

		case CTBOOL:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = int64(obj.Bool2int(n.Val.U.Bval))

		case CTNIL:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = 0
		}

	case OADDR:
		Naddr(a, n.Left)
		a.Etype = uint8(Tptr)
		if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
			a.Width = int64(Widthptr)
		}
		if a.Type != obj.TYPE_MEM {
			a := a // copy to let escape into Ctxt.Dconv
			Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
		}
		a.Type = obj.TYPE_ADDR

		// itable of interface value
	case OITAB:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // itab(nil)
		}
		a.Etype = uint8(Tptr)
		a.Width = int64(Widthptr)

		// pointer in a string or slice
	case OSPTR:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // ptr(nil)
		}
		a.Etype = Simtype[Tptr]
		a.Offset += int64(Array_array)
		a.Width = int64(Widthptr)

		// len of string or slice
	case OLEN:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // len(nil)
		}
		a.Etype = Simtype[TUINT]
		a.Offset += int64(Array_nel)
		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}

		// cap of string or slice
	case OCAP:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // cap(nil)
		}
		a.Etype = Simtype[TUINT]
		a.Offset += int64(Array_cap)
		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}
	}
}
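// Afunclit rewrites a, the address of a function literal, from
// an external address constant into a direct memory reference
// through the function's symbol.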
func Afunclit(a *obj.Addr, n *Node) {
	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
		a.Type = obj.TYPE_MEM
		a.Sym = Linksym(n.Sym)
	}
}
/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := gc.Mpgetfix(n.Val.U.Xval)
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case x86.AADDB, x86.AADDW, x86.AADDL, x86.AADDQ,
			x86.ASUBB, x86.ASUBW, x86.ASUBL, x86.ASUBQ,
			x86.AANDB, x86.AANDW, x86.AANDL, x86.AANDQ,
			x86.AORB, x86.AORW, x86.AORL, x86.AORQ,
			x86.AXORB, x86.AXORW, x86.AXORL, x86.AXORQ,
			x86.AINCB, x86.AINCW, x86.AINCL, x86.AINCQ,
			x86.ADECB, x86.ADECW, x86.ADECL, x86.ADECQ,
			x86.AMOVB, x86.AMOVW, x86.AMOVL, x86.AMOVQ:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT, gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatal("can't happen")
			}
			gins(movptr, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = x86.REG_NONE
		gc.Fixlargeoffset(&n1)
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}