// addreg rewrites a to refer to register rn, clearing the memory-operand
// fields so the operand describes a bare register.
func addreg(a *obj.Addr, rn int) {
	a.Sym = nil
	a.Node = nil
	a.Offset = 0
	a.Type = obj.TYPE_REG
	a.Reg = int16(rn)
	a.Name = obj.NAME_NONE
	Ostats.Ncvtreg++
}
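// For illustration, the before/after of this rewrite on a hypothetical
// operand (the instruction, register, and offset here are invented, not
// taken from a real compilation):
//
//	MOVW	autotmp_0003-8(SP), R1	// p.From: TYPE_MEM, NAME_AUTO, Offset=-8
//	addreg(&p.From, REG_R5)
//	MOVW	R5, R1			// p.From: TYPE_REG, Reg=R5; Sym, Node, Offset, Name cleared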
// copysub substitutes s for v in a.
// copysub returns true on failure to substitute; as written it never
// fails and always returns false.
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau(a, v) {
		if a.Type == obj.TYPE_SHIFT {
			// Rewrite the shifted register Rm in bits 0-3.
			if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
				a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
			}
			// If bit 4 is set, the shift count is a register in bits 8-11;
			// rewrite that register too.
			if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
				a.Offset = a.Offset&^(0xf<<8) | (int64(s.Reg)&0xf)<<8
			}
		} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
			if a.Offset == int64(v.Reg) {
				a.Offset = int64(s.Reg)
			}
			if a.Reg == v.Reg {
				a.Reg = s.Reg
			}
		} else {
			a.Reg = s.Reg
		}
	}

	return false
}
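// The bit twiddling above follows the ARM shifted-register operand layout
// that TYPE_SHIFT packs into Offset: bits 0-3 hold the shifted register Rm,
// bit 4 selects a register-specified shift count, and bits 8-11 then hold
// the count register Rs. A minimal decoding sketch; this helper is for
// illustration only and is not part of the package:
func shiftRegs(off int64) (rm, rs int16, rsValid bool) {
	rm = arm.REG_R0 + int16(off&0xf) // bits 0-3: shifted register Rm
	if off&(1<<4) != 0 {             // bit 4: shift count held in a register
		rs = arm.REG_R0 + int16((off>>8)&0xf) // bits 8-11: count register Rs
		rsValid = true
	}
	return rm, rs, rsValid
}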
// Naddr rewrites a to refer to n.
// It assumes that a is zeroed on entry.
func Naddr(a *obj.Addr, n *Node) {
	if n == nil {
		return
	}

	if n.Type != nil && n.Type.Etype != TIDEAL {
		// TODO(rsc): This is undone by the selective clearing of width below,
		// to match architectures that were not as aggressive in setting width
		// during naddr. Those widths must be cleared to avoid triggering
		// failures in gins when it detects real but heretofore latent (and one
		// hopes innocuous) type mismatches.
		// The type mismatches should be fixed and the clearing below removed.
		dowidth(n.Type)
		a.Width = n.Type.Width
	}

	switch n.Op {
	default:
		a := a // copy to let escape into Ctxt.Dconv
		Debug['h'] = 1
		Dump("naddr", n)
		Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))

	case OREGISTER:
		a.Type = obj.TYPE_REG
		a.Reg = n.Reg
		a.Sym = nil
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OINDREG:
		a.Type = obj.TYPE_MEM
		a.Reg = n.Reg
		a.Sym = Linksym(n.Sym)
		a.Offset = n.Xoffset
		if a.Offset != int64(int32(a.Offset)) {
			Yyerror("offset %d too large for OINDREG", a.Offset)
		}
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OCLOSUREVAR:
		if !Curfn.Func.Needctxt {
			Fatalf("closurevar without needctxt")
		}
		a.Type = obj.TYPE_MEM
		a.Reg = int16(Thearch.REGCTXT)
		a.Sym = nil
		a.Offset = n.Xoffset

	case OCFUNC:
		Naddr(a, n.Left)
		a.Sym = Linksym(n.Left.Sym)

	case ONAME:
		a.Etype = 0
		if n.Type != nil {
			a.Etype = uint8(Simtype[n.Type.Etype])
		}
		a.Offset = n.Xoffset
		s := n.Sym
		a.Node = n.Orig

		//if(a->node >= (Node*)&n)
		//	fatal("stack node");
		if s == nil {
			s = Lookup(".noname")
		}
		if n.Name.Method && n.Type != nil && n.Type.Sym != nil && n.Type.Sym.Pkg != nil {
			s = Pkglookup(s.Name, n.Type.Sym.Pkg)
		}

		a.Type = obj.TYPE_MEM
		switch n.Class {
		default:
			Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)

		case PEXTERN:
			a.Name = obj.NAME_EXTERN

		case PAUTO:
			a.Name = obj.NAME_AUTO

		case PPARAM, PPARAMOUT:
			a.Name = obj.NAME_PARAM

		case PFUNC:
			a.Name = obj.NAME_EXTERN
			a.Type = obj.TYPE_ADDR
			a.Width = int64(Widthptr)
			s = funcsym(s)
		}

		a.Sym = Linksym(s)

	case ODOT:
		// A special case to make write barriers more efficient.
		// Taking the address of the first field of a named struct
		// is the same as taking the address of the struct.
		if !n.Left.Type.IsStruct() || n.Left.Type.Field(0).Sym != n.Sym {
			Debug['h'] = 1
			Dump("naddr", n)
			Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))
		}
		Naddr(a, n.Left)

	case OLITERAL:
		if Thearch.LinkArch.Family == sys.I386 {
			a.Width = 0
		}

		switch u := n.Val().U.(type) {
		default:
			Fatalf("naddr: const %v", Tconv(n.Type, FmtLong))

		case *Mpflt:
			a.Type = obj.TYPE_FCONST
			a.Val = u.Float64()

		case *Mpint:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = u.Int64()

		case string:
			datagostring(u, a)

		case bool:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = int64(obj.Bool2int(u))

		case *NilVal:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = 0
		}

	case OADDR:
		Naddr(a, n.Left)
		a.Etype = uint8(Tptr)
		if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { // TODO(rsc): Do this even for these architectures.
			a.Width = int64(Widthptr)
		}
		if a.Type != obj.TYPE_MEM {
			a := a // copy to let escape into Ctxt.Dconv
			Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), n.Left.Op)
		}
		a.Type = obj.TYPE_ADDR

		// itable of interface value
	case OITAB:
		Naddr(a, n.Left)
		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // itab(nil)
		}
		a.Etype = uint8(Tptr)
		a.Width = int64(Widthptr)

		// pointer in a string or slice
	case OSPTR:
		Naddr(a, n.Left)
		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // ptr(nil)
		}
		a.Etype = uint8(Simtype[Tptr])
		a.Offset += int64(Array_array)
		a.Width = int64(Widthptr)

		// len of string or slice
	case OLEN:
		Naddr(a, n.Left)
		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // len(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_nel)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}

		// cap of string or slice
	case OCAP:
		Naddr(a, n.Left)
		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // cap(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_cap)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}
	}
}
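// For example, for a stack-allocated local (ONAME with class PAUTO) such as
// `var x int64` placed at frame offset -8, Naddr leaves a describing the
// automatic slot roughly as follows (the variable and offset are
// hypothetical, for illustration):
//
//	a.Type   = obj.TYPE_MEM
//	a.Name   = obj.NAME_AUTO
//	a.Reg    = 0                      // offset is frame-relative, no base register
//	a.Sym    = Linksym(n.Sym)         // "x"
//	a.Offset = n.Xoffset              // -8
//	a.Etype  = uint8(Simtype[TINT64])
//	a.Width  = 8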
/*
 * xtramodes enables the ARM post-increment and
 * shift offset addressing modes to transform
 *   MOVW   0(R3),R1
 *   ADD    $4,R3,R3
 * into
 *   MOVW.P 4(R3),R1
 * and
 *   ADD    R0,R1
 *   MOVBU  0(R1),R0
 * into
 *   MOVBU  R0<<0(R1),R0
 */
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
	p := r.Prog
	v := *a
	v.Type = obj.TYPE_REG
	r1 := findpre(r, &v)
	if r1 != nil {
		p1 := r1.Prog
		if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
			switch p1.As {
			case arm.AADD:
				if p1.Scond&arm.C_SBIT != 0 {
					// avoid altering ADD.S/ADC sequences.
					break
				}

				if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
					if nochange(gc.Uniqs(r1), r, p1) {
						if a != &p.From || v.Reg != p.To.Reg {
							if finduse(g, r.S1, &v) {
								if p1.Reg == 0 || p1.Reg == v.Reg {
									/* pre-indexing */
									p.Scond |= arm.C_WBIT
								} else {
									return false
								}
							}
						}

						switch p1.From.Type {
						/* register offset */
						case obj.TYPE_REG:
							if gc.Nacl {
								return false
							}
							*a = obj.Addr{}
							a.Type = obj.TYPE_SHIFT
							a.Offset = int64(p1.From.Reg) & 15

							/* scaled register offset */
						case obj.TYPE_SHIFT:
							if gc.Nacl {
								return false
							}
							*a = obj.Addr{}
							a.Type = obj.TYPE_SHIFT
							fallthrough

							/* immediate offset */
						case obj.TYPE_CONST, obj.TYPE_ADDR:
							a.Offset = p1.From.Offset
						}

						if p1.Reg != 0 {
							a.Reg = p1.Reg
						}
						excise(r1)
						return true
					}
				}

			case arm.AMOVW:
				if p1.From.Type == obj.TYPE_REG {
					r2 := findinc(r1, r, &p1.From)
					if r2 != nil {
						var r3 *gc.Flow
						for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
						}
						if r3 == r {
							/* post-indexing */
							p1 := r2.Prog

							a.Reg = p1.To.Reg
							a.Offset = p1.From.Offset
							p.Scond |= arm.C_PBIT
							if !finduse(g, r, &r1.Prog.To) {
								excise(r1)
							}
							excise(r2)
							return true
						}
					}
				}
			}
		}
	}

	if a != &p.From || a.Reg != p.To.Reg {
		r1 := findinc(r, nil, &v)
		if r1 != nil {
			/* post-indexing */
			p1 := r1.Prog

			a.Offset = p1.From.Offset
			p.Scond |= arm.C_PBIT
			excise(r1)
			return true
		}
	}

	return false
}
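// The Scond bits set by xtramodes select the addressing-mode suffixes the
// assembler prints: C_PBIT yields the .P (post-indexed) form and C_WBIT the
// .W (pre-indexed, write-back) form shown in the comment above. A sketch of
// that mapping, for illustration only; this helper is not part of the package:
func addrModeSuffix(scond uint8) string {
	switch {
	case scond&arm.C_PBIT != 0:
		return ".P" // access memory at the base, then write base+offset back
	case scond&arm.C_WBIT != 0:
		return ".W" // write base+offset back first, then access memory there
	}
	return "" // plain offset addressing, no write-back
}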