func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, gc.Ctxt.FixedFrameSize()+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, gc.Ctxt.FixedFrameSize()+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
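// A hedged summary (not part of the original source) of the three code
// shapes the ppc64 zerorange above emits; operands are symbolic:
//
//	small (cnt < 4 words):     MOVD REGZERO, off(REGSP) for each word
//	medium (cnt <= 128 words): ADD $(off-8), REGSP, RT1
//	                           DUFFZERO with To.Offset = 4*(128 - words),
//	                           entering duffzero past the stores not needed
//	large:                     MOVDU REGZERO, 8(RT1)   // loop: pre-increment store
//	                           CMP RT1, RT2
//	                           BNE loop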
func Prog(as int) *obj.Prog {
	var p *obj.Prog

	if as == obj.ADATA || as == obj.AGLOBL {
		if ddumped != 0 {
			Fatalf("already dumped data")
		}
		if dpc == nil {
			dpc = Ctxt.NewProg()
			dfirst = dpc
		}

		p = dpc
		dpc = Ctxt.NewProg()
		p.Link = dpc
	} else {
		p = Pc
		Pc = Ctxt.NewProg()
		Clearp(Pc)
		p.Link = Pc
	}

	if lineno == 0 {
		if Debug['K'] != 0 {
			Warn("prog: line 0")
		}
	}

	p.As = int16(as)
	p.Lineno = lineno
	return p
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}

		// check is
		//	CMP arg, $0
		//	JNE 2(PC) (likely)
		//	MOV AX, 0
		p1 = gc.Ctxt.NewProg()
		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = int16(cmpptr)
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p1.As = x86.AJNE
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = 1 // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		// if possible, since we know arg is 0, use 0(arg),
		// which will be shorter to encode than plain 0.
		p2.As = x86.AMOVL
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = x86.REG_AX
		if regtyp(&p.From) {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = p.From.Reg
		} else {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = x86.REG_NONE
		}

		p2.To.Offset = 0
	}
}
/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr

	gc.Naddr(&a, n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatalf("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}
/*
 * generate high multiply
 *	res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg

		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
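// Worked example (illustrative, not from the source): for TUINT32 operands
// nl = 0x40000000 and nr = 4, the full 64-bit product is 0x1_0000_0000.
// AMULLU writes the (hi, lo) pair into the (n1, n2) register pair per the
// "n2 * n1 -> (n1 n2)" comment above, so the high word 0x1 lands in n1,
// which is the value cgen_hmul then copies into res.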
// ARMConditionCodes handles the special condition code situation for the ARM.
// It returns a boolean to indicate success; failure means cond was unrecognized.
func ARMConditionCodes(prog *obj.Prog, cond string) bool {
	if cond == "" {
		return true
	}
	bits, ok := ParseARMCondition(cond)
	if !ok {
		return false
	}
	/* hack to make B.NE etc. work: turn it into the corresponding conditional */
	if prog.As == arm.AB {
		prog.As = int16(bcode[(bits^arm.C_SCOND_XOR)&0xf])
		bits = (bits &^ 0xf) | arm.C_SCOND_NONE
	}

	prog.Scond = bits
	return true
}
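// Hedged usage sketch (not part of the original source): attaching the
// condition parsed from an instruction such as "B.NE target". Assumes prog
// was created by the parser with As = arm.AB.
//
//	prog.As = int16(arm.AB)
//	if !ARMConditionCodes(prog, "NE") {
//		// ".NE" was unrecognized; report an assembly error
//	}
//	// prog.As is now the conditional branch opcode selected from bcode,
//	// and prog.Scond carries C_SCOND_NONE so the branch is not predicated twice.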
func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
	// RegTo2 is set on the instructions we insert here so they don't get
	// processed twice.
	if p.RegTo2 != 0 {
		return
	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	// Any Prog (aside from the above special cases) with an Addr with Name ==
	// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.cx
	// inserted before it.
	isName := func(a *obj.Addr) bool {
		if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
			return false
		}
		if a.Sym.Type == obj.STLSBSS {
			return false
		}
		return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
	}

	if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
		// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
		// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
		// respectively.
		if p.To.Type != obj.TYPE_REG {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_CX
			q.To = p.To
			p.As = AMOVL
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_CX
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
	}

	if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
		return
	}
	q := obj.Appendp(ctxt, p)
	q.RegTo2 = 1
	r := obj.Appendp(ctxt, q)
	r.RegTo2 = 1
	q.As = obj.ACALL
	q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk.cx", 0)
	q.To.Type = obj.TYPE_MEM
	q.To.Name = obj.NAME_EXTERN
	q.To.Sym.Local = true
	r.As = p.As
	r.Scond = p.Scond
	r.From = p.From
	r.From3 = p.From3
	r.Reg = p.Reg
	r.To = p.To
	obj.Nopout(p)
}
func (p *Parser) branch(jmp, target *obj.Prog) {
	jmp.To = obj.Addr{
		Type:  obj.TYPE_BRANCH,
		Index: 0,
	}
	jmp.To.Val = target
}
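// Hedged usage sketch (not part of the original source): resolving a
// forward reference once a label's Prog is known. "jmp" and "done" are
// illustrative; p.labels is the same map filled in by Parser.append below.
//
//	jmp := p.lastProg          // e.g. a pending "JMP done"
//	target := p.labels["done"] // *obj.Prog recorded when the label was defined
//	p.branch(jmp, target)      // jmp.To becomes a TYPE_BRANCH whose Val is target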
// copysub1 replaces v with s in p1->reg if f!=0 or indicates if it could if f==0.
// Returns 1 on failure to substitute (it always succeeds on mips).
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
	if f != 0 {
		if copyau1(p1, v) {
			p1.Reg = s.Reg
		}
	}
	return 0
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
	if oprange[AOR&obj.AMask].start == nil {
		buildop(ctxt)
	}
	a1 := int(p.Optab)
	if a1 != 0 {
		return &optab[a1-1:][0]
	}
	a1 = int(p.From.Class)
	if a1 == 0 {
		a1 = aclass(ctxt, &p.From) + 1
		p.From.Class = int8(a1)
	}

	a1--
	a3 := int(p.To.Class)
	if a3 == 0 {
		a3 = aclass(ctxt, &p.To) + 1
		p.To.Class = int8(a3)
	}

	a3--
	a2 := C_NONE
	if p.Reg != 0 {
		a2 = C_REG
	}

	//print("oplook %P %d %d %d\n", p, a1, a2, a3);
	r0 := p.As & obj.AMask

	o := oprange[r0].start
	if o == nil {
		o = oprange[r0].stop /* just generate an error */
	}
	e := oprange[r0].stop
	c1 := xcmp[a1][:]
	c3 := xcmp[a3][:]
	for ; -cap(o) < -cap(e); o = o[1:] {
		if int(o[0].a2) == a2 {
			if c1[o[0].a1] != 0 {
				if c3[o[0].a3] != 0 {
					p.Optab = uint16((-cap(o) + cap(optab)) + 1)
					return &o[0]
				}
			}
		}
	}

	ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
	prasm(p)
	if o == nil {
		o = optab
	}
	return &o[0]
}
// ARM64Suffix handles the special suffix for the ARM64.
// It returns a boolean to indicate success; failure means
// cond was unrecognized.
func ARM64Suffix(prog *obj.Prog, cond string) bool {
	if cond == "" {
		return true
	}
	bits, ok := ParseARM64Suffix(cond)
	if !ok {
		return false
	}
	prog.Scond = bits
	return true
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+frame+lo+i)
		}
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
	} else {
		//	ADDV	$(8+frame+lo-8), SP, r1
		//	ADDV	$cnt, r1, r2
		// loop:
		//	MOVV	R0, (Widthptr)r1
		//	ADDV	$Widthptr, r1
		//	BNE	r1, r2, loop
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, mips.REGRT1, 0)
		p.Reg = mips.REGSP
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
		p.Reg = mips.REGRT1
		p = appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
		p = appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
		p.Reg = mips.REGRT2
		gc.Patch(p, p1)
	}

	return p
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		// check is
		//	CBNZ arg, 2(PC)
		//	MOVD ZR, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p.As = arm64.ACBNZ
		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = arm64.AMOVD
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = arm64.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = p.From.Reg
		p1.To.Offset = 0
	}
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p = appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGRT1, 0)
		p.Reg = arm64.REGRT1
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGTMP, 0)
		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p = appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p.Reg = arm64.REGRT1
		p = appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm64.REGTMP, 0)
		p = appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT2, 0)
		p.Reg = arm64.REGRT1
		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
		p.Scond = arm64.C_XPRE
		p1 := p
		p = appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm64.REGRT2
		p = appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
/*
 * The idea is to remove redundant constants.
 *	$c1->v1
 *	($c1->v2 s/$c1/v1)*
 *	set v1  return
 * The v1->v2 should be eliminated by copy propagation.
 */
func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
	if gc.Debug['P'] != 0 {
		fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
	}
	var p *obj.Prog
	for ; r != nil; r = r.S1 {
		p = r.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("%v", p)
		}
		if gc.Uniqp(r) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("; merge; return\n")
			}
			return
		}

		if p.As == arm.AMOVW && copyas(&p.From, c1) {
			if gc.Debug['P'] != 0 {
				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
			}
			p.From = *v1
		} else if copyu(p, v1, nil) > 1 {
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
			}
			return
		}

		if gc.Debug['P'] != 0 {
			fmt.Printf("\n")
		}
		if r.S2 != nil {
			constprop(c1, v1, r.S2)
		}
	}
}
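// Illustrative before/after (assumed operands, not from the source) of the
// rewrite constprop performs while walking successors of the defining move:
//
//	MOVW $42, R1    // $c1 -> v1, the starting point
//	...
//	MOVW $42, R2    // another load of $c1: rewritten to MOVW R1, R2
//
// The resulting register-to-register move is then a candidate for copy
// propagation, which is expected to eliminate it.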
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno
	q.From.Type = int16(ftype)
	q.From.Reg = int16(freg)
	q.From.Offset = foffset
	q.To.Type = int16(ttype)
	q.To.Reg = int16(treg)
	q.To.Offset = toffset
	q.Link = p.Link
	p.Link = q
	return q
}
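// appendZeroStore is a hypothetical helper (not in the original source)
// showing the pattern appendpp supports: it splices one instruction after p
// and returns the new tail, which is what lets zerorange above chain
// emissions. Register names borrow the ppc64 constants used earlier.
func appendZeroStore(p *obj.Prog, off int64) *obj.Prog {
	// MOVD REGZERO, off(REGSP), inserted immediately after p
	return appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, off)
}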
func addnop(ctxt *obj.Link, p *obj.Prog) {
	q := ctxt.NewProg()

	// we want to use the canonical NOP (SLL $0,R0,R0) here,
	// however, as the assembler will always replace $0
	// with R0, we have to resort to manually encoding the SLL
	// instruction as WORD $0.
	q.As = AWORD
	q.Lineno = p.Lineno
	q.From.Type = obj.TYPE_CONST
	q.From.Name = obj.NAME_NONE
	q.From.Offset = 0

	q.Link = p.Link
	p.Link = q
}
// append adds the Prog to the end of the program-thus-far.
// If doLabel is set, it also defines the labels collected for this Prog.
func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
	if cond != "" {
		switch p.arch.Thechar {
		case '5':
			if !arch.ARMConditionCodes(prog, cond) {
				p.errorf("unrecognized condition code .%q", cond)
				return
			}

		case '7':
			if !arch.ARM64Suffix(prog, cond) {
				p.errorf("unrecognized suffix .%q", cond)
				return
			}

		default:
			p.errorf("unrecognized suffix .%q", cond)
			return
		}
	}
	if p.firstProg == nil {
		p.firstProg = prog
	} else {
		p.lastProg.Link = prog
	}
	p.lastProg = prog
	if doLabel {
		p.pc++
		for _, label := range p.pendingLabels {
			if p.labels[label] != nil {
				p.errorf("label %q multiply defined", label)
				return
			}
			p.labels[label] = prog
		}
		p.pendingLabels = p.pendingLabels[0:0]
	}
	prog.Pc = int64(p.pc)
	if *flags.Debug {
		fmt.Println(p.histLineNum, prog)
	}
	if testOut != nil {
		fmt.Fprintln(testOut, prog)
	}
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
	p.From.Class = 0
	p.To.Class = 0

	// Rewrite B/BL to symbol as TYPE_BRANCH.
	switch p.As {
	case AB, ABL, obj.ADUFFZERO, obj.ADUFFCOPY:
		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
			p.To.Type = obj.TYPE_BRANCH
		}
	}

	// Replace TLS register fetches on older ARM processors.
	switch p.As {
	// Treat MRC 15, 0, <reg>, C13, C0, 3 specially.
	case AMRC:
		if p.To.Offset&0xffff0fff == 0xee1d0f70 {
			// Because the instruction might be rewritten to a BL which returns in R0
			// the register must be zero.
			if p.To.Offset&0xf000 != 0 {
				ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
			}

			if ctxt.Goarm < 7 {
				// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
				if progedit_tlsfallback == nil {
					progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
				}

				// MOVW	LR, R11
				p.As = AMOVW

				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGLINK
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REGTMP

				// BL	runtime.read_tls_fallback(SB)
				p = obj.Appendp(ctxt, p)

				p.As = ABL
				p.To.Type = obj.TYPE_BRANCH
				p.To.Sym = progedit_tlsfallback
				p.To.Offset = 0

				// MOVW	R11, LR
				p = obj.Appendp(ctxt, p)

				p.As = AMOVW
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REGTMP
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REGLINK
				break
			}
		}

		// Otherwise, MRC/MCR instructions need no further treatment.
		p.As = AWORD
	}

	// Rewrite float constants to values stored in memory.
	switch p.As {
	case AMOVF:
		if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
			f32 := float32(p.From.Val.(float64))
			i32 := math.Float32bits(f32)
			literal := fmt.Sprintf("$f32.%08x", i32)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = s
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}

	case AMOVD:
		if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.Val.(float64)) < 0 && (chipzero5(ctxt, p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
			i64 := math.Float64bits(p.From.Val.(float64))
			literal := fmt.Sprintf("$f64.%016x", i64)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Sym = s
			p.From.Name = obj.NAME_EXTERN
			p.From.Offset = 0
		}
	}

	if ctxt.Flag_dynlink {
		rewriteToUseGot(ctxt, p)
	}
}
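// Worked example (illustrative, not from the source) of the AMOVF rewrite
// above: a float constant with no direct chip encoding is redirected through
// a pool symbol named after its bit pattern.
//
//	// MOVF $2.5, F0, where chipfloat5 returns < 0:
//	//   math.Float32bits(2.5) == 0x40200000
//	//   literal == "$f32.40200000"
//	// so the operand becomes MOVF $f32.40200000(SB), F0, with the
//	// pool symbol's data emitted elsewhere by the linker support code.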
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	autosize := int32(0)

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	softfloat(ctxt, cursym)

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Locals = autoffset
	cursym.Args = p.To.Val.(int32)

	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	var q1 *obj.Prog
	var q *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		switch p.As {
		case obj.ATEXT:
			p.Mark |= LEAF

		case obj.ARET:
			break

		case ADIV, ADIVU, AMOD, AMODU:
			q = p
			if ctxt.Sym_div == nil {
				initdiv(ctxt)
			}
			cursym.Text.Mark &^= LEAF
			continue

		case obj.ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			if q1 != nil {
				q1.Mark |= p.Mark
			}
			continue

		case ABL,
			ABX,
			obj.ADUFFZERO,
			obj.ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough

		case AB,
			ABEQ,
			ABNE,
			ABCS,
			ABHS,
			ABCC,
			ABLO,
			ABMI,
			ABPL,
			ABVS,
			ABVC,
			ABHI,
			ABLS,
			ABGE,
			ABLT,
			ABGT,
			ABLE:
			q1 = p.Pcond
			if q1 != nil {
				for q1.As == obj.ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}
			}
		}

		q = p
	}

	var o int
	var p1 *obj.Prog
	var p2 *obj.Prog
	var q2 *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		o = int(p.As)
		switch o {
		case obj.ATEXT:
			autosize = int32(p.To.Offset + 4)
			if autosize <= 4 {
				if cursym.Text.Mark&LEAF != 0 {
					p.To.Offset = -4
					autosize = 0
				}
			}

			if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
				if ctxt.Debugvlog != 0 {
					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
					ctxt.Bso.Flush()
				}

				cursym.Text.Mark |= LEAF
			}

			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = 1
				if autosize == 0 {
					break
				}
			}

			if p.From3.Offset&obj.NOSPLIT == 0 {
				p = stacksplit(ctxt, p, autosize) // emit split check
			}

			// MOVW.W	R14,$-autosize(SP)
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.Scond |= C_WBIT
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REGLINK
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(-autosize)
			p.To.Reg = REGSP
			p.Spadj = autosize

			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOVW g_panic(g), R1
				//	CMP $0, R1
				//	B.EQ end
				//	MOVW panic_argp(R1), R2
				//	ADD $(autosize+4), R13, R3
				//	CMP R2, R3
				//	B.NE end
				//	ADD $4, R13, R4
				//	MOVW R4, panic_argp(R1)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.

				p = obj.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REGG
				p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R1

				p = obj.Appendp(ctxt, p)
				p.As = ACMP
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = 0
				p.Reg = REG_R1

				p = obj.Appendp(ctxt, p)
				p.As = ABEQ
				p.To.Type = obj.TYPE_BRANCH
				p1 = p

				p = obj.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_R1
				p.From.Offset = 0 // Panic.argp
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R2

				p = obj.Appendp(ctxt, p)
				p.As = AADD
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = int64(autosize) + 4
				p.Reg = REG_R13
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R3

				p = obj.Appendp(ctxt, p)
				p.As = ACMP
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REG_R2
				p.Reg = REG_R3

				p = obj.Appendp(ctxt, p)
				p.As = ABNE
				p.To.Type = obj.TYPE_BRANCH
				p2 = p

				p = obj.Appendp(ctxt, p)
				p.As = AADD
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = 4
				p.Reg = REG_R13
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R4

				p = obj.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REG_R4
				p.To.Type = obj.TYPE_MEM
				p.To.Reg = REG_R1
				p.To.Offset = 0 // Panic.argp

				p = obj.Appendp(ctxt, p)

				p.As = obj.ANOP
				p1.Pcond = p
				p2.Pcond = p
			}

		case obj.ARET:
			obj.Nocache(p)
			if cursym.Text.Mark&LEAF != 0 {
				if autosize == 0 {
					p.As = AB
					p.From = obj.Addr{}
					if p.To.Sym != nil { // retjmp
						p.To.Type = obj.TYPE_BRANCH
					} else {
						p.To.Type = obj.TYPE_MEM
						p.To.Offset = 0
						p.To.Reg = REGLINK
					}

					break
				}
			}

			p.As = AMOVW
			p.Scond |= C_PBIT
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(autosize)
			p.From.Reg = REGSP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGPC

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so no spadj.
			if p.To.Sym != nil { // retjmp
				p.To.Reg = REGLINK
				q2 = obj.Appendp(ctxt, p)
				q2.As = AB
				q2.To.Type = obj.TYPE_BRANCH
				q2.To.Sym = p.To.Sym
				p.To.Sym = nil
				p = q2
			}

		case AADD:
			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.From.Offset)
			}

		case ASUB:
			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
				p.Spadj = int32(p.From.Offset)
			}

		case ADIV, ADIVU, AMOD, AMODU:
			if cursym.Text.From3.Offset&obj.NOSPLIT != 0 {
				ctxt.Diag("cannot divide in NOSPLIT function")
			}
			if ctxt.Debugdivmod != 0 {
				break
			}
			if p.From.Type != obj.TYPE_REG {
				break
			}
			if p.To.Type != obj.TYPE_REG {
				break
			}

			// Make copy because we overwrite p below.
			q1 := *p
			if q1.Reg == REGTMP || q1.Reg == 0 && q1.To.Reg == REGTMP {
				ctxt.Diag("div already using REGTMP: %v", p)
			}

			/* MOV m(g),REGTMP */
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REGG
			p.From.Offset = 6 * 4 // offset of g.m
			p.Reg = 0
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGTMP

			/* MOV a,m_divmod(REGTMP) */
			p = obj.Appendp(ctxt, p)
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_REG
			p.From.Reg = q1.From.Reg
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REGTMP
			p.To.Offset = 8 * 4 // offset of m.divmod

			/* MOV b,REGTMP */
			p = obj.Appendp(ctxt, p)
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_REG
			p.From.Reg = q1.Reg
			if q1.Reg == 0 {
				p.From.Reg = q1.To.Reg
			}
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGTMP
			p.To.Offset = 0

			/* CALL appropriate */
			p = obj.Appendp(ctxt, p)
			p.As = ABL
			p.Lineno = q1.Lineno
			p.To.Type = obj.TYPE_BRANCH
			switch o {
			case ADIV:
				p.To.Sym = ctxt.Sym_div

			case ADIVU:
				p.To.Sym = ctxt.Sym_divu

			case AMOD:
				p.To.Sym = ctxt.Sym_mod

			case AMODU:
				p.To.Sym = ctxt.Sym_modu
			}

			/* MOV REGTMP, b */
			p = obj.Appendp(ctxt, p)
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REGTMP
			p.From.Offset = 0
			p.To.Type = obj.TYPE_REG
			p.To.Reg = q1.To.Reg

		case AMOVW:
			if (p.Scond&C_WBIT != 0) && p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP {
				p.Spadj = int32(-p.To.Offset)
			}
			if (p.Scond&C_PBIT != 0) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP && p.To.Reg != REGPC {
				p.Spadj = int32(-p.From.Offset)
			}
			if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.From.Offset)
			}
		}
	}
}
// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
		//     ADUFFxxx $offset
		// becomes
		//     MOVW runtime.duffxxx@GOT, R9
		//     ADD $offset, R9
		//     CALL (R9)
		var sym *obj.LSym
		if p.As == obj.ADUFFZERO {
			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
		} else {
			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
		}
		offset := p.To.Offset
		p.As = AMOVW
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		p.From.Sym = sym
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R9
		p.To.Name = obj.NAME_NONE
		p.To.Offset = 0
		p.To.Sym = nil
		p1 := obj.Appendp(ctxt, p)
		p1.As = AADD
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = offset
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = REG_R9
		p2 := obj.Appendp(ctxt, p1)
		p2.As = obj.ACALL
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = REG_R9
		return
	}

	// We only care about global data: NAME_EXTERN means a global
	// symbol in the Go sense, and p.Sym.Local is true for a few
	// internally defined symbols.
	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
		// MOVW $sym, Rx becomes MOVW sym@GOT, Rx
		// MOVW $sym+<off>, Rx becomes MOVW sym@GOT, Rx; ADD <off>, Rx
		if p.As != AMOVW {
			ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
		}
		if p.To.Type != obj.TYPE_REG {
			ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
		}
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		if p.From.Offset != 0 {
			q := obj.Appendp(ctxt, p)
			q.As = AADD
			q.From.Type = obj.TYPE_CONST
			q.From.Offset = p.From.Offset
			q.To = p.To
			p.From.Offset = 0
		}
	}
	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	var source *obj.Addr
	// MOVx sym, Ry becomes MOVW sym@GOT, R9; MOVx (R9), Ry
	// MOVx Ry, sym becomes MOVW sym@GOT, R9; MOVx Ry, (R9)
	// An addition may be inserted between the two MOVs if there is an offset.
	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
		}
		source = &p.From
	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
		source = &p.To
	} else {
		return
	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	if source.Sym.Type == obj.STLSBSS {
		return
	}
	if source.Type != obj.TYPE_MEM {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	p1 := obj.Appendp(ctxt, p)
	p2 := obj.Appendp(ctxt, p1)
	p1.As = AMOVW
	p1.From.Type = obj.TYPE_MEM
	p1.From.Sym = source.Sym
	p1.From.Name = obj.NAME_GOTREF
	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = REG_R9

	p2.As = p.As
	p2.From = p.From
	p2.To = p.To
	if p.From.Name == obj.NAME_EXTERN {
		p2.From.Reg = REG_R9
		p2.From.Name = obj.NAME_NONE
		p2.From.Sym = nil
	} else if p.To.Name == obj.NAME_EXTERN {
		p2.To.Reg = REG_R9
		p2.To.Name = obj.NAME_NONE
		p2.To.Sym = nil
	} else {
		return
	}
	obj.Nopout(p)
}
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copyu returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		mips.AMOVV,
		mips.AMOVF,
		mips.AMOVD,
		mips.AMOVH,
		mips.AMOVHU,
		mips.AMOVB,
		mips.AMOVBU,
		mips.AMOVW,
		mips.AMOVWU,
		mips.AMOVFD,
		mips.AMOVDF,
		mips.AMOVDW,
		mips.AMOVWD,
		mips.AMOVFW,
		mips.AMOVWF,
		mips.AMOVDV,
		mips.AMOVVD,
		mips.AMOVFV,
		mips.AMOVVF,
		mips.ATRUNCFV,
		mips.ATRUNCDV,
		mips.ATRUNCFW,
		mips.ATRUNCDW:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			// Fix up implicit from
			if p.From.Type == obj.TYPE_NONE {
				p.From = p.To
			}
			if copyau(&p.From, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau(&p.To, v) {
			// p->to only indirectly uses v
			return 1
		}

		return 0

	case mips.ASGT, /* read p->from, read p->reg, write p->to */
		mips.ASGTU,
		mips.AADD,
		mips.AADDU,
		mips.ASUB,
		mips.ASUBU,
		mips.ASLL,
		mips.ASRL,
		mips.ASRA,
		mips.AOR,
		mips.ANOR,
		mips.AAND,
		mips.AXOR,
		mips.AADDV,
		mips.AADDVU,
		mips.ASUBV,
		mips.ASUBVU,
		mips.ASLLV,
		mips.ASRLV,
		mips.ASRAV,
		mips.AADDF,
		mips.AADDD,
		mips.ASUBF,
		mips.ASUBD,
		mips.AMULF,
		mips.AMULD,
		mips.ADIVF,
		mips.ADIVD:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ACHECKNIL, /* read p->from */
		mips.ABEQ, /* read p->from, read p->reg */
		mips.ABNE,
		mips.ABGTZ,
		mips.ABGEZ,
		mips.ABLTZ,
		mips.ABLEZ,
		mips.ACMPEQD,
		mips.ACMPEQF,
		mips.ACMPGED,
		mips.ACMPGEF,
		mips.ACMPGTD,
		mips.ACMPGTF,
		mips.ABFPF,
		mips.ABFPT,
		mips.AMUL,
		mips.AMULU,
		mips.ADIV,
		mips.ADIVU,
		mips.AMULV,
		mips.AMULVU,
		mips.ADIVV,
		mips.ADIVVU:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub1(p, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case mips.AJMP: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case mips.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case mips.AJAL: /* funny */
		if v.Type == obj.TYPE_REG {
			// TODO(rsc): REG_R0 and REG_F0 used to be
			// (when register numbers started at 0) exregoffset and exfregoffset,
			// which are unset entirely.
			// It's strange that this handles R0 and F0 differently from the other
			// registers. Possible failure to optimize?
			if mips.REG_R0 < v.Reg && v.Reg <= mips.REG_R31 {
				return 2
			}
			if v.Reg == mips.REGARG {
				return 2
			}
			if mips.REG_F0 < v.Reg && v.Reg <= mips.REG_F31 {
				return 2
			}
		}

		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R0 is zero, used by DUFFZERO, cannot be substituted.
	// R1 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 0 {
				return 1
			}
			if v.Reg == 1 {
				return 2
			}
		}

		return 0

	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
	// R3 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 1 || v.Reg == 2 {
				return 2
			}
			if v.Reg == 3 {
				return 3
			}
		}

		return 0

	case obj.ATEXT: /* funny */
		if v.Type == obj.TYPE_REG {
			if v.Reg == mips.REGARG {
				return 3
			}
		}
		return 0

	case obj.APCDATA,
		obj.AFUNCDATA,
		obj.AVARDEF,
		obj.AVARKILL,
		obj.AVARLIVE,
		obj.AUSEFIELD:
		return 0
	}
}
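// subUses is a hedged sketch (not in the original source) of how a peephole
// pass typically consumes copyu's return codes: scanning forward from a
// definition of v on straight-line flow (gc.Uniqs, as used elsewhere here),
// replacing reads of v with the equivalent register s until v dies.
func subUses(r *gc.Flow, v, s *obj.Addr) bool {
	for r = gc.Uniqs(r); r != nil; r = gc.Uniqs(r) {
		switch copyu(r.Prog, v, nil) {
		case 0: // v not touched: keep scanning
			continue
		case 1: // v only read: rewrite those reads to use s
			if copyu(r.Prog, v, s) != 0 {
				return false // substitution failed
			}
		case 3: // v is rewritten here: all earlier uses patched, done
			return true
		default: // 2 or 4: v set and used together; give up
			return false
		}
	}
	return false // fell off the trace without seeing v die
}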
/*
 * ASLL x,y,w
 * .. (not use w, not set x y w)
 * AXXX w,a,b (a != w)
 * .. (not use w)
 * (set w)
 * ----------- changed to
 * ..
 * AXXX (x<<y),a,b
 * ..
 */
func shiftprop(r *gc.Flow) bool {
	p := r.Prog
	if p.To.Type != obj.TYPE_REG {
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
		}
		return false
	}

	n := int(p.To.Reg)
	a := obj.Addr{}
	if p.Reg != 0 && p.Reg != p.To.Reg {
		a.Type = obj.TYPE_REG
		a.Reg = p.Reg
	}

	if gc.Debug['P'] != 0 {
		fmt.Printf("shiftprop\n%v", p)
	}
	r1 := r
	var p1 *obj.Prog
	for {
		/* find first use of shift result; abort if shift operands or result are changed */
		r1 = gc.Uniqs(r1)

		if r1 == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tbranch; FAILURE\n")
			}
			return false
		}

		if gc.Uniqp(r1) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tmerge; FAILURE\n")
			}
			return false
		}

		p1 = r1.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("\n%v", p1)
		}
		switch copyu(p1, &p.To, nil) {
		case 0: /* not used or set */
			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\targs modified; FAILURE\n")
				}
				return false
			}

			continue
		case 3: /* set, not used */
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: noref; FAILURE\n")
			}
			return false
		}

		break
	}

	/* check whether substitution can be done */
	switch p1.As {
	default:
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tnon-dpi; FAILURE\n")
		}
		return false

	case arm.AAND,
		arm.AEOR,
		arm.AADD,
		arm.AADC,
		arm.AORR,
		arm.ASUB,
		arm.ASBC,
		arm.ARSB,
		arm.ARSC:
		if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
			if p1.From.Type != obj.TYPE_REG {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tcan't swap; FAILURE\n")
				}
				return false
			}

			p1.Reg = p1.From.Reg
			p1.From.Reg = int16(n)
			switch p1.As {
			case arm.ASUB:
				p1.As = arm.ARSB

			case arm.ARSB:
				p1.As = arm.ASUB

			case arm.ASBC:
				p1.As = arm.ARSC

			case arm.ARSC:
				p1.As = arm.ASBC
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("\t=>%v", p1)
			}
		}
		fallthrough

	case arm.ABIC,
		arm.ATST,
		arm.ACMP,
		arm.ACMN:
		if int(p1.Reg) == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tcan't swap; FAILURE\n")
			}
			return false
		}

		if p1.Reg == 0 && int(p1.To.Reg) == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used twice; FAILURE\n")
			}
			return false
		}

		// case AMVN:
		if p1.From.Type == obj.TYPE_SHIFT {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used in shift; FAILURE\n")
			}
			return false
		}

		if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
			}
			return false
		}
	}

	/* check whether shift result is used subsequently */
	p2 := p1

	if int(p1.To.Reg) != n {
		var p1 *obj.Prog
		for {
			r1 = gc.Uniqs(r1)
			if r1 == nil {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tinconclusive; FAILURE\n")
				}
				return false
			}

			p1 = r1.Prog
			if gc.Debug['P'] != 0 {
				fmt.Printf("\n%v", p1)
			}
			switch copyu(p1, &p.To, nil) {
			case 0: /* not used or set */
				continue

			case 3: /* set, not used */
				break

			default: /* used */
				if gc.Debug['P'] != 0 {
					fmt.Printf("\treused; FAILURE\n")
				}
				return false
			}

			break
		}
	}

	/* make the substitution */
	p2.From.Reg = 0

	o := int(p.Reg)
	if o == 0 {
		o = int(p.To.Reg)
	}
	o &= 15

	switch p.From.Type {
	/* shift is constant */
	case obj.TYPE_CONST:
		o |= int((p.From.Offset & 0x1f) << 7)

	case obj.TYPE_REG:
		o |= 1<<4 | (int(p.From.Reg)&15)<<8
	}

	switch p.As {
	case arm.ASLL:
		o |= 0 << 5

	case arm.ASRL:
		o |= 1 << 5

	case arm.ASRA:
		o |= 2 << 5
	}

	p2.From = obj.Addr{}
	p2.From.Type = obj.TYPE_SHIFT
	p2.From.Offset = int64(o)
	if gc.Debug['P'] != 0 {
		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
	}
	return true
}
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOVW	g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)

	p.As = AMOVW
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1

	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REGSP
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = obj.Appendp(ctxt, p)

		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = int64(-framesize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		p.Reg = REG_R1

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R3
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.Reg = REG_R2
		p.Scond = C_SCOND_NE
	}

	// BLS call-to-morestack
	bls := obj.Appendp(ctxt, p)
	bls.As = ABLS
	bls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	// MOVW	LR, R3
	movw := obj.Appendp(ctxt, spfix)
	movw.As = AMOVW
	movw.From.Type = obj.TYPE_REG
	movw.From.Reg = REGLINK
	movw.To.Type = obj.TYPE_REG
	movw.To.Reg = REG_R3

	bls.Pcond = movw

	// BL runtime.morestack
	call := obj.Appendp(ctxt, movw)
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.Cfunc != 0:
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)

	// B start
	b := obj.Appendp(ctxt, call)
	b.As = obj.AJMP
	b.To.Type = obj.TYPE_BRANCH
	b.Pcond = ctxt.Cursym.Text.Link
	b.Spadj = +framesize

	return bls
}
// UNUSED
func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	var r *gc.Flow
	var p *obj.Prog
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		/*
		 * elide shift into TYPE_SHIFT operand of subsequent instruction
		 */
		//			if(shiftprop(r)) {
		//				excise(r);
		//				t++;
		//				break;
		//			}
		case arm.ASLL,
			arm.ASRL,
			arm.ASRA:
			break

		case arm.AMOVB,
			arm.AMOVH,
			arm.AMOVW,
			arm.AMOVF,
			arm.AMOVD:
			if regtyp(&p.From) {
				if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
					if p.Scond == arm.C_SCOND_NONE {
						if copyprop(g, r) {
							excise(r)
							t++
							break
						}

						if subprop(r) && copyprop(g, r) {
							excise(r)
							t++
							break
						}
					}
				}
			}

		case arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVBS,
			arm.AMOVBU:
			if p.From.Type == obj.TYPE_REG {
				if shortprop(r) {
					t++
				}
			}
		}
	}

	/*
		if(p->scond == C_SCOND_NONE)
		if(regtyp(&p->to))
		if(isdconst(&p->from)) {
			constprop(&p->from, &p->to, r->s1);
		}
		break;
	*/
	if t != 0 {
		goto loop1
	}

	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		/*
		 * EOR -1,x,y => MVN x,y
		 */
		case arm.AEOR:
			if isdconst(&p.From) && p.From.Offset == -1 {
				p.As = arm.AMVN
				p.From.Type = obj.TYPE_REG
				if p.Reg != 0 {
					p.From.Reg = p.Reg
				} else {
					p.From.Reg = p.To.Reg
				}
				p.Reg = 0
			}
		}
	}

	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case arm.AMOVW,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU:
			if p.From.Type == obj.TYPE_MEM && p.From.Offset == 0 {
				xtramodes(g, r, &p.From)
			} else if p.To.Type == obj.TYPE_MEM && p.To.Offset == 0 {
				xtramodes(g, r, &p.To)
			} else {
				continue
			}
		}
	}

	//	case ACMP:
	//		/*
	//		 * elide CMP $0,x if calculation of x can set condition codes
	//		 */
	//		if(isdconst(&p->from) || p->from.offset != 0)
	//			continue;
	//		r2 = r->s1;
	//		if(r2 == nil)
	//			continue;
	//		t = r2->prog->as;
	//		switch(t) {
	//		default:
	//			continue;
	//		case ABEQ:
	//		case ABNE:
	//		case ABMI:
	//		case ABPL:
	//			break;
	//		case ABGE:
	//			t = ABPL;
	//			break;
	//		case ABLT:
	//			t = ABMI;
	//			break;
	//		case ABHI:
	//			t = ABNE;
	//			break;
	//		case ABLS:
	//			t = ABEQ;
	//			break;
	//		}
	//		r1 = r;
	//		do
	//			r1 = uniqp(r1);
	//		while (r1 != nil && r1->prog->as == ANOP);
	//		if(r1 == nil)
	//			continue;
	//		p1 = r1->prog;
	//		if(p1->to.type != TYPE_REG)
	//			continue;
	//		if(p1->to.reg != p->reg)
	//		if(!(p1->as == AMOVW && p1->from.type == TYPE_REG && p1->from.reg == p->reg))
	//			continue;
	//
	//		switch(p1->as) {
	//		default:
	//			continue;
	//		case AMOVW:
	//			if(p1->from.type != TYPE_REG)
	//				continue;
	//		case AAND:
	//		case AEOR:
	//		case AORR:
	//		case ABIC:
	//		case AMVN:
	//		case ASUB:
	//		case ARSB:
	//		case AADD:
	//		case AADC:
	//		case ASBC:
	//		case ARSC:
	//			break;
	//		}
	//		p1->scond |= C_SBIT;
	//		r2->prog->as = t;
	//		excise(r);
	//		continue;

	// predicate(g);

	gc.Flowend(g)
}
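// Illustrative before/after (assumed operands) for the EOR rewrite above:
//
//	EOR $-1, R1, R2    // y = x ^ 0xffffffff, i.e. bitwise NOT of R1
//
// becomes
//
//	MVN R1, R2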
/*
 * return
 * 1 if v only used (and substitute),
 * 2 if read-alter-rewrite
 * 3 if set
 * 4 if set and used
 * 0 otherwise (not touched)
 */
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case arm.AMOVM:
		if v.Type != obj.TYPE_REG {
			return 0
		}
		if p.From.Type == obj.TYPE_CONST { /* read reglist, read/rar */
			if s != nil {
				if p.From.Offset&(1<<uint(v.Reg)) != 0 {
					return 1
				}
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.To, v) {
				if p.Scond&arm.C_WBIT != 0 {
					return 2
				}
				return 1
			}

			if p.From.Offset&(1<<uint(v.Reg)) != 0 {
				return 1 /* read/rar, write reglist */
			}
		} else {
			if s != nil {
				if p.To.Offset&(1<<uint(v.Reg)) != 0 {
					return 1
				}
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				if p.Scond&arm.C_WBIT != 0 {
					return 2
				}
				if p.To.Offset&(1<<uint(v.Reg)) != 0 {
					return 4
				}
				return 1
			}

			if p.To.Offset&(1<<uint(v.Reg)) != 0 {
				return 3
			}
		}

		return 0

	case obj.ANOP, /* read,, write */
		arm.ASQRTD,
		arm.AMOVW,
		arm.AMOVF,
		arm.AMOVD,
		arm.AMOVH,
		arm.AMOVHS,
		arm.AMOVHU,
		arm.AMOVB,
		arm.AMOVBS,
		arm.AMOVBU,
		arm.AMOVFW,
		arm.AMOVWF,
		arm.AMOVDW,
		arm.AMOVWD,
		arm.AMOVFD,
		arm.AMOVDF:
		if p.Scond&(arm.C_WBIT|arm.C_PBIT) != 0 {
			if v.Type == obj.TYPE_REG {
				if p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_SHIFT {
					if p.From.Reg == v.Reg {
						return 2
					}
				} else {
					if p.To.Reg == v.Reg {
						return 2
					}
				}
			}
		}

		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Scond != arm.C_SCOND_NONE {
				return 2
			}
			if copyau(&p.From, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm.AMULLU, /* read, read, write, write */
		arm.AMULL,
		arm.AMULA,
		arm.AMVN:
		return 2

	case arm.AADD, /* read, read, write */
		arm.AADC,
		arm.ASUB,
		arm.ASBC,
		arm.ARSB,
		arm.ASLL,
		arm.ASRL,
		arm.ASRA,
		arm.AORR,
		arm.AAND,
		arm.AEOR,
		arm.AMUL,
		arm.AMULU,
		arm.ADIV,
		arm.ADIVU,
		arm.AMOD,
		arm.AMODU,
		arm.AADDF,
		arm.AADDD,
		arm.ASUBF,
		arm.ASUBD,
		arm.AMULF,
		arm.AMULD,
		arm.ADIVF,
		arm.ADIVD,
		obj.ACHECKNIL, /* read */
		arm.ACMPF,     /* read, read, */
		arm.ACMPD,
		arm.ACMP,
		arm.ACMN,
		arm.ATST: /* read,, */
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Scond != arm.C_SCOND_NONE {
				return 2
			}
			if p.Reg == 0 {
				p.Reg = p.To.Reg
			}
			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm.ABEQ, /* read, read */
		arm.ABNE,
		arm.ABCS,
		arm.ABHS,
		arm.ABCC,
		arm.ABLO,
		arm.ABMI,
		arm.ABPL,
		arm.ABVS,
		arm.ABVC,
		arm.ABHI,
		arm.ABLS,
		arm.ABGE,
		arm.ABLT,
		arm.ABGT,
		arm.ABLE:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub1(p, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case arm.AB: /* funny */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 1
		}
		return 3

	case arm.ABL: /* funny */
		if v.Type == obj.TYPE_REG {
			// TODO(rsc): REG_R0 and REG_F0 used to be
			// (when register numbers started at 0) exregoffset and exfregoffset,
			// which are unset entirely.
			// It's strange that this handles R0 and F0 differently from the other
			// registers. Possible failure to optimize?
			if arm.REG_R0 < v.Reg && v.Reg <= arm.REGEXT {
				return 2
			}
			if v.Reg == arm.REGARG {
				return 2
			}
			if arm.REG_F0 < v.Reg && v.Reg <= arm.FREGEXT {
				return 2
			}
		}

		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R0 is zero, used by DUFFZERO, cannot be substituted.
	// R1 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm.REG_R0 {
				return 1
			}
			if v.Reg == arm.REG_R0+1 {
				return 2
			}
		}

		return 0

	// R0 is scratch, set by DUFFCOPY, cannot be substituted.
	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm.REG_R0 {
				return 3
			}
			if v.Reg == arm.REG_R0+1 || v.Reg == arm.REG_R0+2 {
				return 2
			}
		}

		return 0

	case obj.ATEXT: /* funny */
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm.REGARG {
				return 3
			}
		}
		return 0

	case obj.APCDATA,
		obj.AFUNCDATA,
		obj.AVARDEF,
		obj.AVARKILL,
		obj.AVARLIVE,
		obj.AUSEFIELD:
		return 0
	}
}
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var r *obj.Prog
	var a int
	var i int

loop:
	if p == nil {
		return
	}
	a = int(p.As)
	if a == AB {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			p.Mark |= FOLL
			p = q
			if p.Mark&FOLL == 0 {
				goto loop
			}
		}
	}

	if p.Mark&FOLL != 0 {
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == *last || q == nil {
				break
			}
			a = int(q.As)
			if a == obj.ANOP {
				i--
				continue
			}

			if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
				goto copy
			}
			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
				continue
			}
			if a != ABEQ && a != ABNE {
				continue
			}

		copy:
			for {
				r = ctxt.NewProg()
				*r = *p
				if r.Mark&FOLL == 0 {
					fmt.Printf("can't happen 1\n")
				}
				r.Mark |= FOLL
				if p != q {
					p = p.Link
					(*last).Link = r
					*last = r
					continue
				}

				(*last).Link = r
				*last = r
				if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
					return
				}
				r.As = ABNE
				if a == ABNE {
					r.As = ABEQ
				}
				r.Pcond = p.Link
				r.Link = p.Pcond
				if r.Link.Mark&FOLL == 0 {
					xfol(ctxt, r.Link, last)
				}
				if r.Pcond.Mark&FOLL == 0 {
					fmt.Printf("can't happen 2\n")
				}
				return
			}
		}

		a = AB
		q = ctxt.NewProg()
		q.As = int16(a)
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	p.Mark |= FOLL
	(*last).Link = p
	*last = p
	if a == AB || (a == obj.ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
		return
	}

	if p.Pcond != nil {
		if a != ABL && a != ABX && p.Link != nil {
			q = obj.Brchain(ctxt, p.Link)
			if a != obj.ATEXT {
				if q != nil && (q.Mark&FOLL != 0) {
					p.As = int16(relinv(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}

			xfol(ctxt, p.Link, last)
			q = obj.Brchain(ctxt, p.Pcond)
			if q == nil {
				q = p.Pcond
			}
			if q.Mark&FOLL != 0 {
				p.Pcond = q
				return
			}

			p = q
			goto loop
		}
	}

	p = p.Link
	goto loop
}
func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	var p *obj.Prog
	var r *gc.Flow
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog

		// TODO(minux) Handle smaller moves. arm and amd64
		// distinguish between moves that *must* sign/zero
		// extend and moves that don't care so they
		// can eliminate moves that don't care without
		// breaking moves that do care. This might let us
		// simplify or remove the next peep loop, too.
		if p.As == arm64.AMOVD || p.As == arm64.AFMOVD {
			if regtyp(&p.To) {
				// Try to eliminate reg->reg moves
				if regtyp(&p.From) {
					if p.From.Type == p.To.Type {
						if copyprop(r) {
							excise(r)
							t++
						} else if subprop(r) && copyprop(r) {
							excise(r)
							t++
						}
					}
				}
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	/*
	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
	 */
	var p1 *obj.Prog
	var r1 *gc.Flow
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVH,
			arm64.AMOVHU,
			arm64.AMOVB,
			arm64.AMOVBU,
			arm64.AMOVW,
			arm64.AMOVWU:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != p.As {
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
			continue
		}
		excise(r1)
	}

	if gc.Debug['D'] > 1 {
		goto ret /* allow following code improvement to be suppressed */
	}

	// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVD:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
			if p.From.Type != obj.TYPE_CONST {
				continue
			}
			if p.From.Offset < 0 || 4096 <= p.From.Offset {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != arm64.AADD && p1.As != arm64.ASUB { // TODO(aram): also logical after we have bimm.
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG {
			continue
		}
		if gc.Debug['P'] != 0 {
			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(int(p1.As)), p, p1)
		}
		p1.From.Type = obj.TYPE_CONST
		p1.From = p.From
		excise(r)
	}

	/* TODO(minux):
	 * look for OP x,y,R; CMP R, $0 -> OP.S x,y,R
	 * when OP can set condition codes correctly
	 */

ret:
	gc.Flowend(g)
}
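// Illustrative before/after (assumed operands) for the constant-folding loop
// above, which only fires for constants in [0, 4096):
//
//	MOVD $8, R1     // R1 otherwise unused
//	ADD  R1, R2
//
// becomes
//
//	ADD  $8, R2     // and the MOVD is excised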