// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}

		// check is
		//	CMP arg, $0
		//	JNE 2(PC) (likely)
		//	MOV AX, 0
		p1 = gc.Ctxt.NewProg()
		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = int16(cmpptr)
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = 0
		p1.As = x86.AJNE
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = 1 // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		// if possible, since we know arg is 0, use 0(arg),
		// which will be shorter to encode than plain 0.
		p2.As = x86.AMOVL
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = x86.REG_AX
		if regtyp(&p.From) {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = p.From.Reg
		} else {
			p2.To.Type = obj.TYPE_MEM
			p2.To.Reg = x86.REG_NONE
		}
		p2.To.Offset = 0
	}
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v\n", p)
		}

		// check is
		//	CMP arg, ZR
		//	BNE 2(PC) [likely]
		//	MOVD ZR, 0(arg)
		p1 = gc.Ctxt.NewProg()
		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = arm64.ACMP
		p.Reg = arm64.REGZERO
		p1.As = arm64.ABNE
		//p1->from.type = TYPE_CONST;
		//p1->from.offset = 1; // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		p2.As = arm64.AMOVD
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = arm64.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = p.From.Reg
		p2.To.Offset = 0
	}
}
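// Every expandchecks variant in this collection shares one mechanical step:
// overwrite the ACHECKNIL Prog in place and splice one or two freshly
// allocated Progs into the singly linked instruction list. Below is a
// minimal standalone sketch of that splice; the toy Prog type stands in for
// the real obj.Prog and the instruction strings are illustrative only.
package main

import "fmt"

type Prog struct {
	As   string
	Link *Prog
}

// expand rewrites the CHECKNIL node in place and splices two new nodes
// after it, mirroring the p.Link/p1.Link/p2.Link bookkeeping above.
func expand(p *Prog) {
	p1 := &Prog{As: "BNE 2(PC)"}
	p2 := &Prog{As: "MOVD ZR, (arg)"}
	p1.Link = p2
	p2.Link = p.Link
	p.Link = p1
	p.As = "CMP arg, ZR" // the pseudo-op becomes the compare
}

func main() {
	ret := &Prog{As: "RET"}
	check := &Prog{As: "CHECKNIL", Link: ret}
	expand(check)
	for p := check; p != nil; p = p.Link {
		fmt.Println(p.As) // CMP, BNE, MOVD, RET
	}
}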
func outgcode(a int, g1 *obj.Addr, reg int, g2, g3 *obj.Addr) {
	var p *obj.Prog
	var pl *obj.Plist

	if asm.Pass == 1 {
		goto out
	}

	p = asm.Ctxt.NewProg()
	p.As = int16(a)
	p.Lineno = stmtline
	if nosched != 0 {
		p.Mark |= ppc64.NOSCHED
	}
	p.From = *g1
	p.Reg = int16(reg)
	p.From3 = *g2
	p.To = *g3
	p.Pc = int64(asm.PC)

	if lastpc == nil {
		pl = obj.Linknewplist(asm.Ctxt)
		pl.Firstpc = p
	} else {
		lastpc.Link = p
	}
	lastpc = p

out:
	if a != obj.AGLOBL && a != obj.ADATA {
		asm.PC++
	}
}
func Prog(as int) *obj.Prog {
	var p *obj.Prog

	if as == obj.ADATA || as == obj.AGLOBL {
		if ddumped != 0 {
			Fatalf("already dumped data")
		}
		if dpc == nil {
			dpc = Ctxt.NewProg()
			dfirst = dpc
		}
		p = dpc
		dpc = Ctxt.NewProg()
		p.Link = dpc
	} else {
		p = Pc
		Pc = Ctxt.NewProg()
		Clearp(Pc)
		p.Link = Pc
	}

	if lineno == 0 {
		if Debug['K'] != 0 {
			Warn("prog: line 0")
		}
	}

	p.As = int16(as)
	p.Lineno = lineno
	return p
}
func outcode(a int, g2 *Addr2) {
	var p *obj.Prog
	var pl *obj.Plist

	if asm.Pass == 1 {
		goto out
	}

	p = new(obj.Prog)
	*p = obj.Prog{}
	p.Ctxt = asm.Ctxt
	p.As = int16(a)
	p.Lineno = stmtline
	p.From = g2.from
	p.To = g2.to
	p.Pc = int64(asm.PC)

	if lastpc == nil {
		pl = obj.Linknewplist(asm.Ctxt)
		pl.Firstpc = p
	} else {
		lastpc.Link = p
	}
	lastpc = p

out:
	if a != obj.AGLOBL && a != obj.ADATA {
		asm.PC++
	}
}
func Prog(as obj.As) *obj.Prog {
	var p *obj.Prog

	p = pc
	pc = Ctxt.NewProg()
	Clearp(pc)
	p.Link = pc

	if lineno == 0 && Debug['K'] != 0 {
		Warn("prog: line 0")
	}

	p.As = as
	p.Lineno = lineno
	return p
}
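// Both Prog allocators above keep a one-Prog lookahead: a package-level
// sentinel always points at a cleared placeholder that is already linked
// into the list, and each call hands that placeholder to the caller while
// linking a fresh blank one behind it. A standalone toy of the same
// sentinel pattern (hypothetical names, not the compiler's API):
package main

import "fmt"

// Prog is a toy stand-in for obj.Prog with only the fields needed here.
type Prog struct {
	As   string
	Link *Prog
}

var cur = &Prog{} // sentinel: always a blank node already on the list

// next mimics Prog above: hand out the current sentinel, link a fresh
// blank one behind it, and let the caller fill in the rest.
func next(as string) *Prog {
	p := cur
	cur = &Prog{}
	p.Link = cur
	p.As = as
	return p
}

func main() {
	head := next("MOVD")
	next("ADD")
	next("RET")
	for p := head; p != cur; p = p.Link {
		fmt.Println(p.As)
	}
}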
func outcode(a int, g1 *obj.Addr, reg int, g2 *obj.Addr) {
	var p *obj.Prog
	var pl *obj.Plist

	if asm.Pass == 1 {
		goto out
	}

	if g1.Scale != 0 {
		if reg != 0 || g2.Scale != 0 {
			yyerror("bad addressing modes")
		}
		reg = int(g1.Scale)
	} else if g2.Scale != 0 {
		if reg != 0 {
			yyerror("bad addressing modes")
		}
		reg = int(g2.Scale)
	}

	p = asm.Ctxt.NewProg()
	p.As = int16(a)
	p.Lineno = stmtline
	if nosched != 0 {
		p.Mark |= ppc64.NOSCHED
	}
	p.From = *g1
	p.Reg = int16(reg)
	p.To = *g2
	p.Pc = int64(asm.PC)

	if lastpc == nil {
		pl = obj.Linknewplist(asm.Ctxt)
		pl.Firstpc = p
	} else {
		lastpc.Link = p
	}
	lastpc = p

out:
	if a != obj.AGLOBL && a != obj.ADATA {
		asm.PC++
	}
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog

	for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v\n", p)
		}

		// check is
		//	CBNZ arg, 2(PC)
		//	MOVD ZR, 0(arg)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p.As = arm64.ACBNZ
		p.To.Type = obj.TYPE_BRANCH
		p.To.Val = p1.Link

		// crash by write to memory address 0.
		p1.As = arm64.AMOVD
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = arm64.REGZERO
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = p.From.Reg
		p1.To.Offset = 0
	}
}
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}
func outcode(a, scond int32, g1 *obj.Addr, reg int32, g2 *obj.Addr) {
	var p *obj.Prog
	var pl *obj.Plist

	/* hack to make B.NE etc. work: turn it into the corresponding conditional */
	if a == arm.AB {
		a = int32(bcode[(scond^arm.C_SCOND_XOR)&0xf])
		scond = (scond &^ 0xf) | Always
	}

	if asm.Pass == 1 {
		goto out
	}

	p = new(obj.Prog)
	*p = obj.Prog{}
	p.Ctxt = asm.Ctxt
	p.As = int16(a)
	p.Lineno = stmtline
	p.Scond = uint8(scond)
	p.From = *g1
	p.Reg = int16(reg)
	p.To = *g2
	p.Pc = int64(asm.PC)

	if lastpc == nil {
		pl = obj.Linknewplist(asm.Ctxt)
		pl.Firstpc = p
	} else {
		lastpc.Link = p
	}
	lastpc = p

out:
	if a != obj.AGLOBL && a != obj.ADATA {
		asm.PC++
	}
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { ctxt.Cursym = cursym if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset aoffset := int32(textstksiz) cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET */ q := (*obj.Prog)(nil) var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { case obj.ATEXT: p.Mark |= LEAF case obj.ARET: break case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue case ABL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case ACBNZ, ACBZ, ACBNZW, ACBZW, ATBZ, ATBNZ, AB, ABEQ, ABNE, ABCS, ABHS, ABCC, ABLO, ABMI, ABPL, ABVS, ABVC, ABHI, ABLS, ABGE, ABLT, ABGT, ABLE, AADR, /* strange */ AADRP: q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } } break } q = p } var q2 *obj.Prog var retjmp *obj.LSym for p := cursym.Text; p != nil; p = p.Link { o := p.As switch o { case obj.ATEXT: cursym.Text = p if textstksiz < 0 { ctxt.Autosize = 0 } else { ctxt.Autosize = int32(textstksiz + 8) } if (cursym.Text.Mark&LEAF != 0) && ctxt.Autosize <= 8 { ctxt.Autosize = 0 } else if ctxt.Autosize&(16-1) != 0 { // The frame includes an LR. // If the frame size is 8, it's only an LR, // so there's no potential for breaking references to // local variables by growing the frame size, // because there are no local variables. // But otherwise, if there is a non-empty locals section, // the author of the code is responsible for making sure // that the frame size is 8 mod 16. if ctxt.Autosize == 8 { ctxt.Autosize += 8 cursym.Locals += 8 } else { ctxt.Diag("%v: unaligned frame size %d - must be 8 mod 16 (or 0)", p, ctxt.Autosize-8) } } p.To.Offset = int64(ctxt.Autosize) - 8 if ctxt.Autosize == 0 && !(cursym.Text.Mark&LEAF != 0) { if ctxt.Debugvlog != 0 { ctxt.Logf("save suppressed in: %s\n", cursym.Text.From.Sym.Name) } cursym.Text.Mark |= LEAF } if !(p.From3.Offset&obj.NOSPLIT != 0) { p = stacksplit(ctxt, p, ctxt.Autosize) // emit split check } aoffset = ctxt.Autosize if aoffset > 0xF0 { aoffset = 0xF0 } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = true if ctxt.Autosize == 0 { break } aoffset = 0 } q = p if ctxt.Autosize > aoffset { q = ctxt.NewProg() q.As = ASUB q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(ctxt.Autosize) - int64(aoffset) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = int32(q.From.Offset) q.Link = p.Link p.Link = q if cursym.Text.Mark&LEAF != 0 { break } } q1 = ctxt.NewProg() q1.As = AMOVD q1.Lineno = p.Lineno q1.From.Type = obj.TYPE_REG q1.From.Reg = REGLINK q1.To.Type = obj.TYPE_MEM q1.Scond = C_XPRE q1.To.Offset = int64(-aoffset) q1.To.Reg = REGSP q1.Link = q.Link q1.Spadj = aoffset q.Link = q1 if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOV g_panic(g), R1 // CMP ZR, R1 // BEQ end // MOV panic_argp(R1), R2 // ADD $(autosize+8), RSP, R3 // CMP R2, R3 // BNE end // ADD $8, RSP, R4 // MOVD R4, panic_argp(R1) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ARM64 NOP: it encodes to 0 instruction bytes. 
q = q1 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R1 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REGZERO q.Reg = REG_R1 q = obj.Appendp(ctxt, q) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH q1 = q q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R1 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R2 q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(ctxt.Autosize) + 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABNE q.To.Type = obj.TYPE_BRANCH q2 = q q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R1 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP q1.Pcond = q q2.Pcond = q } case obj.ARET: nocache(p) if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } retjmp = p.To.Sym p.To = obj.Addr{} if cursym.Text.Mark&LEAF != 0 { if ctxt.Autosize != 0 { p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(ctxt.Autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -ctxt.Autosize } } else { /* want write-back pre-indexed SP+autosize -> SP, loading REGLINK*/ aoffset = ctxt.Autosize if aoffset > 0xF0 { aoffset = 0xF0 } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.Scond = C_XPOST p.From.Offset = int64(aoffset) p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGLINK p.Spadj = -aoffset if ctxt.Autosize > aoffset { q = ctxt.NewProg() q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(ctxt.Autosize) - int64(aoffset) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Link = p.Link q.Spadj = int32(-q.From.Offset) q.Lineno = p.Lineno p.Link = q p = q } } if p.As != obj.ARET { q = ctxt.NewProg() q.Lineno = p.Lineno q.Link = p.Link p.Link = q p = q } if retjmp != nil { // retjmp p.As = AB p.To.Type = obj.TYPE_BRANCH p.To.Sym = retjmp p.Spadj = +ctxt.Autosize break } p.As = obj.ARET p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = REGLINK p.Spadj = +ctxt.Autosize case AADD, ASUB: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { if p.As == AADD { p.Spadj = int32(-p.From.Offset) } else { p.Spadj = int32(+p.From.Offset) } } break } } }
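// The ATEXT case in the arm64 preprocess above enforces the frame invariant
// that SP stays 16-byte aligned: a nonzero frame, which always includes the
// 8-byte saved LR, must be 8 mod 16, and the only size the pass grows on its
// own is the bare-LR 8-byte frame (no locals, so nothing can break).
// A standalone sketch of just that rule; alignFrame is a made-up name, and
// autosize is the frame size with the LR slot already included.
package main

import "fmt"

// alignFrame mirrors the ATEXT alignment check: the total frame must be a
// multiple of 16. A bare-LR frame (autosize == 8) is padded up to 16; any
// other misaligned size is rejected, as ctxt.Diag does above.
func alignFrame(autosize int32) (int32, error) {
	if autosize&15 == 0 {
		return autosize, nil
	}
	if autosize == 8 { // only an LR, no locals: safe to grow
		return 16, nil
	}
	return 0, fmt.Errorf("unaligned frame size %d - must be 8 mod 16 (or 0)", autosize-8)
}

func main() {
	for _, n := range []int32{0, 8, 16, 24} {
		fmt.Println(alignFrame(n))
	}
}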
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var p1 *obj.Prog
	var p2 *obj.Prog

	for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
			fmt.Printf("expandchecks: %v\n", p)
		}
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v\n", p)
		}

		/*
			// check is
			//	TD $4, R0, arg (R0 is always zero)
			// eqv. to:
			// 	tdeq r0, arg
			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
			reg = p->from.reg;
			p->as = ATD;
			p->from = p->to = p->from3 = zprog.from;
			p->from.type = TYPE_CONST;
			p->from.offset = 4;
			p->from.reg = 0;
			p->reg = REGZERO;
			p->to.type = TYPE_REG;
			p->to.reg = reg;
		*/
		// check is
		//	CMP arg, R0
		//	BNE 2(PC) [likely]
		//	MOVD R0, 0(R0)
		p1 = gc.Ctxt.NewProg()
		p2 = gc.Ctxt.NewProg()
		gc.Clearp(p1)
		gc.Clearp(p2)
		p1.Link = p2
		p2.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p2.Lineno = p.Lineno
		p1.Pc = 9999
		p2.Pc = 9999
		p.As = ppc64.ACMP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGZERO
		p1.As = ppc64.ABNE
		//p1->from.type = TYPE_CONST;
		//p1->from.offset = 1; // likely
		p1.To.Type = obj.TYPE_BRANCH
		p1.To.Val = p2.Link

		// crash by write to memory address 0.
		p2.As = ppc64.AMOVD
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = ppc64.REGZERO
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = ppc64.REGZERO
		p2.To.Offset = 0
	}
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	var bpsize int
	if p.Mode == 64 && obj.Framepointer_enabled != 0 && autoffset > 0 {
		// Make room to save a base pointer. If autoffset == 0,
		// this might do something special like a tail jump to
		// another function, so in that case we omit this.
		bpsize = ctxt.Arch.Ptrsize

		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Args = int32(textarg)
	cursym.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
	if p.Mode == 32 && cursym.Locals < 0 {
		cursym.Locals = 0
	}

	// TODO(rsc): Remove 'p.Mode == 64 &&'.
	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
		for q := p; q != nil; q = q.Link {
			if q.As == obj.ACALL {
				goto noleaf
			}
			if (q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO) && autoffset >= obj.StackSmall-8 {
				goto noleaf
			}
		}

		p.From3.Offset |= obj.NOSPLIT
	noleaf:
	}

	if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
	}

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.Regsize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = obj.Appendp(ctxt, p)

		p.As = obj.ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	deltasp := autoffset

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)

		// Move current frame to BP
		p = obj.Appendp(ctxt, p)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_CX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ATESTL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		p1 := p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.Regsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ALEAL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = ACMPL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = ACMPL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p2 := p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R15
			p.To.Scale = 1
			p.To.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	var a int
	var pcsize int
	for ; p != nil; p = p.Link {
		pcsize = int(p.Mode) / 8
		a = int(p.From.Name)
		if a == obj.NAME_AUTO {
			p.From.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		if p.From3 != nil {
			a = int(p.From3.Name)
			if a == obj.NAME_AUTO {
				p.From3.Offset += int64(deltasp) - int64(bpsize)
			}
			if a == obj.NAME_PARAM {
				p.From3.Offset += int64(deltasp) + int64(pcsize)
			}
		}
		a = int(p.To.Name)
		if a == obj.NAME_AUTO {
			p.To.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case obj.ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(ctxt, p)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = obj.ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		a.Type = obj.TYPE_MEM
		a.Reg = REG_R15
		a.Index = REG_CX
		a.Scale = 1
		return
	}

	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
	p.As = AMOVQ
	if ctxt.Arch.Ptrsize == 4 {
		p.As = AMOVL
	}
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REG_TLS
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_CX

	next := p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == REG_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
	sub := ASUBQ

	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	var q1 *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ	stackguard, CX
		//	CMPQ	CX, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAQ	StackGuard(SP), AX
		//	SUBQ	CX, AX
		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))
		p = obj.Appendp(ctxt, p)

		p.As = mov
		indir_cx(ctxt, p, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc {
			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = obj.StackPreempt
		if p.Mode == 32 {
			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		q1 = p

		p = obj.Appendp(ctxt, p)
		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = sub
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
	}

	// common
	jls := obj.Appendp(ctxt, p)
	jls.As = AJLS
	jls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	call := obj.Appendp(ctxt, spfix)
	call.Lineno = ctxt.Cursym.Text.Lineno
	call.Mode = ctxt.Cursym.Text.Mode
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.Cfunc:
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)

	jmp := obj.Appendp(ctxt, call)
	jmp.As = obj.AJMP
	jmp.To.Type = obj.TYPE_BRANCH
	jmp.Pcond = ctxt.Cursym.Text.Link
	jmp.Spadj = +framesize

	jls.Pcond = call
	if q1 != nil {
		q1.Pcond = call
	}

	return jls
}

func follow(ctxt *obj.Link, s *obj.LSym) {
	ctxt.Cursym = s

	firstp := ctxt.NewProg()
	lastp := firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a obj.As) bool {
	switch a {
	case obj.AJMP,
		obj.ARET,
		AIRETL,
		AIRETQ,
		AIRETW,
		ARETFL,
		ARETFQ,
		ARETFW,
		obj.AUNDEF:
		return true
	}

	return false
}

func pushpop(a obj.As) bool {
	switch a {
	case APUSHL,
		APUSHFL,
		APUSHQ,
		APUSHFQ,
		APUSHW,
		APUSHFW,
		APOPL,
		APOPFL,
		APOPQ,
		APOPFQ,
		APOPW,
		APOPFW:
		return true
	}

	return false
}

func relinv(a obj.As) obj.As {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", obj.Aconv(a))
	return 0
}

func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a obj.As

loop:
	if p == nil {
		return
	}
	if p.As == obj.AJMP {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark |= DONE

			p = q
			if p.Mark&DONE == 0 {
				goto loop
			}
		}
	}

	if p.Mark&DONE != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = q.As
			if a == obj.ANOP {
				i--
				continue
			}

			if nofollow(a) || pushpop(a) {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
				continue
			}
			if a == obj.ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == obj.ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark |= DONE
				(*last).Link = q
				*last = q
				if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
					continue
				}

				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark&DONE != 0 {
					return
				}
				goto loop
				/* */
			}
		}

		q = ctxt.NewProg()
		q.As = obj.AJMP
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark |= DONE

	(*last).Link = p
	*last = p
	a = p.As

	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != obj.ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type == obj.TYPE_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(a)

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark&DONE != 0 {
				if a != ALOOP {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark&DONE != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}

var unaryDst = map[obj.As]bool{
	ABSWAPL:    true,
	ABSWAPQ:    true,
	ACMPXCHG8B: true,
	ADECB:      true,
	ADECL:      true,
	ADECQ:      true,
	ADECW:      true,
	AINCB:      true,
	AINCL:      true,
	AINCQ:      true,
	AINCW:      true,
	ANEGB:      true,
	ANEGL:      true,
	ANEGQ:      true,
	ANEGW:      true,
	ANOTB:      true,
	ANOTL:      true,
	ANOTQ:      true,
	ANOTW:      true,
	APOPL:      true,
	APOPQ:      true,
	APOPW:      true,
	ASETCC:     true,
	ASETCS:     true,
	ASETEQ:     true,
	ASETGE:     true,
	ASETGT:     true,
	ASETHI:     true,
	ASETLE:     true,
	ASETLS:     true,
	ASETLT:     true,
	ASETMI:     true,
	ASETNE:     true,
	ASETOC:     true,
	ASETOS:     true,
	ASETPC:     true,
	ASETPL:     true,
	ASETPS:     true,
	AFFREE:     true,
	AFLDENV:    true,
	AFSAVE:     true,
	AFSTCW:     true,
	AFSTENV:    true,
	AFSTSW:     true,
	AFXSAVE:    true,
	AFXSAVE64:  true,
	ASTMXCSR:   true,
}

var Linkamd64 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    8,
	Regsize:    8,
}

var Linkamd64p32 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64p32",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    8,
}

var Link386 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "386",
	Thechar:    '8',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    4,
}
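// The three stacksplit regimes above all answer one question: would the new
// frame dip below the goroutine's stack guard? A standalone sketch of the
// same comparisons follows. The constants are hypothetical stand-ins for
// obj.StackSmall, obj.StackBig, and obj.StackGuard (the real values live in
// the runtime); only the shape of the math mirrors the listing.
package main

import "fmt"

const (
	stackSmall = 128
	stackBig   = 4096
	stackGuard = 928
)

// needSplit mirrors the three stacksplit cases: compare SP (or a value
// derived from it) against the stack guard.
func needSplit(sp, guard, framesize uint64) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: SP <= stackguard
		return sp <= guard
	case framesize <= stackBig:
		// large stack: SP-framesize <= stackguard-StackSmall
		return sp-(framesize-stackSmall) <= guard
	default:
		// huge stack: guard against wraparound near zero:
		// SP-stackguard+StackGuard <= framesize+(StackGuard-StackSmall).
		// (The real code first checks for the StackPreempt poison value.)
		return sp-guard+stackGuard <= framesize+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needSplit(1<<20, 1<<20-800, 96))   // plenty of room: false
	fmt.Println(needSplit(1<<20, 1<<20-800, 2048)) // frame dips below guard: true
}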
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var r *obj.Prog
	var a int
	var i int

loop:
	if p == nil {
		return
	}
	a = int(p.As)
	if a == AB {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			p.Mark |= FOLL
			p = q
			if p.Mark&FOLL == 0 {
				goto loop
			}
		}
	}

	if p.Mark&FOLL != 0 {
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == *last || q == nil {
				break
			}
			a = int(q.As)
			if a == obj.ANOP {
				i--
				continue
			}

			if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
				goto copy
			}
			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
				continue
			}
			if a != ABEQ && a != ABNE {
				continue
			}

		copy:
			for {
				r = ctxt.NewProg()
				*r = *p
				if r.Mark&FOLL == 0 {
					fmt.Printf("can't happen 1\n")
				}
				r.Mark |= FOLL
				if p != q {
					p = p.Link
					(*last).Link = r
					*last = r
					continue
				}

				(*last).Link = r
				*last = r
				if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
					return
				}
				r.As = ABNE
				if a == ABNE {
					r.As = ABEQ
				}
				r.Pcond = p.Link
				r.Link = p.Pcond
				if r.Link.Mark&FOLL == 0 {
					xfol(ctxt, r.Link, last)
				}
				if r.Pcond.Mark&FOLL == 0 {
					fmt.Printf("can't happen 2\n")
				}
				return
			}
		}

		a = AB
		q = ctxt.NewProg()
		q.As = int16(a)
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	p.Mark |= FOLL
	(*last).Link = p
	*last = p
	if a == AB || (a == obj.ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
		return
	}

	if p.Pcond != nil {
		if a != ABL && a != ABX && p.Link != nil {
			q = obj.Brchain(ctxt, p.Link)
			if a != obj.ATEXT {
				if q != nil && (q.Mark&FOLL != 0) {
					p.As = int16(relinv(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}

			xfol(ctxt, p.Link, last)
			q = obj.Brchain(ctxt, p.Pcond)
			if q == nil {
				q = p.Pcond
			}
			if q.Mark&FOLL != 0 {
				p.Pcond = q
				return
			}

			p = q
			goto loop
		}
	}

	p = p.Link
	goto loop
}
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var r *obj.Prog
	var i int

loop:
	if p == nil {
		return
	}
	a := p.As
	if a == AB {
		q = p.Pcond
		if q != nil {
			p.Mark |= FOLL
			p = q
			if p.Mark&FOLL == 0 {
				goto loop
			}
		}
	}

	if p.Mark&FOLL != 0 {
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == *last || q == nil {
				break
			}
			a = q.As
			if a == obj.ANOP {
				i--
				continue
			}
			if a == AB || a == obj.ARET || a == AERET {
				goto copy
			}
			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
				continue
			}
			if a != ABEQ && a != ABNE {
				continue
			}

		copy:
			for {
				r = ctxt.NewProg()
				*r = *p
				if r.Mark&FOLL == 0 {
					fmt.Printf("can't happen 1\n")
				}
				r.Mark |= FOLL
				if p != q {
					p = p.Link
					(*last).Link = r
					*last = r
					continue
				}

				(*last).Link = r
				*last = r
				if a == AB || a == obj.ARET || a == AERET {
					return
				}
				if a == ABNE {
					r.As = ABEQ
				} else {
					r.As = ABNE
				}
				r.Pcond = p.Link
				r.Link = p.Pcond
				if r.Link.Mark&FOLL == 0 {
					xfol(ctxt, r.Link, last)
				}
				if r.Pcond.Mark&FOLL == 0 {
					fmt.Printf("can't happen 2\n")
				}
				return
			}
		}

		a = AB
		q = ctxt.NewProg()
		q.As = a
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	p.Mark |= FOLL
	(*last).Link = p
	*last = p
	if a == AB || a == obj.ARET || a == AERET {
		return
	}

	if p.Pcond != nil {
		if a != ABL && p.Link != nil {
			q = obj.Brchain(ctxt, p.Link)
			if a != obj.ATEXT {
				if q != nil && (q.Mark&FOLL != 0) {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}

			xfol(ctxt, p.Link, last)
			q = obj.Brchain(ctxt, p.Pcond)
			if q == nil {
				q = p.Pcond
			}
			if q.Mark&FOLL != 0 {
				p.Pcond = q
				return
			}

			p = q
			goto loop
		}
	}

	p = p.Link
	goto loop
}
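// Both xfol variants lean on the same trick when laying out blocks: if a
// conditional branch targets code that has already been emitted, invert the
// condition (relinv) and swap Link with Pcond so the not-yet-emitted path
// becomes the fall-through. A standalone toy of the inversion table alone;
// the string opcodes are illustrative, not the assembler's types.
package main

import "fmt"

// inverse maps each conditional branch to its logical opposite so a layout
// pass can turn a taken branch into a fall-through.
var inverse = map[string]string{
	"BEQ": "BNE", "BNE": "BEQ",
	"BLT": "BGE", "BGE": "BLT",
	"BGT": "BLE", "BLE": "BGT",
}

func main() {
	fmt.Println(inverse["BEQ"], inverse["BGT"]) // BNE BLE
}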
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { // TODO(minux): add morestack short-cuts with small fixed frame-size. ctxt.Cursym = cursym if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset if textstksiz == -8 { // Compatibility hack. p.From3.Offset |= obj.NOFRAME textstksiz = 0 } if textstksiz%8 != 0 { ctxt.Diag("frame size %d not a multiple of 8", textstksiz) } if p.From3.Offset&obj.NOFRAME != 0 { if textstksiz != 0 { ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) } } cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET * expand BECOME pseudo */ if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime()) } ctxt.Bso.Flush() var q *obj.Prog var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ALWAR, ASTWCCC, AECIWX, AECOWX, AEIEIO, AICBI, AISYNC, ATLBIE, ATLBIEL, ASLBIA, ASLBIE, ASLBMFEE, ASLBMFEV, ASLBMTE, ADCBF, ADCBI, ADCBST, ADCBT, ADCBTST, ADCBZ, ASYNC, ATLBSYNC, APTESYNC, ATW, AWORD, ARFI, ARFCI, ARFID, AHRFID: q = p p.Mark |= LABEL | SYNC continue case AMOVW, AMOVWZ, AMOVD: q = p if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } continue case AFABS, AFABSCC, AFADD, AFADDCC, AFCTIW, AFCTIWCC, AFCTIWZ, AFCTIWZCC, AFDIV, AFDIVCC, AFMADD, AFMADDCC, AFMOVD, AFMOVDU, /* case AFMOVDS: */ AFMOVS, AFMOVSU, /* case AFMOVSD: */ AFMSUB, AFMSUBCC, AFMUL, AFMULCC, AFNABS, AFNABSCC, AFNEG, AFNEGCC, AFNMADD, AFNMADDCC, AFNMSUB, AFNMSUBCC, AFRSP, AFRSPCC, AFSUB, AFSUBCC: q = p p.Mark |= FLOAT continue case ABL, ABCL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case ABC, ABEQ, ABGE, ABGT, ABLE, ABLT, ABNE, ABR, ABVC, ABVS: p.Mark |= BRANCH q = p q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } else { p.Mark |= LABEL } q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case AFCMPO, AFCMPU: q = p p.Mark |= FCMP | FLOAT continue case obj.ARET: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue default: q = p continue } } autosize := int32(0) var aoffset int var mov int var o int var p1 *obj.Prog var p2 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { o = int(p.As) switch o { case obj.ATEXT: mov = AMOVD aoffset = 0 autosize = int32(textstksiz) if p.Mark&LEAF != 0 && autosize == 0 && p.From3.Offset&obj.NOFRAME == 0 { // A leaf function with no locals has no frame. p.From3.Offset |= obj.NOFRAME } if p.From3.Offset&obj.NOFRAME == 0 { // If there is a stack frame at all, it includes // space to save the LR. autosize += int32(ctxt.FixedFrameSize()) } p.To.Offset = int64(autosize) q = p if ctxt.Flag_shared != 0 && cursym.Name != "runtime.duffzero" && cursym.Name != "runtime.duffcopy" { // When compiling Go into PIC, all functions must start // with instructions to load the TOC pointer into r2: // // addis r2, r12, .TOC.-func@ha // addi r2, r2, .TOC.-func@l+4 // // We could probably skip this prologue in some situations // but it's a bit subtle. 
However, it is both safe and // necessary to leave the prologue off duffzero and // duffcopy as we rely on being able to jump to a specific // instruction offset for them. // // These are AWORDS because there is no (afaict) way to // generate the addis instruction except as part of the // load of a large constant, and in that case there is no // way to use r12 as the source. q = obj.Appendp(ctxt, q) q.As = AWORD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = 0x3c4c0000 q = obj.Appendp(ctxt, q) q.As = AWORD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = 0x38420000 rel := obj.Addrel(ctxt.Cursym) rel.Off = 0 rel.Siz = 8 rel.Sym = obj.Linklookup(ctxt, ".TOC.", 0) rel.Type = obj.R_ADDRPOWER_PCREL } if cursym.Text.From3.Offset&obj.NOSPLIT == 0 { q = stacksplit(ctxt, q, autosize) // emit split check } if autosize != 0 { /* use MOVDU to adjust R1 when saving R31, if autosize is small */ if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG { mov = AMOVDU aoffset = int(-autosize) } else { q = obj.Appendp(ctxt, q) q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize } } else if cursym.Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // (e.g. gogo) are not identified as leaves but still have // no frame. cursym.Text.Mark |= LEAF } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = 1 break } q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q = obj.Appendp(ctxt, q) q.As = int16(mov) q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_MEM q.To.Offset = int64(aoffset) q.To.Reg = REGSP if q.As == AMOVDU { q.Spadj = int32(-aoffset) } if ctxt.Flag_shared != 0 { q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.To.Type = obj.TYPE_MEM q.To.Reg = REGSP q.To.Offset = 24 } if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R3 // CMP R0, R3 // BEQ end // MOVD panic_argp(R3), R4 // ADD $(autosize+8), R1, R5 // CMP R4, R5 // BNE end // ADD $8, R1, R6 // MOVD R6, panic_argp(R3) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes. 
q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R0 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH p1 = q q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R3 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ABNE q.To.Type = obj.TYPE_BRANCH p2 = q q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R6 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R6 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R3 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP p1.Pcond = q p2.Pcond = q } case obj.ARET: if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } retTarget := p.To.Sym if cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = ABR p.From = obj.Addr{} if retTarget == nil { p.To.Type = obj.TYPE_REG p.To.Reg = REG_LR } else { p.To.Type = obj.TYPE_BRANCH p.To.Sym = retTarget } p.Mark |= BRANCH break } p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = ctxt.NewProg() q.As = ABR q.Lineno = p.Lineno q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Mark |= BRANCH q.Spadj = +autosize q.Link = p.Link p.Link = q break } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Offset = 0 p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Link = p.Link p.Link = q p = q if false { // Debug bad returns q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_MEM q.From.Offset = 0 q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q.Link = p.Link p.Link = q p = q } if autosize != 0 { q = ctxt.NewProg() q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize q.Link = p.Link p.Link = q } q1 = ctxt.NewProg() q1.As = ABR q1.Lineno = p.Lineno if retTarget == nil { q1.To.Type = obj.TYPE_REG q1.To.Reg = REG_LR } else { q1.To.Type = obj.TYPE_BRANCH q1.To.Sym = retTarget } q1.Mark |= BRANCH q1.Spadj = +autosize q1.Link = q.Link q.Link = q1 case AADD: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } }
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { if ctxt.Symmorestack[0] == nil { ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0) ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0) } // TODO(minux): add morestack short-cuts with small fixed frame-size. ctxt.Cursym = cursym if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET * expand BECOME pseudo */ if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime()) } obj.Bflush(ctxt.Bso) var q *obj.Prog var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ALWAR, ASTWCCC, AECIWX, AECOWX, AEIEIO, AICBI, AISYNC, ATLBIE, ATLBIEL, ASLBIA, ASLBIE, ASLBMFEE, ASLBMFEV, ASLBMTE, ADCBF, ADCBI, ADCBST, ADCBT, ADCBTST, ADCBZ, ASYNC, ATLBSYNC, APTESYNC, ATW, AWORD, ARFI, ARFCI, ARFID, AHRFID: q = p p.Mark |= LABEL | SYNC continue case AMOVW, AMOVWZ, AMOVD: q = p if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } continue case AFABS, AFABSCC, AFADD, AFADDCC, AFCTIW, AFCTIWCC, AFCTIWZ, AFCTIWZCC, AFDIV, AFDIVCC, AFMADD, AFMADDCC, AFMOVD, AFMOVDU, /* case AFMOVDS: */ AFMOVS, AFMOVSU, /* case AFMOVSD: */ AFMSUB, AFMSUBCC, AFMUL, AFMULCC, AFNABS, AFNABSCC, AFNEG, AFNEGCC, AFNMADD, AFNMADDCC, AFNMSUB, AFNMSUBCC, AFRSP, AFRSPCC, AFSUB, AFSUBCC: q = p p.Mark |= FLOAT continue case ABL, ABCL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case ABC, ABEQ, ABGE, ABGT, ABLE, ABLT, ABNE, ABR, ABVC, ABVS: p.Mark |= BRANCH q = p q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } else { p.Mark |= LABEL } q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case AFCMPO, AFCMPU: q = p p.Mark |= FCMP | FLOAT continue case ARETURN: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue default: q = p continue } } autosize := int32(0) var aoffset int var mov int var o int var p1 *obj.Prog var p2 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { o = int(p.As) switch o { case obj.ATEXT: mov = AMOVD aoffset = 0 autosize = int32(textstksiz + 8) if (p.Mark&LEAF != 0) && autosize <= 8 { autosize = 0 } else if autosize&4 != 0 { autosize += 4 } p.To.Offset = int64(autosize) - 8 if p.From3.Offset&obj.NOSPLIT == 0 { p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check } q = p if autosize != 0 { /* use MOVDU to adjust R1 when saving R31, if autosize is small */ if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG { mov = AMOVDU aoffset = int(-autosize) } else { q = obj.Appendp(ctxt, p) q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize } } else if cursym.Text.Mark&LEAF == 0 { if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name) obj.Bflush(ctxt.Bso) } cursym.Text.Mark |= LEAF } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = 1 break } q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno 
q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q = obj.Appendp(ctxt, q) q.As = int16(mov) q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_MEM q.To.Offset = int64(aoffset) q.To.Reg = REGSP if q.As == AMOVDU { q.Spadj = int32(-aoffset) } if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R3 // CMP R0, R3 // BEQ end // MOVD panic_argp(R3), R4 // ADD $(autosize+8), R1, R5 // CMP R4, R5 // BNE end // ADD $8, R1, R6 // MOVD R6, panic_argp(R3) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes. q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R0 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH p1 = q q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R3 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ABNE q.To.Type = obj.TYPE_BRANCH p2 = q q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R6 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R6 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R3 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP p1.Pcond = q p2.Pcond = q } case ARETURN: if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } if p.To.Sym != nil { // retjmp p.As = ABR p.To.Type = obj.TYPE_BRANCH break } if cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = ABR p.From = obj.Addr{} p.To.Type = obj.TYPE_REG p.To.Reg = REG_LR p.Mark |= BRANCH break } p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = ctxt.NewProg() q.As = ABR q.Lineno = p.Lineno q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Mark |= BRANCH q.Spadj = +autosize q.Link = p.Link p.Link = q break } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Offset = 0 p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Link = p.Link p.Link = q p = q if false { // Debug bad returns q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_MEM q.From.Offset = 0 q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q.Link = p.Link p.Link = q p = q } if autosize != 0 { q = ctxt.NewProg() q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize q.Link = p.Link p.Link = q } q1 = ctxt.NewProg() q1.As = ABR q1.Lineno = p.Lineno q1.To.Type = obj.TYPE_REG q1.To.Reg = REG_LR q1.Mark |= BRANCH q1.Spadj = 
+autosize q1.Link = q.Link q.Link = q1 case AADD: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } }
func span0(ctxt *obj.Link, cursym *obj.LSym) {
	p := cursym.Text
	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
		return
	}

	ctxt.Cursym = cursym
	ctxt.Autosize = int32(p.To.Offset + 8)

	if oprange[AOR&obj.AMask].start == nil {
		buildop(ctxt)
	}

	c := int64(0)
	p.Pc = c

	var m int
	var o *Optab
	for p = p.Link; p != nil; p = p.Link {
		ctxt.Curp = p
		p.Pc = c
		o = oplook(ctxt, p)
		m = int(o.size)
		if m == 0 {
			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
				ctxt.Diag("zero-width instruction\n%v", p)
			}
			continue
		}

		c += int64(m)
	}

	cursym.Size = c

	/*
	 * if any procedure is large enough to
	 * generate a large SBRA branch, then
	 * generate extra passes putting branches
	 * around jmps to fix. this is rare.
	 */
	bflag := 1

	var otxt int64
	var q *obj.Prog
	for bflag != 0 {
		if ctxt.Debugvlog != 0 {
			fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
		}
		bflag = 0
		c = 0
		for p = cursym.Text.Link; p != nil; p = p.Link {
			p.Pc = c
			o = oplook(ctxt, p)

			// very large conditional branches
			if o.type_ == 6 && p.Pcond != nil {
				otxt = p.Pcond.Pc - c
				if otxt < -(1<<17)+10 || otxt >= (1<<17)-10 {
					q = ctxt.NewProg()
					q.Link = p.Link
					p.Link = q
					q.As = AJMP
					q.Lineno = p.Lineno
					q.To.Type = obj.TYPE_BRANCH
					q.Pcond = p.Pcond
					p.Pcond = q
					q = ctxt.NewProg()
					q.Link = p.Link
					p.Link = q
					q.As = AJMP
					q.Lineno = p.Lineno
					q.To.Type = obj.TYPE_BRANCH
					q.Pcond = q.Link.Link

					addnop(ctxt, p.Link)
					addnop(ctxt, p)
					bflag = 1
				}
			}

			m = int(o.size)
			if m == 0 {
				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.AUSEFIELD {
					ctxt.Diag("zero-width instruction\n%v", p)
				}
				continue
			}

			c += int64(m)
		}

		cursym.Size = c
	}

	c += -c & (FuncAlign - 1)
	cursym.Size = c

	/*
	 * lay out the code, emitting code and data relocations.
	 */
	obj.Symgrow(ctxt, cursym, cursym.Size)

	bp := cursym.P
	var i int32
	var out [4]uint32
	for p := cursym.Text.Link; p != nil; p = p.Link {
		ctxt.Pc = p.Pc
		ctxt.Curp = p
		o = oplook(ctxt, p)
		if int(o.size) > 4*len(out) {
			log.Fatalf("out array in span0 is too small, need at least %d for %v", o.size/4, p)
		}
		asmout(ctxt, p, o, out[:])
		for i = 0; i < int32(o.size/4); i++ {
			ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
			bp = bp[4:]
		}
	}
}
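// The ±(1<<17) test in span0 comes from the 16-bit signed instruction-count
// immediate in MIPS conditional branches: 1<<15 instructions of 4 bytes each
// in either direction, minus a little slack for the AJMP trampoline the pass
// inserts. A standalone sketch making the bound explicit; branchFits is a
// made-up name and the 10-byte slack simply mirrors the listing above.
package main

import "fmt"

// branchFits reports whether a conditional-branch displacement in bytes
// fits the 16-bit instruction-count immediate (1<<17 bytes either way).
func branchFits(disp int64) bool {
	const limit = 1 << 17
	return disp >= -limit+10 && disp < limit-10
}

func main() {
	fmt.Println(branchFits(1 << 16)) // true
	fmt.Println(branchFits(1 << 17)) // false: needs the JMP rewrite
}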
func sched(ctxt *obj.Link, p0, pe *obj.Prog) { var sch [NSCHED]Sch /* * build side structure */ s := sch[:] for p := p0; ; p = p.Link { s[0].p = *p markregused(ctxt, &s[0]) if p == pe { break } s = s[1:] } se := s for i := cap(sch) - cap(se); i >= 0; i-- { s = sch[i:] if s[0].p.Mark&DELAY == 0 { continue } if -cap(s) < -cap(se) { if !conflict(&s[0], &s[1]) { continue } } var t []Sch var j int for j = cap(sch) - cap(s) - 1; j >= 0; j-- { t = sch[j:] if t[0].comp { if s[0].p.Mark&BRANCH != 0 { goto no2 } } if t[0].p.Mark&DELAY != 0 { if -cap(s) >= -cap(se) || conflict(&t[0], &s[1]) { goto no2 } } for u := t[1:]; -cap(u) <= -cap(s); u = u[1:] { if depend(ctxt, &u[0], &t[0]) { goto no2 } } goto out2 no2: } if s[0].p.Mark&BRANCH != 0 { s[0].nop = 1 } continue out2: // t[0] is the instruction being moved to fill the delay stmp := t[0] copy(t[:i-j], t[1:i-j+1]) s[0] = stmp if t[i-j-1].p.Mark&BRANCH != 0 { // t[i-j] is being put into a branch delay slot // combine its Spadj with the branch instruction t[i-j-1].p.Spadj += t[i-j].p.Spadj t[i-j].p.Spadj = 0 } i-- } /* * put it all back */ var p *obj.Prog var q *obj.Prog for s, p = sch[:], p0; -cap(s) <= -cap(se); s, p = s[1:], q { q = p.Link if q != s[0].p.Link { *p = s[0].p p.Link = q } for s[0].nop != 0 { s[0].nop-- addnop(ctxt, p) } } } func markregused(ctxt *obj.Link, s *Sch) { p := &s.p s.comp = compound(ctxt, p) s.nop = 0 if s.comp { s.set.ireg |= 1 << (REGTMP - REG_R0) s.used.ireg |= 1 << (REGTMP - REG_R0) } ar := 0 /* dest is really reference */ ad := 0 /* source/dest is really address */ ld := 0 /* opcode is load instruction */ sz := 20 /* size of load/store for overlap computation */ /* * flags based on opcode */ switch p.As { case obj.ATEXT: ctxt.Autosize = int32(p.To.Offset + 8) ad = 1 case AJAL: c := p.Reg if c == 0 { c = REGLINK } s.set.ireg |= 1 << uint(c-REG_R0) ar = 1 ad = 1 case ABGEZAL, ABLTZAL: s.set.ireg |= 1 << (REGLINK - REG_R0) fallthrough case ABEQ, ABGEZ, ABGTZ, ABLEZ, ABLTZ, ABNE: ar = 1 ad = 1 case ABFPT, ABFPF: ad = 1 s.used.cc |= E_FCR case ACMPEQD, ACMPEQF, ACMPGED, ACMPGEF, ACMPGTD, ACMPGTF: ar = 1 s.set.cc |= E_FCR p.Mark |= FCMP case AJMP: ar = 1 ad = 1 case AMOVB, AMOVBU: sz = 1 ld = 1 case AMOVH, AMOVHU: sz = 2 ld = 1 case AMOVF, AMOVW, AMOVWL, AMOVWR: sz = 4 ld = 1 case AMOVD, AMOVV, AMOVVL, AMOVVR: sz = 8 ld = 1 case ADIV, ADIVU, AMUL, AMULU, AREM, AREMU, ADIVV, ADIVVU, AMULV, AMULVU, AREMV, AREMVU: s.set.cc = E_HILO fallthrough case AADD, AADDU, AADDV, AADDVU, AAND, ANOR, AOR, ASGT, ASGTU, ASLL, ASRA, ASRL, ASLLV, ASRAV, ASRLV, ASUB, ASUBU, ASUBV, ASUBVU, AXOR, AADDD, AADDF, AADDW, ASUBD, ASUBF, ASUBW, AMULF, AMULD, AMULW, ADIVF, ADIVD, ADIVW: if p.Reg == 0 { if p.To.Type == obj.TYPE_REG { p.Reg = p.To.Reg } //if(p->reg == NREG) // print("botch %P\n", p); } } /* * flags based on 'to' field */ c := int(p.To.Class) if c == 0 { c = aclass(ctxt, &p.To) + 1 p.To.Class = int8(c) } c-- switch c { default: fmt.Printf("unknown class %d %v\n", c, p) case C_ZCON, C_SCON, C_ADD0CON, C_AND0CON, C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_NONE, C_SBRA, C_LBRA, C_ADDR, C_TEXTSIZE: break case C_HI, C_LO: s.set.cc |= E_HILO case C_FCREG: s.set.cc |= E_FCR case C_MREG: s.set.cc |= E_MCR case C_ZOREG, C_SOREG, C_LOREG: c = int(p.To.Reg) s.used.ireg |= 1 << uint(c-REG_R0) if ad != 0 { break } s.size = uint8(sz) s.soffset = regoff(ctxt, &p.To) m := uint32(ANYMEM) if c == REGSB { m = E_MEMSB } if c == REGSP { m = E_MEMSP } if ar != 0 { s.used.cc |= m } else { s.set.cc |= m } case C_SACON, C_LACON: s.used.ireg |= 1 << (REGSP - REG_R0) case 
C_SECON, C_LECON: s.used.ireg |= 1 << (REGSB - REG_R0) case C_REG: if ar != 0 { s.used.ireg |= 1 << uint(p.To.Reg-REG_R0) } else { s.set.ireg |= 1 << uint(p.To.Reg-REG_R0) } case C_FREG: if ar != 0 { s.used.freg |= 1 << uint(p.To.Reg-REG_F0) } else { s.set.freg |= 1 << uint(p.To.Reg-REG_F0) } if ld != 0 && p.From.Type == obj.TYPE_REG { p.Mark |= LOAD } case C_SAUTO, C_LAUTO: s.used.ireg |= 1 << (REGSP - REG_R0) if ad != 0 { break } s.size = uint8(sz) s.soffset = regoff(ctxt, &p.To) if ar != 0 { s.used.cc |= E_MEMSP } else { s.set.cc |= E_MEMSP } case C_SEXT, C_LEXT: s.used.ireg |= 1 << (REGSB - REG_R0) if ad != 0 { break } s.size = uint8(sz) s.soffset = regoff(ctxt, &p.To) if ar != 0 { s.used.cc |= E_MEMSB } else { s.set.cc |= E_MEMSB } } /* * flags based on 'from' field */ c = int(p.From.Class) if c == 0 { c = aclass(ctxt, &p.From) + 1 p.From.Class = int8(c) } c-- switch c { default: fmt.Printf("unknown class %d %v\n", c, p) case C_ZCON, C_SCON, C_ADD0CON, C_AND0CON, C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_NONE, C_SBRA, C_LBRA, C_ADDR, C_TEXTSIZE: break case C_HI, C_LO: s.used.cc |= E_HILO case C_FCREG: s.used.cc |= E_FCR case C_MREG: s.used.cc |= E_MCR case C_ZOREG, C_SOREG, C_LOREG: c = int(p.From.Reg) s.used.ireg |= 1 << uint(c-REG_R0) if ld != 0 { p.Mark |= LOAD } s.size = uint8(sz) s.soffset = regoff(ctxt, &p.From) m := uint32(ANYMEM) if c == REGSB { m = E_MEMSB } if c == REGSP { m = E_MEMSP } s.used.cc |= m case C_SACON, C_LACON: c = int(p.From.Reg) if c == 0 { c = REGSP } s.used.ireg |= 1 << uint(c-REG_R0) case C_SECON, C_LECON: s.used.ireg |= 1 << (REGSB - REG_R0) case C_REG: s.used.ireg |= 1 << uint(p.From.Reg-REG_R0) case C_FREG: s.used.freg |= 1 << uint(p.From.Reg-REG_F0) if ld != 0 && p.To.Type == obj.TYPE_REG { p.Mark |= LOAD } case C_SAUTO, C_LAUTO: s.used.ireg |= 1 << (REGSP - REG_R0) if ld != 0 { p.Mark |= LOAD } if ad != 0 { break } s.size = uint8(sz) s.soffset = regoff(ctxt, &p.From) s.used.cc |= E_MEMSP case C_SEXT: case C_LEXT: s.used.ireg |= 1 << (REGSB - REG_R0) if ld != 0 { p.Mark |= LOAD } if ad != 0 { break } s.size = uint8(sz) s.soffset = regoff(ctxt, &p.From) s.used.cc |= E_MEMSB } c = int(p.Reg) if c != 0 { if REG_F0 <= c && c <= REG_F31 { s.used.freg |= 1 << uint(c-REG_F0) } else { s.used.ireg |= 1 << uint(c-REG_R0) } } s.set.ireg &^= (1 << (REGZERO - REG_R0)) /* R0 can't be set */ } /* * test to see if two instructions can be * interchanged without changing semantics */ func depend(ctxt *obj.Link, sa, sb *Sch) bool { if sa.set.ireg&(sb.set.ireg|sb.used.ireg) != 0 { return true } if sb.set.ireg&sa.used.ireg != 0 { return true } if sa.set.freg&(sb.set.freg|sb.used.freg) != 0 { return true } if sb.set.freg&sa.used.freg != 0 { return true } /* * special case. * loads from same address cannot pass. * this is for hardware fifo's and the like */ if sa.used.cc&sb.used.cc&E_MEM != 0 { if sa.p.Reg == sb.p.Reg { if regoff(ctxt, &sa.p.From) == regoff(ctxt, &sb.p.From) { return true } } } x := (sa.set.cc & (sb.set.cc | sb.used.cc)) | (sb.set.cc & sa.used.cc) if x != 0 { /* * allow SB and SP to pass each other. 
* allow SB to pass SB iff doffsets are ok * anything else conflicts */ if x != E_MEMSP && x != E_MEMSB { return true } x = sa.set.cc | sb.set.cc | sa.used.cc | sb.used.cc if x&E_MEM != 0 { return true } if offoverlap(sa, sb) { return true } } return false } func offoverlap(sa, sb *Sch) bool { if sa.soffset < sb.soffset { if sa.soffset+int32(sa.size) > sb.soffset { return true } return false } if sb.soffset+int32(sb.size) > sa.soffset { return true } return false } /* * test 2 adjacent instructions * and find out if inserted instructions * are desired to prevent stalls. */ func conflict(sa, sb *Sch) bool { if sa.set.ireg&sb.used.ireg != 0 { return true } if sa.set.freg&sb.used.freg != 0 { return true } if sa.set.cc&sb.used.cc != 0 { return true } return false } func compound(ctxt *obj.Link, p *obj.Prog) bool { o := oplook(ctxt, p) if o.size != 4 { return true } if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSB { return true } return false } func follow(ctxt *obj.Link, s *obj.LSym) { ctxt.Cursym = s firstp := ctxt.NewProg() lastp := firstp xfol(ctxt, s.Text, &lastp) lastp.Link = nil s.Text = firstp.Link } func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) { var q *obj.Prog var r *obj.Prog var i int loop: if p == nil { return } a := p.As if a == AJMP { q = p.Pcond if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) { p.Mark |= FOLL (*last).Link = p *last = p p = p.Link xfol(ctxt, p, last) p = q if p != nil && p.Mark&FOLL == 0 { goto loop } return } if q != nil { p.Mark |= FOLL p = q if p.Mark&FOLL == 0 { goto loop } } } if p.Mark&FOLL != 0 { i = 0 q = p for ; i < 4; i, q = i+1, q.Link { if q == *last || (q.Mark&NOSCHED != 0) { break } a = q.As if a == obj.ANOP { i-- continue } if a == AJMP || a == ARET || a == ARFE { goto copy } if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) { continue } if a != ABEQ && a != ABNE { continue } copy: for { r = ctxt.NewProg() *r = *p if r.Mark&FOLL == 0 { fmt.Printf("can't happen 1\n") } r.Mark |= FOLL if p != q { p = p.Link (*last).Link = r *last = r continue } (*last).Link = r *last = r if a == AJMP || a == ARET || a == ARFE { return } r.As = ABNE if a == ABNE { r.As = ABEQ } r.Pcond = p.Link r.Link = p.Pcond if r.Link.Mark&FOLL == 0 { xfol(ctxt, r.Link, last) } if r.Pcond.Mark&FOLL == 0 { fmt.Printf("can't happen 2\n") } return } } a = AJMP q = ctxt.NewProg() q.As = a q.Lineno = p.Lineno q.To.Type = obj.TYPE_BRANCH q.To.Offset = p.Pc q.Pcond = p p = q } p.Mark |= FOLL (*last).Link = p *last = p if a == AJMP || a == ARET || a == ARFE { if p.Mark&NOSCHED != 0 { p = p.Link goto loop } return } if p.Pcond != nil { if a != AJAL && p.Link != nil { xfol(ctxt, p.Link, last) p = p.Pcond if p == nil || (p.Mark&FOLL != 0) { return } goto loop } } p = p.Link goto loop } var Linkmips64 = obj.LinkArch{ Arch: sys.ArchMIPS64, Preprocess: preprocess, Assemble: span0, Follow: follow, Progedit: progedit, } var Linkmips64le = obj.LinkArch{ Arch: sys.ArchMIPS64LE, Preprocess: preprocess, Assemble: span0, Follow: follow, Progedit: progedit, } var Linkmips = obj.LinkArch{ Arch: sys.ArchMIPS, Preprocess: preprocess, Assemble: span0, Follow: follow, Progedit: progedit, } var Linkmipsle = obj.LinkArch{ Arch: sys.ArchMIPSLE, Preprocess: preprocess, Assemble: span0, Follow: follow, Progedit: progedit, }
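// The MIPS scheduler above works on bitmask summaries: markregused records
// which registers each instruction sets and uses, depend tests those masks for
// overlap, and sched uses the result to slide an independent instruction into
// a branch delay slot. Below is a minimal, self-contained sketch of that idea;
// the instr type, register encoding, and fillDelaySlot helper are hypothetical
// stand-ins for illustration, not the obj package's types.
package main

import "fmt"

// instr is a drastically simplified stand-in for the scheduler's Sch entry:
// just bitmasks of the registers an instruction sets and uses.
type instr struct {
	name string
	set  uint32 // registers written
	used uint32 // registers read
}

// dependsOn reports whether b cannot be hoisted above a, mirroring the
// register-overlap tests in depend(): a write that overlaps the other
// instruction's reads or writes creates an ordering edge.
func dependsOn(a, b instr) bool {
	if a.set&(b.set|b.used) != 0 {
		return true
	}
	return b.set&a.used != 0
}

// fillDelaySlot searches backwards from the branch at index br for an
// instruction that can legally move past everything after it (including the
// branch itself, whose condition must not read anything the candidate writes)
// and slides it into the delay slot. With no candidate it appends a NOP,
// standing in for what sched() does via the nop counter and addnop; here the
// branch is last, so appending is equivalent.
func fillDelaySlot(prog []instr, br int) []instr {
	for j := br - 1; j >= 0; j-- {
		safe := true
		for k := j + 1; k <= br; k++ {
			if dependsOn(prog[j], prog[k]) {
				safe = false
				break
			}
		}
		if safe {
			moved := prog[j]
			copy(prog[j:br], prog[j+1:br+1])
			prog[br] = moved
			return prog
		}
	}
	return append(prog, instr{name: "NOP"})
}

func main() {
	const r1, r2, r3 = 1 << 1, 1 << 2, 1 << 3
	prog := []instr{
		{name: "ADDU R1, R2", set: r2, used: r1},
		{name: "MOVW R3, 8(SP)", used: r3},
		{name: "BNE R2", used: r2},
	}
	for _, in := range fillDelaySlot(prog, 2) {
		fmt.Println(in.name)
	}
	// The MOVW, which neither feeds the BNE nor is clobbered by it,
	// ends up after the branch, in its delay slot.
}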
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
// TODO(minux): add morestack short-cuts with small fixed frame-size.
ctxt.Cursym = cursym
// a switch for enabling/disabling instruction scheduling
nosched := true
if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET * expand BECOME pseudo */ if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f noops\n", obj.Cputime()) } var q *obj.Prog var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } /* too hard, just leave alone */ case AMOVW, AMOVV: q = p if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC break } if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } /* too hard, just leave alone */ case ASYSCALL, AWORD, ATLBWR, ATLBWI, ATLBP, ATLBR: q = p p.Mark |= LABEL | SYNC case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ABGEZAL, ABLTZAL, AJAL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case AJMP, ABEQ, ABGEZ, ABGTZ, ABLEZ, ABLTZ, ABNE, ABFPT, ABFPF: if p.As == ABFPT || p.As == ABFPF { // We don't treat ABFPT and ABFPF as branches here,
// so that we will always fill nop (0x0) in their
// delay slot during assembly.
// This is to work around a kernel FPU emulator bug
// where it uses the user stack to simulate the
// instruction in the delay slot if it's not 0x0,
// and somehow that leads to SIGSEGV when the kernel
// jumps to the stack.
p.Mark |= SYNC } else { p.Mark |= BRANCH } q = p q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } //else {
//	p.Mark |= LABEL
//}
q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case ARET: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue default: q = p continue } } var mov, add obj.As if ctxt.Mode&Mips64 != 0 { add = AADDV mov = AMOVV } else { add = AADDU mov = AMOVW } autosize := int32(0) var p1 *obj.Prog var p2 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { o := p.As switch o { case obj.ATEXT: autosize = int32(textstksiz + ctxt.FixedFrameSize()) if (p.Mark&LEAF != 0) && autosize <= int32(ctxt.FixedFrameSize()) { autosize = 0 } else if autosize&4 != 0 && ctxt.Mode&Mips64 != 0 { autosize += 4 } p.To.Offset = int64(autosize) - ctxt.FixedFrameSize() if p.From3.Offset&obj.NOSPLIT == 0 { p = stacksplit(ctxt, p, autosize) // emit split check
} q = p if autosize != 0 { // Make sure to save the link register for a non-empty frame, even if
// it is a leaf function, so that traceback works.
// Store the link register before decrementing SP, so if a signal comes
// during the execution of the function prologue, the traceback
// code will not see a half-updated stack frame. 
q = obj.Appendp(ctxt, q) q.As = mov q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGLINK q.To.Type = obj.TYPE_MEM q.To.Offset = int64(-autosize) q.To.Reg = REGSP q = obj.Appendp(ctxt, q) q.As = add q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize } else if cursym.Text.Mark&LEAF == 0 { if cursym.Text.From3.Offset&obj.NOSPLIT != 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("save suppressed in: %s\n", cursym.Name) } cursym.Text.Mark |= LEAF } } if cursym.Text.Mark&LEAF != 0 { cursym.Set(obj.AttrLeaf, true) break } if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOV g_panic(g), R1 // BEQ R1, end // MOV panic_argp(R1), R2 // ADD $(autosize+FIXED_FRAME), R29, R3 // BNE R2, R3, end // ADD $FIXED_FRAME, R29, R2 // MOV R2, panic_argp(R1) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not an mips NOP: it encodes to 0 instruction bytes. q = obj.Appendp(ctxt, q) q.As = mov q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R1 q = obj.Appendp(ctxt, q) q.As = ABEQ q.From.Type = obj.TYPE_REG q.From.Reg = REG_R1 q.To.Type = obj.TYPE_BRANCH q.Mark |= BRANCH p1 = q q = obj.Appendp(ctxt, q) q.As = mov q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R1 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R2 q = obj.Appendp(ctxt, q) q.As = add q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABNE q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.Reg = REG_R3 q.To.Type = obj.TYPE_BRANCH q.Mark |= BRANCH p2 = q q = obj.Appendp(ctxt, q) q.As = add q.From.Type = obj.TYPE_CONST q.From.Offset = ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R2 q = obj.Appendp(ctxt, q) q.As = mov q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R1 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP p1.Pcond = q p2.Pcond = q } case ARET: if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } retSym := p.To.Sym p.To.Name = obj.NAME_NONE // clear fields as we may modify p to other instruction p.To.Sym = nil if cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = AJMP p.From = obj.Addr{} if retSym != nil { // retjmp p.To.Type = obj.TYPE_BRANCH p.To.Name = obj.NAME_EXTERN p.To.Sym = retSym } else { p.To.Type = obj.TYPE_MEM p.To.Reg = REGLINK p.To.Offset = 0 } p.Mark |= BRANCH break } p.As = add p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = ctxt.NewProg() q.As = AJMP q.Lineno = p.Lineno q.To.Type = obj.TYPE_MEM q.To.Offset = 0 q.To.Reg = REGLINK q.Mark |= BRANCH q.Spadj = +autosize q.Link = p.Link p.Link = q break } p.As = mov p.From.Type = obj.TYPE_MEM p.From.Offset = 0 p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 if retSym != nil { // retjmp from non-leaf, need to restore LINK register p.To.Reg = REGLINK } if autosize != 0 { q = ctxt.NewProg() q.As = add q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize q.Link = p.Link p.Link = q } q1 = ctxt.NewProg() q1.As = 
AJMP q1.Lineno = p.Lineno if retSym != nil { // retjmp q1.To.Type = obj.TYPE_BRANCH q1.To.Name = obj.NAME_EXTERN q1.To.Sym = retSym } else { q1.To.Type = obj.TYPE_MEM q1.To.Offset = 0 q1.To.Reg = REG_R4 } q1.Mark |= BRANCH q1.Spadj = +autosize q1.Link = q.Link q.Link = q1 case AADD, AADDU, AADDV, AADDVU: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } if nosched { // if we don't do instruction scheduling, simply add // NOP after each branch instruction. for p = cursym.Text; p != nil; p = p.Link { if p.Mark&BRANCH != 0 { addnop(ctxt, p) } } return } // instruction scheduling q = nil // p - 1 q1 = cursym.Text // top of block o := 0 // count of instructions for p = cursym.Text; p != nil; p = p1 { p1 = p.Link o++ if p.Mark&NOSCHED != 0 { if q1 != p { sched(ctxt, q1, q) } for ; p != nil; p = p.Link { if p.Mark&NOSCHED == 0 { break } q = p } p1 = p q1 = p o = 0 continue } if p.Mark&(LABEL|SYNC) != 0 { if q1 != p { sched(ctxt, q1, q) } q1 = p o = 1 } if p.Mark&(BRANCH|SYNC) != 0 { sched(ctxt, q1, p) q1 = p1 o = 0 } if o >= NSCHED { sched(ctxt, q1, p) q1 = p1 o = 0 } q = p } }
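// Spadj is the thread running through the prologue/epilogue rewriting above:
// every instruction that moves SP carries its delta, so an unwinder can
// recompute the frame depth at any PC. A hedged sketch of that bookkeeping
// follows; the prog type, instruction names, and frame shape are illustrative
// stand-ins, not the obj package's.
package main

import "fmt"

// prog is a toy carrying only the field this pass maintains for the
// unwinder: Spadj, the SP delta attributed to the instruction.
type prog struct {
	name  string
	spadj int32
}

// spDepth replays Spadj to recover the frame depth at every instruction,
// which is what lets the runtime traceback locate the saved link register
// at an arbitrary PC inside the function.
func spDepth(f []prog) []int32 {
	out := make([]int32, len(f))
	var sp int32
	for i, p := range f {
		sp += p.spadj
		out[i] = sp
	}
	return out
}

func main() {
	// The shape preprocess emits for a non-leaf frame of size 32: the
	// prologue ADD decrements SP (Spadj = +32), the epilogue ADD restores
	// it (Spadj = -32), and the JMP after it carries Spadj = +32 so any
	// code laid out after the return still unwinds at the right depth.
	f := []prog{
		{"MOVV RLINK, -32(SP)", 0},
		{"ADDV $-32, SP", +32},
		{"... body ...", 0},
		{"ADDV $32, SP", -32},
		{"JMP (RLINK)", +32},
	}
	for i, d := range spDepth(f) {
		fmt.Printf("%-22s depth=%d\n", f[i].name, d)
	}
}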
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
// TODO(minux): add morestack short-cuts with small fixed frame-size.
ctxt.Cursym = cursym
// a switch for enabling/disabling instruction scheduling
nosched := true
if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET * expand BECOME pseudo */ if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime()) } ctxt.Bso.Flush() var q *obj.Prog var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } /* too hard, just leave alone */ case AMOVW, AMOVV: q = p if p.To.Type == obj.TYPE_REG && p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC break } if p.From.Type == obj.TYPE_REG && p.From.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } /* too hard, just leave alone */ case ASYSCALL, AWORD, ATLBWR, ATLBWI, ATLBP, ATLBR: q = p p.Mark |= LABEL | SYNC case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ABGEZAL, ABLTZAL, AJAL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case AJMP, ABEQ, ABGEZ, ABGTZ, ABLEZ, ABLTZ, ABNE, ABFPT, ABFPF: if p.As == ABFPT || p.As == ABFPF { // We don't treat ABFPT and ABFPF as branches here,
// so that we will always fill nop (0x0) in their
// delay slot during assembly.
// This is to work around a kernel FPU emulator bug
// where it uses the user stack to simulate the
// instruction in the delay slot if it's not 0x0,
// and somehow that leads to SIGSEGV when the kernel
// jumps to the stack. 
p.Mark |= SYNC } else { p.Mark |= BRANCH } q = p q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } //else { // p.Mark |= LABEL //} q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case ARET: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue default: q = p continue } } autosize := int32(0) var o int var p1 *obj.Prog var p2 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { o = int(p.As) switch o { case obj.ATEXT: autosize = int32(textstksiz + 8) if (p.Mark&LEAF != 0) && autosize <= 8 { autosize = 0 } else if autosize&4 != 0 { autosize += 4 } p.To.Offset = int64(autosize) - 8 if p.From3.Offset&obj.NOSPLIT == 0 { p = stacksplit(ctxt, p, autosize) // emit split check } q = p if autosize != 0 { q = obj.Appendp(ctxt, p) q.As = AADDV q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize } else if cursym.Text.Mark&LEAF == 0 { if cursym.Text.From3.Offset&obj.NOSPLIT != 0 { if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name) ctxt.Bso.Flush() } cursym.Text.Mark |= LEAF } } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = 1 break } q = obj.Appendp(ctxt, q) q.As = AMOVV q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGLINK q.To.Type = obj.TYPE_MEM q.To.Offset = int64(0) q.To.Reg = REGSP if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVV g_panic(g), R1 // BEQ R1, end // MOVV panic_argp(R1), R2 // ADDV $(autosize+8), R29, R3 // BNE R2, R3, end // ADDV $8, R29, R2 // MOVV R2, panic_argp(R1) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not an mips NOP: it encodes to 0 instruction bytes. 
q = obj.Appendp(ctxt, q) q.As = AMOVV q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R1 q = obj.Appendp(ctxt, q) q.As = ABEQ q.From.Type = obj.TYPE_REG q.From.Reg = REG_R1 q.To.Type = obj.TYPE_BRANCH q.Mark |= BRANCH p1 = q q = obj.Appendp(ctxt, q) q.As = AMOVV q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R1 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R2 q = obj.Appendp(ctxt, q) q.As = AADDV q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABNE q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.Reg = REG_R3 q.To.Type = obj.TYPE_BRANCH q.Mark |= BRANCH p2 = q q = obj.Appendp(ctxt, q) q.As = AADDV q.From.Type = obj.TYPE_CONST q.From.Offset = 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R2 q = obj.Appendp(ctxt, q) q.As = AMOVV q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R1 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP p1.Pcond = q p2.Pcond = q } case ARET: if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } if p.To.Sym != nil { // retjmp p.As = AJMP p.To.Type = obj.TYPE_BRANCH break } if cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = AJMP p.From = obj.Addr{} p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = REGLINK p.Mark |= BRANCH break } p.As = AADDV p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = ctxt.NewProg() q.As = AJMP q.Lineno = p.Lineno q.To.Type = obj.TYPE_MEM q.To.Offset = 0 q.To.Reg = REGLINK q.Mark |= BRANCH q.Spadj = +autosize q.Link = p.Link p.Link = q break } p.As = AMOVV p.From.Type = obj.TYPE_MEM p.From.Offset = 0 p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 if false { // Debug bad returns q = ctxt.NewProg() q.As = AMOVV q.Lineno = p.Lineno q.From.Type = obj.TYPE_MEM q.From.Offset = 0 q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q.Link = p.Link p.Link = q p = q } if autosize != 0 { q = ctxt.NewProg() q.As = AADDV q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize q.Link = p.Link p.Link = q } q1 = ctxt.NewProg() q1.As = AJMP q1.Lineno = p.Lineno q1.To.Type = obj.TYPE_MEM q1.To.Offset = 0 q1.To.Reg = REG_R4 q1.Mark |= BRANCH q1.Spadj = +autosize q1.Link = q.Link q.Link = q1 case AADDV, AADDVU: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } if nosched { // if we don't do instruction scheduling, simply add // NOP after each branch instruction. for p = cursym.Text; p != nil; p = p.Link { if p.Mark&BRANCH != 0 { addnop(ctxt, p) } } return } // instruction scheduling q = nil // p - 1 q1 = cursym.Text // top of block o = 0 // count of instructions for p = cursym.Text; p != nil; p = p1 { p1 = p.Link o++ if p.Mark&NOSCHED != 0 { if q1 != p { sched(ctxt, q1, q) } for ; p != nil; p = p.Link { if p.Mark&NOSCHED == 0 { break } q = p } p1 = p q1 = p o = 0 continue } if p.Mark&(LABEL|SYNC) != 0 { if q1 != p { sched(ctxt, q1, q) } q1 = p o = 1 } if p.Mark&(BRANCH|SYNC) != 0 { sched(ctxt, q1, p) q1 = p1 o = 0 } if o >= NSCHED { sched(ctxt, q1, p) q1 = p1 o = 0 } q = p } }
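// The WRAPPER prologue emitted above implements, in registers, the check
// written in its comment: if g->panic->argp points at the wrapper's caller
// frame (FP), retarget it at the wrapper's own frame bottom. The same logic
// in plain Go is sketched below, under this port's fixed frame size of 8;
// panicRec and the addresses are made up for the demonstration.
package main

import "fmt"

// panicRec is a stand-in for the runtime's panic record; only argp
// matters here.
type panicRec struct {
	argp uintptr
}

// adjustPanicArgp mirrors the generated sequence: compare argp against
// SP+autosize+8 (ADD $(autosize+8), R29, R3; BNE) and, on a match, store
// SP+8 back into panic_argp (ADD $8, R29, R2; MOVV R2, panic_argp(R1)),
// so a recover inside the wrapped method sees a matching argp.
func adjustPanicArgp(p *panicRec, sp, autosize uintptr) {
	const fixedFrame = 8
	if p == nil {
		return
	}
	if p.argp == sp+autosize+fixedFrame {
		p.argp = sp + fixedFrame
	}
}

func main() {
	p := &panicRec{argp: 0x1000 + 32 + 8}
	adjustPanicArgp(p, 0x1000, 32)
	fmt.Printf("argp now %#x\n", p.argp) // 0x1008
}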
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) { var q *obj.Prog var i int var a obj.As loop: if p == nil { return } if p.As == obj.AJMP { q = p.Pcond if q != nil && q.As != obj.ATEXT { /* mark instruction as done and continue layout at target of jump */ p.Mark |= DONE p = q if p.Mark&DONE == 0 { goto loop } } } if p.Mark&DONE != 0 { /* * p goes here, but already used it elsewhere. * copy up to 4 instructions or else branch to other copy. */ i = 0 q = p for ; i < 4; i, q = i+1, q.Link { if q == nil { break } if q == *last { break } a = q.As if a == obj.ANOP { i-- continue } if nofollow(a) || pushpop(a) { break // NOTE(rsc): arm does goto copy
} if q.Pcond == nil || q.Pcond.Mark&DONE != 0 { continue } if a == obj.ACALL || a == ALOOP { continue } for { if p.As == obj.ANOP { p = p.Link continue } q = obj.Copyp(ctxt, p) p = p.Link q.Mark |= DONE (*last).Link = q *last = q if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 { continue } q.As = relinv(q.As) p = q.Pcond q.Pcond = q.Link q.Link = p xfol(ctxt, q.Link, last) p = q.Link if p.Mark&DONE != 0 { return } goto loop } } q = ctxt.NewProg() q.As = obj.AJMP q.Lineno = p.Lineno q.To.Type = obj.TYPE_BRANCH q.To.Offset = p.Pc q.Pcond = p p = q } /* emit p */ p.Mark |= DONE (*last).Link = p *last = p a = p.As /* continue loop with what comes after p */ if nofollow(a) { return } if p.Pcond != nil && a != obj.ACALL { /* * some kind of conditional branch. * recurse to follow one path. * continue loop on the other. */ q = obj.Brchain(ctxt, p.Pcond) if q != nil { p.Pcond = q } q = obj.Brchain(ctxt, p.Link) if q != nil { p.Link = q } if p.From.Type == obj.TYPE_CONST { if p.From.Offset == 1 { /* * expect conditional jump to be taken. * rewrite so that's the fall-through case. */ p.As = relinv(a) q = p.Link p.Link = p.Pcond p.Pcond = q } } else { q = p.Link if q.Mark&DONE != 0 { if a != ALOOP { p.As = relinv(a) p.Link = p.Pcond p.Pcond = q } } } xfol(ctxt, p.Link, last) if p.Pcond.Mark&DONE != 0 { return } p = p.Pcond goto loop } p = p.Link goto loop }
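// The obj.Brchain calls above collapse chains of unconditional jumps before a
// conditional branch is retargeted. A small sketch of that walk follows, with
// a hypothetical node type in place of obj.Prog; like the real helper, it
// bounds the traversal so a jump cycle cannot loop forever.
package main

import "fmt"

// node stands in for an instruction whose only interesting property is an
// optional unconditional-jump target.
type node struct {
	name string
	jmp  *node // non-nil: this block is just "JMP jmp"
}

// brchain follows a chain of unconditional jumps to its final destination,
// so a branch aimed at a trivial trampoline can be aimed past it.
func brchain(p *node) *node {
	for i := 0; i < 20 && p != nil && p.jmp != nil; i++ {
		p = p.jmp
	}
	return p
}

func main() {
	c := &node{name: "C"}
	b := &node{name: "B", jmp: c}
	a := &node{name: "A", jmp: b}
	fmt.Println(brchain(a).name) // C — a branch to A can aim straight at C
}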
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) { var q *obj.Prog var r *obj.Prog var b obj.As for p != nil { a := p.As if a == ABR { q = p.Pcond if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) { p.Mark |= FOLL (*last).Link = p *last = p (*last).Pc = pc_cnt pc_cnt += 1 p = p.Link xfol(ctxt, p, last) p = q if p != nil && p.Mark&FOLL == 0 { continue } return } if q != nil { p.Mark |= FOLL p = q if p.Mark&FOLL == 0 { continue } } } if p.Mark&FOLL != 0 { q = p for i := 0; i < 4; i, q = i+1, q.Link { if q == *last || (q.Mark&NOSCHED != 0) { break } b = 0 /* set */ a = q.As if a == obj.ANOP { i-- continue } if a != ABR && a != obj.ARET { if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) { continue } b = relinv(a) if b == 0 { continue } } for { r = ctxt.NewProg() *r = *p if r.Mark&FOLL == 0 { fmt.Printf("can't happen 1\n") } r.Mark |= FOLL if p != q { p = p.Link (*last).Link = r *last = r (*last).Pc = pc_cnt pc_cnt += 1 continue } (*last).Link = r *last = r (*last).Pc = pc_cnt pc_cnt += 1 if a == ABR || a == obj.ARET { return } r.As = b r.Pcond = p.Link r.Link = p.Pcond if r.Link.Mark&FOLL == 0 { xfol(ctxt, r.Link, last) } if r.Pcond.Mark&FOLL == 0 { fmt.Printf("can't happen 2\n") } return } } a = ABR q = ctxt.NewProg() q.As = a q.Lineno = p.Lineno q.To.Type = obj.TYPE_BRANCH q.To.Offset = p.Pc q.Pcond = p p = q } p.Mark |= FOLL (*last).Link = p *last = p (*last).Pc = pc_cnt pc_cnt += 1 if a == ABR || a == obj.ARET { if p.Mark&NOSCHED != 0 { p = p.Link continue } return } if p.Pcond != nil { if a != ABL && p.Link != nil { xfol(ctxt, p.Link, last) p = p.Pcond if p == nil || (p.Mark&FOLL != 0) { return } continue } } p = p.Link } }
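// When xfol above reaches a block already marked FOLL, it either duplicates a
// short run of it in line (the loop above budgets 4 instructions) or emits a
// single branch back to the existing copy. The sketch below isolates just
// that size heuristic; layoutTail, its budget parameter, and the strings
// standing in for instructions are all illustrative.
package main

import "fmt"

// layoutTail duplicates a short already-placed block so the fall-through
// path stays branch-free, and otherwise falls back to an explicit branch.
func layoutTail(placed []string, budget int) []string {
	if len(placed) <= budget {
		// Cheap enough to duplicate in place.
		return append([]string(nil), placed...)
	}
	return []string{"BR already_placed"}
}

func main() {
	fmt.Println(layoutTail([]string{"MOVD", "ADD", "RET"}, 4))
	// [MOVD ADD RET] — duplicated in place
	long := []string{"I0", "I1", "I2", "I3", "I4", "I5"}
	fmt.Println(layoutTail(long, 4))
	// [BR already_placed] — too long to copy, branch instead
}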
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) { var q *obj.Prog var r *obj.Prog var a int var b int var i int loop: if p == nil { return } a = int(p.As) if a == ABR { q = p.Pcond if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) { p.Mark |= FOLL (*last).Link = p *last = p p = p.Link xfol(ctxt, p, last) p = q if p != nil && p.Mark&FOLL == 0 { goto loop } return } if q != nil { p.Mark |= FOLL p = q if p.Mark&FOLL == 0 { goto loop } } } if p.Mark&FOLL != 0 { i = 0 q = p for ; i < 4; i, q = i+1, q.Link { if q == *last || (q.Mark&NOSCHED != 0) { break } b = 0 /* set */ a = int(q.As) if a == obj.ANOP { i-- continue } if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID { goto copy } if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) { continue } b = relinv(a) if b == 0 { continue } copy: for { r = ctxt.NewProg() *r = *p if r.Mark&FOLL == 0 { fmt.Printf("can't happen 1\n") } r.Mark |= FOLL if p != q { p = p.Link (*last).Link = r *last = r continue } (*last).Link = r *last = r if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID { return } r.As = int16(b) r.Pcond = p.Link r.Link = p.Pcond if r.Link.Mark&FOLL == 0 { xfol(ctxt, r.Link, last) } if r.Pcond.Mark&FOLL == 0 { fmt.Printf("can't happen 2\n") } return } } a = ABR q = ctxt.NewProg() q.As = int16(a) q.Lineno = p.Lineno q.To.Type = obj.TYPE_BRANCH q.To.Offset = p.Pc q.Pcond = p p = q } p.Mark |= FOLL (*last).Link = p *last = p if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID { if p.Mark&NOSCHED != 0 { p = p.Link goto loop } return } if p.Pcond != nil { if a != ABL && p.Link != nil { xfol(ctxt, p.Link, last) p = p.Pcond if p == nil || (p.Mark&FOLL != 0) { return } goto loop } } p = p.Link goto loop }
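// relinv, used by both xfol variants above, maps each conditional branch to
// its inverse so "branch taken" can be rewritten as the fall-through path
// (r.As = relinv(a), then Pcond and Link swap). A standalone sketch follows;
// the inverse table is a hypothetical subset of the real opcode coverage.
package main

import "fmt"

// inverse pairs a few conditional branches with their opposites.
var inverse = map[string]string{
	"BEQ": "BNE", "BNE": "BEQ",
	"BLT": "BGE", "BGE": "BLT",
	"BGT": "BLE", "BLE": "BGT",
}

// invert rewrites "branch to target, fall through to next" as
// "inverted branch to next, fall through to target", which lets layout
// continue straight into the original branch target.
func invert(op, target, next string) (op2, target2, next2 string, ok bool) {
	inv, found := inverse[op]
	if !found {
		return op, target, next, false // relinv returned 0: not invertible
	}
	return inv, next, target, true
}

func main() {
	op, target, next, ok := invert("BEQ", "Ldone", "Lbody")
	fmt.Println(op, target, next, ok) // BNE Lbody Ldone true
}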
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { if ctxt.Symmorestack[0] == nil { ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0) ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0) } ctxt.Cursym = cursym if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset aoffset := int32(textstksiz) cursym.Args = p.To.U.Argsize cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET */ obj.Bflush(ctxt.Bso) q := (*obj.Prog)(nil) var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { case obj.ATEXT: p.Mark |= LEAF case obj.ARET: break case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue case ABL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case ACBNZ, ACBZ, ACBNZW, ACBZW, ATBZ, ATBNZ, ABCASE, AB, ABEQ, ABNE, ABCS, ABHS, ABCC, ABLO, ABMI, ABPL, ABVS, ABVC, ABHI, ABLS, ABGE, ABLT, ABGT, ABLE, AADR, /* strange */ AADRP: q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } } break } q = p } var o int var q2 *obj.Prog var retjmp *obj.LSym var stkadj int64 for p := cursym.Text; p != nil; p = p.Link { o = int(p.As) switch o { case obj.ATEXT: cursym.Text = p if textstksiz < 0 { ctxt.Autosize = 0 } else { ctxt.Autosize = int32(textstksiz + 8) } if (cursym.Text.Mark&LEAF != 0) && ctxt.Autosize <= 8 { ctxt.Autosize = 0 } else if ctxt.Autosize&(16-1) != 0 { stkadj = 16 - (int64(ctxt.Autosize) & (16 - 1)) ctxt.Autosize += int32(stkadj) cursym.Locals += int32(stkadj) } p.To.Offset = int64(ctxt.Autosize) - 8 if ctxt.Autosize == 0 && !(cursym.Text.Mark&LEAF != 0) { if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Text.From.Sym.Name) } obj.Bflush(ctxt.Bso) cursym.Text.Mark |= LEAF } if !(p.From3.Offset&obj.NOSPLIT != 0) { p = stacksplit(ctxt, p, ctxt.Autosize, bool2int(cursym.Text.From3.Offset&obj.NEEDCTXT == 0)) // emit split check } aoffset = ctxt.Autosize if aoffset > 0xF0 { aoffset = 0xF0 } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = 1 if ctxt.Autosize == 0 { break } aoffset = 0 } q = p if ctxt.Autosize > aoffset { q = ctxt.NewProg() q.As = ASUB q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(ctxt.Autosize) - int64(aoffset) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = int32(q.From.Offset) q.Link = p.Link p.Link = q if cursym.Text.Mark&LEAF != 0 { break } } q1 = ctxt.NewProg() q1.As = AMOVD q1.Lineno = p.Lineno q1.From.Type = obj.TYPE_REG q1.From.Reg = REGLINK q1.To.Type = obj.TYPE_MEM q1.Scond = C_XPRE q1.To.Offset = int64(-aoffset) q1.To.Reg = REGSP q1.Link = q.Link q1.Spadj = aoffset q.Link = q1 if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOV g_panic(g), R1 // CMP ZR, R1 // BEQ end // MOV panic_argp(R1), R2 // ADD $(autosize+8), RSP, R3 // CMP R2, R3 // BNE end // ADD $8, RSP, R4 // MOVD R4, panic_argp(R1) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ARM64 NOP: it encodes to 0 instruction bytes. 
q = q1 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
q.To.Type = obj.TYPE_REG q.To.Reg = REG_R1 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REGZERO q.Reg = REG_R1 q = obj.Appendp(ctxt, q) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH q1 = q q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R1 q.From.Offset = 0 // Panic.argp
q.To.Type = obj.TYPE_REG q.To.Reg = REG_R2 q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(ctxt.Autosize) + 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABNE q.To.Type = obj.TYPE_BRANCH q2 = q q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = 8 q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R1 q.To.Offset = 0 // Panic.argp
q = obj.Appendp(ctxt, q) q.As = obj.ANOP q1.Pcond = q q2.Pcond = q } case obj.ARET: nocache(p) if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } retjmp = p.To.Sym p.To = obj.Addr{} if cursym.Text.Mark&LEAF != 0 { if ctxt.Autosize != 0 { p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(ctxt.Autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -ctxt.Autosize } } else { /* want write-back pre-indexed SP+autosize -> SP, loading REGLINK */ aoffset = ctxt.Autosize if aoffset > 0xF0 { aoffset = 0xF0 } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.Scond = C_XPOST p.From.Offset = int64(aoffset) p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGLINK p.Spadj = -aoffset if ctxt.Autosize > aoffset { q = ctxt.NewProg() q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(ctxt.Autosize) - int64(aoffset) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Link = p.Link q.Spadj = int32(-q.From.Offset) q.Lineno = p.Lineno p.Link = q p = q } } if p.As != obj.ARET { q = ctxt.NewProg() q.Lineno = p.Lineno q.Link = p.Link p.Link = q p = q } if retjmp != nil { // retjmp
p.As = AB p.To.Type = obj.TYPE_BRANCH p.To.Sym = retjmp p.Spadj = +ctxt.Autosize break } p.As = obj.ARET p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = REGLINK p.Spadj = +ctxt.Autosize case AADD, ASUB: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { if p.As == AADD { p.Spadj = int32(-p.From.Offset) } else { p.Spadj = int32(+p.From.Offset) } } break } } }
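// The 0xF0 cap above exists because the pre/post-indexed MOVD that saves and
// reloads the link register can only encode a small immediate offset, so
// frames larger than 240 bytes are split into a plain SUB/ADD of the
// remainder plus the indexed access. A sketch of that arithmetic follows;
// splitFrame is a made-up helper, and "MOVD.W" is informal shorthand for
// AMOVD with Scond = C_XPRE.
package main

import "fmt"

// splitFrame reproduces the aoffset capping in the prologue above: the
// indexed save handles at most 0xF0 bytes, and any excess becomes a
// separate SP adjustment.
func splitFrame(autosize int32) (sub, pre int32) {
	aoffset := autosize
	if aoffset > 0xF0 {
		aoffset = 0xF0
	}
	return autosize - aoffset, aoffset
}

func main() {
	for _, size := range []int32{64, 240, 512} {
		sub, pre := splitFrame(size)
		if sub != 0 {
			fmt.Printf("frame %4d: SUB $%d, RSP; MOVD.W R30, -%d(RSP)\n", size, sub, pre)
		} else {
			fmt.Printf("frame %4d: MOVD.W R30, -%d(RSP)\n", size, pre)
		}
	}
}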