func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) {
		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
		p.Reg = ppc64.REGSP
		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p.Reg = ppc64.REGRT1
		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
		p1 := p
		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
// zerorange zeroes frame slots in [frame+lo, frame+hi) on arm. It
// materializes a zero in R0 once per function (tracked by *r0) and reuses
// it for every store.
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p = appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGRT1, 0)
		p.Reg = arm64.REGRT1
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGTMP, 0)
		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p = appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p.Reg = arm64.REGRT1
		p = appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm64.REGTMP, 0)
		p = appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT2, 0)
		p.Reg = arm64.REGRT1
		p = appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
		p.Scond = arm64.C_XPRE
		p1 := p
		p = appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm64.REGRT2
		p = appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
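// All three zerorange variants above pick a Duff's-device entry point by
// byte offset: every zeroing instruction in the runtime's duffzero is 4
// bytes, and the routine can clear at most 128 pointer-sized words, so
// jumping to offset 4*(128-n) executes exactly the last n stores. A minimal
// sketch of that arithmetic, under the assumption stated in the comments;
// duffzeroEntryOffset is a hypothetical helper name, not part of the compiler.
func duffzeroEntryOffset(cnt, widthptr int64) int64 {
	nWords := cnt / widthptr
	if nWords < 1 || nWords > 128 {
		panic("duffzero clears between 1 and 128 words")
	}
	// Skipping (128 - nWords) four-byte instructions lands on the entry
	// that performs the final nWords stores, mirroring the
	// p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) lines above.
	return 4 * (128 - nWords)
}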
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", gc.Nconv(nl, 0), gc.Tconv(nl.Type, 0), nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := p

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
	} else if q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}
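// clearfat splits the object's width into q dwords plus c trailing bytes
// (w = 8*q + c) and clears each part with appropriately sized stores. A
// byte-level model of that decomposition in plain Go; clearBytes is a
// hypothetical illustration, not compiler code.
func clearBytes(mem []byte) {
	w := uint64(len(mem))
	q := w / 8 // dwords: cleared by the loop, duffzero, or unrolled MOVDs
	c := w % 8 // trailing bytes: cleared one MOVB at a time
	for i := uint64(0); i < q*8; i++ {
		mem[i] = 0
	}
	for i := uint64(0); i < c; i++ {
		mem[q*8+i] = 0
	}
}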
/*
 * generate:
 *	call f
 *	proc=-1	normal call but no return
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 *	proc=3	normal call to C pointer (not Go func value)
 */
func ginscall(f *gc.Node, proc int) {
	if f.Type != nil {
		extra := int32(0)
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the CALL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction byte before the return PC.
				// To avoid that being an unrelated instruction,
				// insert an x86 NOP so that we will have the right line number.
				// x86 NOP 0x90 is really XCHG AX, AX; use that description
				// because the NOP pseudo-instruction will be removed by
				// the linker.
				var reg gc.Node
				gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
				gins(x86.AXCHGL, &reg, &reg)
			}

			p := gins(obj.ACALL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
		gmove(f, &reg)
		reg.Op = gc.OINDREG
		gmove(&reg, &r1)
		reg.Op = gc.OREGISTER
		gins(obj.ACALL, &reg, &r1)

	case 3: // normal call of c function pointer
		gins(obj.ACALL, nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		var stk gc.Node

		stk.Op = gc.OINDREG
		stk.Val.U.Reg = x86.REG_SP
		stk.Xoffset = 0

		// size of arguments at 0(SP)
		var con gc.Node
		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))

		gins(x86.AMOVL, &con, &stk)

		// FuncVal* at 4(SP)
		stk.Xoffset = int64(gc.Widthptr)

		gins(x86.AMOVL, f, &stk)

		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			var reg gc.Node
			gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
			gins(x86.ATESTL, &reg, &reg)
			p := gc.Gbranch(x86.AJEQ, nil, +1)
			cgen_ret(nil)
			gc.Patch(p, gc.Pc)
		}
	}
}
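// For proc == 1 and proc == 2 the 386 version stores the argument size at
// 0(SP) and the FuncVal pointer at 4(SP) before calling runtime.newproc or
// runtime.deferproc, which read their (siz, fn) arguments from exactly that
// layout. A sketch of the frame as the callee sees it; the struct and its
// field names are illustrative assumptions, not the runtime's declarations.
type newprocFrame struct {
	siz int32   // size of the spawned/deferred call's arguments, at 0(SP)
	fn  uintptr // the *FuncVal being called, at 4(SP) (Widthptr == 4 on 386)
}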
// blockcopy copies w bytes from n to res on arm, using the widest move the
// alignment permits and duffcopy for mid-sized forward copies.
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Reg = arm.REG_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Reg = arm.REG_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Reg = arm.REG_R0 + 2

		var src gc.Node
		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			gc.Agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
			gc.Agen(n, &src)
		}

		var tmp gc.Node
		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
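// The direction test above is the classic overlap check: if the source lies
// below the destination and the two ranges overlap, a forward copy would
// clobber source bytes before they are read, so the copy must run backward.
// A minimal sketch of that decision; copyDir is a hypothetical helper.
func copyDir(osrc, odst, w int64, align int) int {
	dir := align // positive: copy forward, stepping +align per element
	if osrc < odst && odst < osrc+w {
		dir = -dir // overlap going up the stack: copy backward instead
	}
	return dir
}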
// clearfat zeroes a fat object on arm, four bytes at a time where possible.
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
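// The clearfat and zerorange variants above all share a three-tier policy:
// tiny regions get unrolled stores, mid-sized regions tail-call into
// duffzero, and large regions get an explicit store loop with a CMP/BNE
// back edge. A sketch of that selection in terms of word counts; names and
// the string labels are illustrative only.
func zeroStrategy(words int64) string {
	switch {
	case words < 4:
		return "unrolled stores" // one store per word, no setup cost
	case words <= 128:
		return "duffzero" // jump into the Duff's-device tail
	default:
		return "store loop" // pointer-bump loop bounded by an end register
	}
}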
/*
 * generate:
 *	call f
 *	proc=-1	normal call but no return
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 *	proc=3	normal call to C pointer (not Go func value)
 */
func ginscall(f *gc.Node, proc int) {
	if f.Type != nil {
		extra := int32(0)
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the CALL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction byte before the return PC.
				// To avoid that being an unrelated instruction,
				// insert a ppc64 NOP so that we will have the right line number.
				// The ppc64 NOP is really or r0, r0, r0; use that description
				// because the NOP pseudo-instruction would be removed by
				// the linker.
				var reg gc.Node
				gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
				gins(ppc64.AOR, &reg, &reg)
			}

			p := gins(ppc64.ABL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
		gmove(f, &reg)
		reg.Op = gc.OINDREG
		gmove(&reg, &r1)
		reg.Op = gc.OREGISTER
		ginsBL(&reg, &r1)

	case 3: // normal call of c function pointer
		ginsBL(nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		var con gc.Node
		gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))

		var reg gc.Node
		gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
		var reg2 gc.Node
		gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
		gmove(f, &reg)

		gmove(&con, &reg2)
		p := gins(ppc64.AMOVW, &reg2, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = ppc64.REGSP
		p.To.Offset = 8

		p = gins(ppc64.AMOVD, &reg, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = ppc64.REGSP
		p.To.Offset = 16

		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			if gc.Hasdefer == 0 {
				gc.Fatal("hasdefer=0 but has defer")
			}
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
			p := gins(ppc64.ACMP, &reg, nil)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REGZERO
			p = gc.Gbranch(ppc64.ABEQ, nil, +1)
			cgen_ret(nil)
			gc.Patch(p, gc.Pc)
		}
	}
}
/*
 * generate:
 *	call f
 *	proc=-1	normal call but no return
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 *	proc=3	normal call to C pointer (not Go func value)
 */
func ginscall(f *gc.Node, proc int) {
	if f.Type != nil {
		extra := int32(0)
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the BL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction before that return PC.
				// To avoid that being an unrelated instruction,
				// insert a NOP so that we will have the right line number.
				// The ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
				// Use the latter form because the NOP pseudo-instruction
				// would be removed by the linker.
				var r gc.Node
				gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)

				p := gins(arm.AAND, &r, &r)
				p.Scond = arm.C_SCOND_EQ
			}

			p := gins(arm.ABL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		var r gc.Node
		gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
		gmove(f, &r)
		r.Op = gc.OINDREG
		gmove(&r, &r1)
		r.Op = gc.OREGISTER
		r1.Op = gc.OINDREG
		gins(arm.ABL, &r, &r1)

	case 3: // normal call of c function pointer
		gins(arm.ABL, nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		var r gc.Node
		regalloc(&r, gc.Types[gc.Tptr], nil)

		var con gc.Node
		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
		gins(arm.AMOVW, &con, &r)

		p := gins(arm.AMOVW, &r, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REGSP
		p.To.Offset = 4

		gins(arm.AMOVW, f, &r)
		p = gins(arm.AMOVW, &r, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REGSP
		p.To.Offset = 8

		regfree(&r)

		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
			p := gins(arm.ACMP, &con, nil)
			p.Reg = arm.REG_R0
			p = gc.Gbranch(arm.ABEQ, nil, +1)
			cgen_ret(nil)
			gc.Patch(p, gc.Pc)
		}
	}
}
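// In the proc == 2 case all three ginscall variants test deferproc's result
// (returned in AX on 386, R0 on arm, R3 on ppc64) against zero and branch
// to the function epilogue when it is nonzero, which is how a recover
// unwinds into the deferring frame. A rough source-level model of that
// control flow; deferLowering and its callback are illustrative stand-ins,
// not runtime APIs.
func deferLowering(deferredCall func() int32) (returnedEarly bool) {
	if deferredCall() != 0 {
		// Corresponds to the cgen_ret(nil) emitted after the BEQ/AJEQ:
		// skip the rest of the function body and return immediately.
		return true
	}
	return false
}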
// clearfat zeroes a fat object on arm64.
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // dwords

	if reg[arm64.REGRT1-arm64.REG_R0] > 0 {
		gc.Fatal("R%d in use during clearfat", arm64.REGRT1-arm64.REG_R0)
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1)
	reg[arm64.REGRT1-arm64.REG_R0]++
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(arm64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(arm64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(arm64.AMOVD, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		p.Scond = arm64.C_XPRE
		pl := p

		p = gcmp(arm64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R16 on the last zeroed dword
		boff = 8
	} else if q >= 4 && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
		p := gins(arm64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := gc.Sysfunc("duffzero")
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm64.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R16 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(arm64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(arm64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	reg[arm64.REGRT1-arm64.REG_R0]--
}
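// The ppc64 and arm64 fast paths use pre-indexed stores (MOVDU, and MOVD
// with C_XPRE): the pointer is advanced by 8 first, then the store goes
// through the updated address. That is why the destination register is
// backed up by 8 before the loop (the ASUB above) and why the trailing-byte
// loop starts at boff = 8 rather than 0: the pointer is left parked on the
// last dword written. A plain-Go model of that addressing mode, working in
// dword indices; preIndexedZero is a hypothetical illustration.
func preIndexedZero(mem []uint64, ptr int) int {
	// Callers pass ptr one slot before the first dword to clear,
	// matching the SUB $8 setup in the generated code.
	for ptr+1 < len(mem) {
		ptr++        // pre-increment, as MOVDU / MOVD.P does
		mem[ptr] = 0 // store through the updated address
	}
	return ptr // parked on the last zeroed dword
}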
/*
 * block copy:
 *	memmove(&res, &n, w);
 * NB: character copy assumes a little-endian architecture
 */
func sgen(n *gc.Node, res *gc.Node, w int64) {
	if gc.Debug['g'] != 0 {
		fmt.Printf("\nsgen w=%d\n", w)
		gc.Dump("r", n)
		gc.Dump("res", res)
	}

	if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
		gc.Fatal("sgen UINF")
	}

	if w < 0 || int64(int32(w)) != w {
		gc.Fatal("sgen copy %d", w)
	}

	if n.Type == nil {
		gc.Fatal("sgen: missing type")
	}

	if w == 0 {
		// evaluate side effects only.
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], nil)

		agen(res, &dst)
		agen(n, &dst)
		regfree(&dst)
		return
	}

	// If copying .args, that's all the results, so record definition sites
	// for them for the liveness analysis.
	if res.Op == gc.ONAME && res.Sym.Name == ".args" {
		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
			if l.N.Class == gc.PPARAMOUT {
				gc.Gvardef(l.N)
			}
		}
	}

	// Avoid taking the address for simple enough types.
	if componentgen(n, res) {
		return
	}

	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
	}
	c := int32(w / int64(align))

	// offset on the stack
	osrc := stkof(n)

	odst := stkof(res)
	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
		// osrc and odst both on stack, and at least one is in
		// an unknown position. Could generate code to test
		// for forward/backward copy, but instead just copy
		// to a temporary location first.
		var tmp gc.Node
		gc.Tempname(&tmp, n.Type)

		sgen(n, &tmp, w)
		sgen(&tmp, res, w)
		return
	}

	if osrc%int32(align) != 0 || odst%int32(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Val.U.Reg = REGALLOC_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Val.U.Reg = REGALLOC_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Val.U.Reg = REGALLOC_R0 + 2

		var src gc.Node
		regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			agen(res, &dst)
			agen(n, &src)
		}

		var tmp gc.Node
		regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		regfree(&tmp)
		regfree(&src)
		regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		agenr(n, &dst, res) // temporarily use dst
		regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agenr(res, &dst, res)
		agenr(n, &src, nil)
	}

	var tmp gc.Node
	regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		regfree(&nend)
	} else {
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	regfree(&dst)
	regfree(&src)
	regfree(&tmp)
}
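// When stkof reports that both operands live on the stack but at least one
// is at a statically unknown offset (the ±1000 sentinels above), sgen cannot
// prove a safe copy direction, so it round-trips through a temporary via the
// two recursive sgen calls. The same idea at the source level; copyViaTemp
// is a hypothetical illustration, not compiler code.
func copyViaTemp(dst, src []byte) {
	tmp := make([]byte, len(src)) // gc.Tempname(&tmp, n.Type)
	copy(tmp, src)                // sgen(n, &tmp, w)
	copy(dst, tmp)                // sgen(&tmp, res, w)
}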