func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width) } w := uint64(uint64(nl.Type.Width)) // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := uint64(w % 8) // bytes q := uint64(w / 8) // dwords if gc.Reginuse(ppc64.REGRT1) { gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1)) } var r0 gc.Node gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO) var dst gc.Node gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1) gc.Regrealloc(&dst) gc.Agen(nl, &dst) var boff uint64 if q > 128 { p := gins(ppc64.ASUB, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 var end gc.Node gc.Regalloc(&end, gc.Types[gc.Tptr], nil) p = gins(ppc64.AMOVD, &dst, &end) p.From.Type = obj.TYPE_ADDR p.From.Offset = int64(q * 8) p = gins(ppc64.AMOVDU, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = 8 pl := (*obj.Prog)(p) p = gins(ppc64.ACMP, &dst, &end) gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl) gc.Regfree(&end) // The loop leaves R3 on the last zeroed dword boff = 8 } else if q >= 4 { p := gins(ppc64.ASUB, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 f := (*gc.Node)(gc.Sysfunc("duffzero")) p = gins(obj.ADUFFZERO, nil, f) gc.Afunclit(&p.To, f) // 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s p.To.Offset = int64(4 * (128 - q)) // duffzero leaves R3 on the last zeroed dword boff = 8 } else { var p *obj.Prog for t := uint64(0); t < q; t++ { p = gins(ppc64.AMOVD, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(8 * t) } boff = 8 * q } var p *obj.Prog for t := uint64(0); t < c; t++ { p = gins(ppc64.AMOVB, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(t + boff) } gc.Regfree(&dst) }
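
// clearfat clears (zeroes) the fat object pointed to by nl.
// arm64 version: objects of fewer than 4 doublewords are cleared with
// unrolled MOVD stores, objects of 4-128 doublewords via runtime.duffzero
// (except on darwin, where ld64 cannot handle the BR26 relocation with a
// non-zero addend), and larger objects with a pre-indexed MOVD/CMP/BNE
// loop. Trailing bytes are cleared with MOVB stores.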
func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width) } w := uint64(uint64(nl.Type.Width)) // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := uint64(w % 8) // bytes q := uint64(w / 8) // dwords var r0 gc.Node gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO) var dst gc.Node // REGRT1 is reserved on arm64, see arm64/gsubr.go. gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1) gc.Agen(nl, &dst) var boff uint64 if q > 128 { p := gins(arm64.ASUB, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 var end gc.Node gc.Regalloc(&end, gc.Types[gc.Tptr], nil) p = gins(arm64.AMOVD, &dst, &end) p.From.Type = obj.TYPE_ADDR p.From.Offset = int64(q * 8) p = gins(arm64.AMOVD, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = 8 p.Scond = arm64.C_XPRE pl := (*obj.Prog)(p) p = gcmp(arm64.ACMP, &dst, &end) gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl) gc.Regfree(&end) // The loop leaves R16 on the last zeroed dword boff = 8 } else if q >= 4 && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend p := gins(arm64.ASUB, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 f := (*gc.Node)(gc.Sysfunc("duffzero")) p = gins(obj.ADUFFZERO, nil, f) gc.Afunclit(&p.To, f) // 4 and 128 = magic constants: see ../../runtime/asm_arm64x.s p.To.Offset = int64(4 * (128 - q)) // duffzero leaves R16 on the last zeroed dword boff = 8 } else { var p *obj.Prog for t := uint64(0); t < q; t++ { p = gins(arm64.AMOVD, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(8 * t) } boff = 8 * q } var p *obj.Prog for t := uint64(0); t < c; t++ { p = gins(arm64.AMOVB, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(t + boff) } }
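
// clearfat clears (zeroes) the fat object pointed to by nl.
// amd64 version: objects of fewer than 4 quads are cleared with unrolled
// MOVQ/MOVL/MOVB stores, objects of more than 128 quads (and all objects
// on NaCl) with REP STOSQ, and everything in between via runtime.duffzero.
// Trailing bytes are cleared with an overlapping store or STOSB.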
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := nl.Type.Width

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // quads

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSQ.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSQ loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Agenr(nl, &n1, nil)

		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for ; q > 0; q-- {
			n1.Type = z.Type
			gins(x86.AMOVQ, &z, &n1)
			n1.Xoffset += 8
		}

		if c >= 4 {
			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
			c -= 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for ; c > 0; c-- {
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var oldn1 gc.Node
	var n1 gc.Node
	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
	gc.Agen(nl, &n1)

	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || gc.Nacl {
		gconreg(movptr, q, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
	} else {
		if di := dzDI(q); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(q)
	}

	z := ax
	di := n1
	if w >= 8 && c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT64]
		di.Type = z.Type
		p := gins(x86.AMOVQ, &z, &di)
		p.To.Scale = 1
		p.To.Offset = c - 8
	} else if c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT32]
		di.Type = z.Type
		gins(x86.AMOVL, &z, &di)
		if c > 4 {
			p := gins(x86.AMOVL, &z, &di)
			p.To.Scale = 1
			p.To.Offset = c - 4
		}
	} else {
		for c > 0 {
			gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
			c--
		}
	}

	restx(&n1, &oldn1)
	restx(&ax, &oldax)
}
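
// clearfat clears (zeroes) the fat object pointed to by nl.
// mips64 version: objects of more than 128 doublewords are cleared with a
// MOVV/ADDV/BNE loop; everything else uses unrolled MOVV stores, because
// the DUFFZERO path is disabled (see golang.org/issue/12108). Trailing
// bytes are cleared with MOVB stores.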
func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width) } w := uint64(nl.Type.Width) // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := w % 8 // bytes q := w / 8 // dwords if gc.Reginuse(mips.REGRT1) { gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1)) } var r0 gc.Node gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO) var dst gc.Node gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1) gc.Regrealloc(&dst) gc.Agen(nl, &dst) var boff uint64 if q > 128 { p := gins(mips.ASUBV, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 var end gc.Node gc.Regalloc(&end, gc.Types[gc.Tptr], nil) p = gins(mips.AMOVV, &dst, &end) p.From.Type = obj.TYPE_ADDR p.From.Offset = int64(q * 8) p = gins(mips.AMOVV, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = 8 pl := p p = gins(mips.AADDV, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl) gc.Regfree(&end) // The loop leaves R1 on the last zeroed dword boff = 8 // TODO(dfc): https://golang.org/issue/12108 // If DUFFZERO is used inside a tail call (see genwrapper) it will // overwrite the link register. } else if false && q >= 4 { p := gins(mips.ASUBV, nil, &dst) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 f := gc.Sysfunc("duffzero") p = gins(obj.ADUFFZERO, nil, f) gc.Afunclit(&p.To, f) // 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s p.To.Offset = int64(8 * (128 - q)) // duffzero leaves R1 on the last zeroed dword boff = 8 } else { var p *obj.Prog for t := uint64(0); t < q; t++ { p = gins(mips.AMOVV, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(8 * t) } boff = 8 * q } var p *obj.Prog for t := uint64(0); t < c; t++ { p = gins(mips.AMOVB, &r0, &dst) p.To.Type = obj.TYPE_MEM p.To.Offset = int64(t + boff) } gc.Regfree(&dst) }
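
// clearfat clears (zeroes) the fat object pointed to by nl.
// 386 version: objects of fewer than 4 longs are cleared with unrolled
// MOVL/MOVB stores, objects of more than 128 longs (or at least 4 longs
// on NaCl) with REP STOSL, and everything in between via runtime.duffzero.
// Trailing bytes are cleared with STOSB.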
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // longs

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSL.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSL loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)

		gc.Agen(nl, &n1)
		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for ; q > 0; q-- {
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for ; c > 0; c-- {
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
	gc.Agen(nl, &n1)
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || (q >= 4 && gc.Nacl) {
		gconreg(x86.AMOVL, int64(q), x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
	} else if q >= 4 {
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))

		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
		p.To.Offset = 1 * (128 - int64(q))
	} else {
		for q > 0 {
			gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
			q--
		}
	}

	for c > 0 {
		gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
		c--
	}
}
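
// clearfat clears (zeroes) the fat object pointed to by nl.
// arm version: objects of more than 128 words are cleared with a
// post-indexed MOVW/CMP/BNE loop, objects of 4-128 words via
// runtime.duffzero (except on NaCl), and smaller objects with unrolled
// post-indexed MOVW stores. Trailing bytes are cleared with MOVB stores.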
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // words

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
// clearfat clears (i.e. replaces with zeros) the value pointed to by nl.
// s390x version: objects larger than clearLoopCutoff are cleared in a loop
// that zeroes 256 bytes per iteration using XC; the remainder is cleared
// with individual moves or XC instructions, at most 256 bytes at a time.
func clearfat(nl *gc.Node) {
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], nil)
	gc.Agen(nl, &dst)

	var boff int64
	w := nl.Type.Width
	if w > clearLoopCutoff {
		// Generate a loop clearing 256 bytes per iteration using XCs.
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(s390x.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w - (w % 256)

		p = gins(s390x.AXC, &dst, &dst)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = 0
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.From3 = new(obj.Addr)
		p.From3.Offset = 256
		p.From3.Type = obj.TYPE_CONST
		pl := p

		ginscon(s390x.AADD, 256, &dst)
		gins(s390x.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), pl)
		gc.Regfree(&end)
		w = w % 256
	}

	// Generate instructions to clear the remaining memory.
	for w > 0 {
		n := w

		// Can clear at most 256 bytes per instruction.
		if n > 256 {
			n = 256
		}

		switch n {
		// Handle very small clears using moves.
		case 8, 4, 2, 1:
			ins := s390x.AMOVB
			switch n {
			case 8:
				ins = s390x.AMOVD
			case 4:
				ins = s390x.AMOVW
			case 2:
				ins = s390x.AMOVH
			}
			p := gins(ins, nil, &dst)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = 0
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = boff

		// Handle clears that would require multiple moves with a XC.
		default:
			p := gins(s390x.AXC, &dst, &dst)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = boff
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = boff
			p.From3 = new(obj.Addr)
			p.From3.Offset = n
			p.From3.Type = obj.TYPE_CONST
		}

		boff += n
		w -= n
	}

	gc.Regfree(&dst)
}
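
// clearfat clears (zeroes) the fat object pointed to by nl.
// amd64 version with SSE: objects larger than 1024 bytes (or at least 64
// bytes on NaCl and Plan 9) are cleared with REP STOSQ, objects of at
// least 64 bytes via the SSE-based runtime.duffzero with an overlapping
// MOVUPS for any tail, and smaller objects with clearfat_tail.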
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	w := nl.Type.Width

	if w > 1024 || (w >= 64 && (gc.Nacl || isPlan9)) {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var ax gc.Node
		var oldax gc.Node
		savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
		gconreg(x86.AMOVL, 0, x86.REG_AX)
		gconreg(movptr, w/8, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+

		if w%8 != 0 {
			n1.Op = gc.OINDREG
			clearfat_tail(&n1, w%8)
		}

		restx(&n1, &oldn1)
		restx(&ax, &oldax)
		return
	}

	if w >= 64 {
		var oldn1 gc.Node
		var n1 gc.Node
		savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
		gc.Agen(nl, &n1)

		var vec_zero gc.Node
		var old_x0 gc.Node
		savex(x86.REG_X0, &vec_zero, &old_x0, nil, gc.Types[gc.TFLOAT64])
		gins(x86.AXORPS, &vec_zero, &vec_zero)

		if di := dzDI(w); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(w)

		if w%16 != 0 {
			n1.Op = gc.OINDREG
			n1.Xoffset -= 16 - w%16
			gins(x86.AMOVUPS, &vec_zero, &n1)
		}

		restx(&vec_zero, &old_x0)
		restx(&n1, &oldn1)
		return
	}

	// NOTE: Must use agen, not igen, so that optimizer sees address
	// being taken. We are not writing on field boundaries.
	var n1 gc.Node
	gc.Agenr(nl, &n1, nil)

	n1.Op = gc.OINDREG
	clearfat_tail(&n1, w)
	gc.Regfree(&n1)
}