/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// copy from byte to full registers
	t := gc.Types[gc.TUINT32]

	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT32]
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	var nt gc.Node
	gc.Tempname(&nt, nl.Type)
	gc.Cgen(nl, &nt)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nr, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gmove(&nt, &n2)
	a := optoas(op, t)
	gins(a, &n2, &n1)
	gc.Regfree(&n2)
	gmove(&n1, res)
	gc.Regfree(&n1)
	return true
}
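// Illustrative sketch (not part of the compiler): the transformation cgen_bmul
// performs, expressed at the Go level — widen both byte operands to the full
// register width it chose above (int32 for signed operands), multiply there,
// then truncate back to 8 bits. The function name is hypothetical.
func mulByteViaFullWidth(a, b int8) int8 {
	wide := int32(a) * int32(b) // full-width multiply in 32-bit registers
	return int8(wide)           // truncate: the low 8 bits are the byte product
}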
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gcmp(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
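// Illustrative sketch (not compiler code): the operand swap in ginscmp only
// preserves meaning because the operator is reversed at the same time, which
// is what the gc.Brrev call above does. A hypothetical stand-alone version of
// that mapping:
func reverseCmp(op string) string {
	switch op {
	case "<":
		return ">"
	case "<=":
		return ">="
	case ">":
		return "<"
	case ">=":
		return "<="
	default: // == and != are symmetric under operand swap
		return op
	}
}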
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	var r1, r2, g1, g2 gc.Node
	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	t := nl.Type
	t0 := t

	if t.Width < 8 {
		if t.IsSigned() {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl
		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := ginsbranch(mips.ABNE, nil, &tr, nil, 0)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	gins3(a, &tr, &tl, nil)
	gc.Regfree(&tr)
	if op == gc.ODIV {
		var lo gc.Node
		gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
		gins(mips.AMOVV, &lo, &tl)
	} else { // remainder in REG_HI
		var hi gc.Node
		gc.Nodreg(&hi, gc.Types[gc.TUINT64], mips.REG_HI)
		gins(mips.AMOVV, &hi, &tl)
	}
	gmove(&tl, res)
	gc.Regfree(&tl)
}
/*
 * generate high multiply
 *	res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

		// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
/*
 * generate
 *	as n, $c (CMP/CMPU)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatalf("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(ppc64.AMOVD, &n1, &ntmp)
	rawgins(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	switch as {
	case x86.AADDL, x86.AMOVL, x86.ALEAL:
		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	default:
		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	}

	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
		// cannot have 64-bit immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(x86.AMOVQ, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}
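// Illustrative sketch (not compiler code): the range test used above. x86-64
// arithmetic instructions accept at most a sign-extended 32-bit immediate, so
// any constant outside [-2^31, 2^31) has to be materialized in a register with
// a MOVQ first. The helper name is hypothetical.
func fitsInImm32(c int64) bool {
	return c >= -(1<<31) && c < 1<<31
}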
// RightShiftWithCarry generates a constant unsigned
// right shift with carry.
//
// res = n >> shift // with carry
func RightShiftWithCarry(n *gc.Node, shift uint, res *gc.Node) {
	// Extra 1 is for carry bit.
	maxshift := uint(n.Type.Width*8 + 1)
	if shift == 0 {
		gmove(n, res)
	} else if shift < maxshift {
		// 1. clear rightmost bit of target
		var n1 gc.Node
		gc.Nodconst(&n1, n.Type, 1)
		gins(optoas(gc.ORSH, n.Type), &n1, n)
		gins(optoas(gc.OLSH, n.Type), &n1, n)
		// 2. add carry flag to target
		var n2 gc.Node
		gc.Nodconst(&n1, n.Type, 0)
		gc.Regalloc(&n2, n.Type, nil)
		gins(optoas(gc.OAS, n.Type), &n1, &n2)
		gins(arm64.AADC, &n2, n)
		// 3. right rotate 1 bit
		gc.Nodconst(&n1, n.Type, 1)
		gins(arm64.AROR, &n1, n)

		// ARM64 backend doesn't eliminate shifts by 0. It is manually checked here.
		if shift > 1 {
			var n3 gc.Node
			gc.Nodconst(&n3, n.Type, int64(shift-1))
			cgen_shift(gc.ORSH, true, n, &n3, res)
		} else {
			gmove(n, res)
		}

		gc.Regfree(&n2)
	} else {
		gc.Fatalf("RightShiftWithCarry: shift(%v) is bigger than max size(%v)", shift, maxshift)
	}
}
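// Illustrative sketch (not compiler code): the value the instruction sequence
// above computes, written in plain Go for a 64-bit operand. The incoming carry
// bit is shifted in from the top, as if the operand were one bit wider; the
// helper name is hypothetical and assumes 1 <= shift <= 64.
func rightShiftWithCarry64(n uint64, shift uint, carry uint64) uint64 {
	if shift == 0 {
		return n
	}
	// carry lands in bit 64-shift; the low bits of n are shifted out.
	return (n >> shift) | (carry << (64 - shift))
}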
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] || t.Etype == gc.Tptr {
		if (n1.Op == gc.OLITERAL || n1.Op == gc.OADDR && n1.Left.Op == gc.ONAME) && n2.Op != gc.OLITERAL {
			// Reverse comparison to place constant (including address constant) last.
			op = gc.Brrev(op)
			n1, n2 = n2, n1
		}
	}

	// General case.
	var r1, r2, g1, g2 gc.Node

	// A special case to make write barriers more efficient.
	// Comparing the first field of a named struct can be done directly.
	base := n1
	if n1.Op == gc.ODOT && n1.Left.Type.Etype == gc.TSTRUCT && n1.Left.Type.Type.Sym == n1.Right.Sym {
		base = n1.Left
	}

	if base.Op == gc.ONAME && base.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] || n2.Op == gc.OADDR && n2.Left.Op == gc.ONAME && n2.Left.Class == gc.PEXTERN {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	gmove(&n1, &n2)
	gins(as, &n2, n)
	gc.Regfree(&n2)
}
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if t.IsInteger() && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	var r1, r2, g1, g2 gc.Node

	// A special case to make write barriers more efficient.
	// Comparing the first field of a named struct can be done directly.
	base := n1
	if n1.Op == gc.ODOT && n1.Left.Type.IsStruct() && n1.Left.Type.Field(0).Sym == n1.Sym {
		base = n1.Left
	}

	if base.Op == gc.ONAME && base.Class != gc.PAUTOHEAP || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && t.IsInteger() && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	// generate operands in "8-bit" registers.
	var n1b gc.Node
	gc.Regalloc(&n1b, nl.Type, res)
	gc.Cgen(nl, &n1b)
	var n2b gc.Node
	gc.Regalloc(&n2b, nr.Type, nil)
	gc.Cgen(nr, &n2b)

	// perform full-width multiplication.
	t := gc.Types[gc.TUINT64]

	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT64]
	}
	var n1 gc.Node
	gc.Nodreg(&n1, t, int(n1b.Reg))
	var n2 gc.Node
	gc.Nodreg(&n2, t, int(n2b.Reg))
	a := optoas(op, t)
	gins(a, &n2, &n1)

	// truncate.
	gmove(&n1, res)

	gc.Regfree(&n1b)
	gc.Regfree(&n2b)
	return true
}
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := gc.GetReg(dr)
	gc.Nodreg(x, gc.Types[gc.TINT32], dr)

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{}

	if r > 0 && !gc.Samereg(x, res) {
		gc.Tempname(oldx, gc.Types[gc.TINT32])
		gmove(x, oldx)
	}

	gc.Regalloc(x, t, x)
}
// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Regalloc(&n1, res.Type, res)
	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
	p := gins(mov, nil, &n1)
	p.From.Type = obj.TYPE_REG
	p.From.Reg = x86.REG_TLS
	p = gins(mov, nil, &n1)
	p.From = p.To
	p.From.Type = obj.TYPE_MEM
	p.From.Index = x86.REG_TLS
	p.From.Scale = 1
	gmove(&n1, res)
	gc.Regfree(&n1)
}
/*
 * register dr is one of the special ones (AX, CX, DI, SI, etc.).
 * we need to use it. if it is already allocated as a temporary
 * (r > 1; can only happen if a routine like sgen passed a
 * special as cgen's res and then cgen used regalloc to reuse
 * it as its own temporary), then move it for now to another
 * register. caller must call restx to move it back.
 * the move is not necessary if dr == res, because res is
 * known to be dead.
 */
func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
	r := uint8(gc.GetReg(dr))

	// save current ax and dx if they are live
	// and not the destination
	*oldx = gc.Node{}

	gc.Nodreg(x, t, dr)
	if r > 1 && !gc.Samereg(x, res) {
		gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
		x.Type = gc.Types[gc.TINT64]
		gmove(x, oldx)
		x.Type = t
		oldx.Etype = r // squirrel away old r value
		gc.SetReg(dr, 1)
	}
}
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != mips.AMOVV && (c < -mips.BIG || c > mips.BIG) || n2.Op != gc.OREGISTER || as == mips.AMUL || as == mips.AMULU || as == mips.AMULV || as == mips.AMULVU {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		rawgins(mips.AMOVV, &n1, &ntmp)
		rawgins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) || as == arm64.AMUL || n2 != nil && n2.Op != gc.OREGISTER {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(arm64.AMOVD, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}
// generate
//	as $c, n
func ginscon(as obj.As, c int64, n2 *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != s390x.AMOVD && (c < -s390x.BIG || c > s390x.BIG) || n2.Op != gc.OREGISTER {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		rawgins(s390x.AMOVD, &n1, &ntmp)
		rawgins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}
/*
 * generate high multiply:
 *	res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node
	var ax gc.Node
	var dx gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	// multiply.
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
/*
 * generate high multiply:
 *	res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	var ax, oldax, dx, olddx gc.Node
	savex(x86.REG_AX, &ax, &oldax, res, gc.Types[gc.TUINT32])
	savex(x86.REG_DX, &dx, &olddx, res, gc.Types[gc.TUINT32])

	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		var byteAH, byteDX gc.Node
		gc.Nodreg(&byteAH, t, x86.REG_AH)
		gc.Nodreg(&byteDX, t, x86.REG_DX)
		gmove(&byteAH, &byteDX)
	}

	gmove(&dx, res)

	restx(&ax, &oldax)
	restx(&dx, &olddx)
}
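// Illustrative sketch (not compiler code): what OHMUL computes for 32-bit
// operands — the upper half of the full double-width product, which the code
// above reads out of the DX register after a one-operand MUL/IMUL. The helper
// name is hypothetical.
func hmul32(a, b int32) int32 {
	full := int64(a) * int64(b) // 64-bit product
	return int32(full >> 32)    // high 32 bits
}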
/*
 * generate
 *	as n, $c (CMP)
 */
func ginscon2(as obj.As, n2 *gc.Node, c int64) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatalf("ginscon2")

	case arm64.ACMP:
		if -arm64.BIG <= c && c <= arm64.BIG {
			gcmp(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(arm64.AMOVD, &n1, &ntmp)
	gcmp(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(uint64(nl.Type.Width))

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := uint64(w % 8) // bytes
	q := uint64(w / 8) // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := (*obj.Prog)(p)

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
	} else if q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := (*gc.Node)(gc.Sysfunc("duffzero"))
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = mips.AMOVB

	case 2:
		op = mips.AMOVH

	case 4:
		op = mips.AMOVW

	case 8:
		op = mips.AMOVV
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(mips.AMOVV, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(mips.AMOVV, &src, &nend)
		}

		p := gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(mips.AMOVV, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		ploop := p

		p = gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(dir)

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(dir)

		gc.Patch(ginsbranch(mips.ABNE, nil, &src, &nend, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// TODO: Instead of generating ADDV $-8,R8; ADDV
		// $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
		// ADDV $8,R7;) just generate the offsets directly and
		// eliminate the ADDs. That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)

			p = gins(mips.AADDV, nil, &src)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(dir)

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)

			p = gins(mips.AADDV, nil, &dst)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(dir)
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
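// Illustrative sketch (not compiler code): the direction rule blockcopy applies
// above — when the source lies before the destination and the two ranges
// overlap, a forward copy would clobber bytes it has not read yet, so the copy
// must run backwards. The helper name is hypothetical.
func mustCopyBackwards(osrc, odst, w int64) bool {
	return osrc < odst && odst < osrc+w
}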
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := int(optoas(op, nl.Type))

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)
	gmove(&n2, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
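// Illustrative sketch (not compiler code): the semantics the "fix up large
// shifts" branch above preserves. Go requires a shift count of at least the
// operand width to produce 0 (or all sign bits for a signed right shift),
// while the hardware shift instructions effectively mask the count, so the
// generated code compares the count against the width and clamps it. The
// helper name is hypothetical.
func shiftRightSigned32(x int32, count uint32) int32 {
	if count >= 32 {
		// Result Go requires; a raw hardware SAR would use count&31 instead.
		count = 31
	}
	return x >> count
}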
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will generate an undefined result.
	// Also need to explicitly trap on division by zero;
	// the hardware will silently generate an undefined result.
	// DIVW will leave an unpredictable result in the higher 32 bits,
	// so always use DIVD/DIVDU.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = 0
		}
	}

	if t.Width < 8 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = 0
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl
		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)

	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = ppc64.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
	if check != 0 {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)
			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)
			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
		gmove(&tl, res)
	} else {
		// A%B = A-(A/B*B)
		var tm gc.Node
		gc.Regalloc(&tm, t, nil)

		// patch div to use the 3 register form
		// TODO(minux): add gins3?
		p1.Reg = p1.To.Reg

		p1.To.Reg = tm.Reg
		gins(optoas(gc.OMUL, t), &tr, &tm)
		gc.Regfree(&tr)
		gins(optoas(gc.OSUB, t), &tm, &tl)
		gc.Regfree(&tm)
		gmove(&tl, res)
	}

	gc.Regfree(&tl)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
}
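// Illustrative sketch (not compiler code): the two edge cases the checks above
// guard. Division by zero must reach a runtime panic rather than whatever the
// hardware produces, and the most negative value divided by -1 must wrap back
// to itself (with remainder 0), matching Go semantics. The helper name is
// hypothetical.
func divmodChecked(a, b int64) (int64, int64) {
	if b == 0 {
		panic("integer divide by zero")
	}
	if b == -1 {
		// Avoid the hardware's undefined/trapping case for MinInt64 / -1:
		// a / -1 is -a (wrapping back to MinInt64 when a == MinInt64),
		// and a % -1 is 0.
		return -a, 0
	}
	return a / b, a % b
}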
/*
 * generate comparison of nl, nr, both 64-bit.
 * nl is memory; nr is constant or memory.
 */
func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
	var lo1 gc.Node
	var hi1 gc.Node
	var lo2 gc.Node
	var hi2 gc.Node
	var rr gc.Node

	split64(nl, &lo1, &hi1)
	split64(nr, &lo2, &hi2)

	// compare most significant word;
	// if they differ, we're done.
	t := hi1.Type

	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
		gins(x86.ACMPL, &hi1, &hi2)
	} else {
		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
		gins(x86.AMOVL, &hi1, &rr)
		gins(x86.ACMPL, &rr, &hi2)
		gc.Regfree(&rr)
	}

	var br *obj.Prog
	switch op {
	default:
		gc.Fatalf("cmp64 %v %v", gc.Oconv(int(op), 0), t)

		// cmp hi
		// jne L
		// cmp lo
		// jeq to
		// L:
	case gc.OEQ:
		br = gc.Gbranch(x86.AJNE, nil, -likely)

		// cmp hi
		// jne to
		// cmp lo
		// jne to
	case gc.ONE:
		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)

		// cmp hi
		// jgt to
		// jlt L
		// cmp lo
		// jge to (or jgt to)
		// L:
	case gc.OGE,
		gc.OGT:
		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)

		// cmp hi
		// jlt to
		// jgt L
		// cmp lo
		// jle to (or jlt to)
		// L:
	case gc.OLE,
		gc.OLT:
		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
	}

	// compare least significant word
	t = lo1.Type

	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
		gins(x86.ACMPL, &lo1, &lo2)
	} else {
		gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
		gins(x86.AMOVL, &lo1, &rr)
		gins(x86.ACMPL, &rr, &lo2)
		gc.Regfree(&rr)
	}

	// jump again
	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)

	// point first branch down here if appropriate
	if br != nil {
		gc.Patch(br, gc.Pc)
	}

	splitclean()
	splitclean()
}
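// Illustrative sketch (not compiler code): the comparison strategy cmp64
// lowers to on a 32-bit machine — compare the high words first (signed), and
// only consult the low words (unsigned) when the high words are equal. The
// helper name is hypothetical.
func less64via32(a, b int64) bool {
	ahi, alo := int32(a>>32), uint32(a)
	bhi, blo := int32(b>>32), uint32(b)
	if ahi != bhi {
		return ahi < bhi // signed compare of the most significant words
	}
	return alo < blo // unsigned compare of the least significant words
}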
/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8, // truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16, // truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = arm64.AMOVWU

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = arm64.AMOVD

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = arm64.AMOVB
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = arm64.AMOVBU
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = arm64.AMOVH
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = arm64.AMOVHU
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = arm64.AMOVW
		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = arm64.AMOVWU
		goto rdst

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = arm64.AFCVTZSSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = arm64.AFCVTZSDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = arm64.AFCVTZSS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = arm64.AFCVTZSD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT32:
		a = arm64.AFCVTZUSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT32:
		a = arm64.AFCVTZUDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT64:
		a = arm64.AFCVTZUS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT64:
		a = arm64.AFCVTZUD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TUINT32]
		goto hard

		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFWS
		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFWD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFD
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFWS
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFWD
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFS
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFD
		goto rdst

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = arm64.AFCVTSD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = arm64.AFCVTDS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	gc.Regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
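// Illustrative sketch (not compiler code): the up-conversion rule in the table
// above — the extension instruction is chosen by the signedness of the source
// type (a signed source sign-extends via AMOVB/AMOVH/AMOVW, an unsigned source
// zero-extends via the ...U forms), regardless of the destination's sign. The
// helper name is hypothetical.
func widen8(signedSrc bool, b byte) uint64 {
	if signedSrc {
		return uint64(int64(int8(b))) // sign extension, as AMOVB does
	}
	return uint64(b) // zero extension, as AMOVBU does
}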
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
			check = 0
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = 0
	}

	a := optoas(op, t)

	var n3 gc.Node
	gc.Regalloc(&n3, t0, nil)
	var ax gc.Node
	var oldax gc.Node
	if nl.Ullman >= nr.Ullman {
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
		gc.Cgen(nr, &n3)
		gc.Regfree(&ax)
	} else {
		gc.Cgen(nr, &n3)
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
	}

	if t != t0 {
		// Convert
		ax1 := ax
		n31 := n3
		ax.Type = t
		n3.Type = t
		gmove(&ax1, &ax)
		gmove(&n31, &n3)
	}

	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)

		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	var p2 *obj.Prog
	if check != 0 {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &ax)
			gmove(&ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)
			gmove(&n4, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	var olddx gc.Node
	var dx gc.Node
	savex(x86.REG_DX, &dx, &olddx, res, t)
	if !gc.Issigned[t.Etype] {
		gc.Nodconst(&n4, t, 0)
		gmove(&n4, &dx)
	} else {
		gins(optoas(gc.OEXTEND, t), nil, nil)
	}
	gins(a, &n3, nil)
	gc.Regfree(&n3)
	if op == gc.ODIV {
		gmove(&ax, res)
	} else {
		gmove(&dx, res)
	}
	restx(&dx, &olddx)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
	restx(&ax, &oldax)
}