/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		rawgins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	var r1, r2, g1, g2 gc.Node
	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
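// Editorial sketch, not part of the original compiler source (the function
// name is hypothetical): all three ginscmp variants above swap the operands
// so a constant lands in the second slot, which is only sound because the
// comparison operator is reversed at the same time via gc.Brrev. In plain
// Go terms, "a < b" holds exactly when "b > a" holds:
func reversedCompare(op string, a, b int64) bool {
	switch op {
	case "<":
		return b > a // reversed operator, swapped operands
	case "<=":
		return b >= a
	case ">":
		return b < a
	case ">=":
		return b <= a
	}
	return false
}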
/*
 * generate an addressable node in res, containing the value of n.
 * n is an array index, and might be any size; res width is <= 32-bit.
 * returns Prog* to patch to panic call.
 */
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		if n.Addable != 0 {
			// nothing to do.
			*res = *n
		} else {
			gc.Tempname(res, gc.Types[gc.TUINT32])
			gc.Cgen(n, res)
		}

		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gc.Tempname(res, gc.Types[gc.TUINT32])
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gins(x86.ACMPL, &hi, &zero)
	splitclean()
	return gc.Gbranch(x86.AJNE, nil, +1)
}
/*
 * generate high multiply
 *	res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
/*
 * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

			// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		gc.Convconst(&n1, n.Type, &n.Val)
		i := gc.Mpgetfix(n1.Val.U.Xval)
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
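// Editorial sketch (hypothetical helper, not part of the original source):
// for the OLITERAL case above, split64 computes exactly these halves — the
// low word is the value truncated to 32 bits, and the high word keeps the
// sign when the operand type is int64.
func splitHalves(v int64) (lo uint32, hi int32) {
	lo = uint32(v)      // low 32 bits, always treated as unsigned
	hi = int32(v >> 32) // high 32 bits, signed for TINT64
	return
}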
/*
 * generate floating-point operation.
 */
func cgen_float(n *gc.Node, res *gc.Node) {
	nl := n.Left
	switch n.Op {
	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE:
		p1 := gc.Gbranch(obj.AJMP, nil, 0)
		p2 := gc.Pc
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gc.Bgen(n, true, 0, p2)
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		gc.Cgen(nl, res)
		return

	case gc.OCONV:
		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
			gc.Cgen(nl, res)
			return
		}

		var n2 gc.Node
		gc.Tempname(&n2, n.Type)
		var n1 gc.Node
		gc.Mgen(nl, &n1, res)
		gmove(&n1, &n2)
		gmove(&n2, res)
		gc.Mfree(&n1)
		return
	}

	if gc.Thearch.Use387 {
		cgen_float387(n, res)
	} else {
		cgen_floatsse(n, res)
	}
}
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	// generate operands in "8-bit" registers.
	var n1b gc.Node
	gc.Regalloc(&n1b, nl.Type, res)

	gc.Cgen(nl, &n1b)
	var n2b gc.Node
	gc.Regalloc(&n2b, nr.Type, nil)
	gc.Cgen(nr, &n2b)

	// perform full-width multiplication.
	t := gc.Types[gc.TUINT64]

	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT64]
	}
	var n1 gc.Node
	gc.Nodreg(&n1, t, int(n1b.Reg))
	var n2 gc.Node
	gc.Nodreg(&n2, t, int(n2b.Reg))
	a := optoas(op, t)
	gins(a, &n2, &n1)

	// truncate.
	gmove(&n1, res)

	gc.Regfree(&n1b)
	gc.Regfree(&n2b)
	return true
}
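// Editorial sketch (hypothetical helper, not part of the original source):
// the widen-multiply-truncate strategy above is safe because truncation
// keeps exactly the low 8 bits, which is what a direct 8-bit multiply
// would produce under Go's wraparound semantics.
func bmulByWidening(a, b int8) int8 {
	return int8(int64(a) * int64(b)) // low byte matches a direct byte multiply
}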
// floating-point.  387 (not SSE2)
func cgen_float387(n *gc.Node, res *gc.Node) {
	var f0 gc.Node
	var f1 gc.Node

	nl := n.Left
	nr := n.Right
	gc.Nodreg(&f0, nl.Type, x86.REG_F0)
	gc.Nodreg(&f1, n.Type, x86.REG_F0+1)
	if nr != nil {
		// binary
		if nl.Ullman >= nr.Ullman {
			gc.Cgen(nl, &f0)
			if nr.Addable != 0 {
				gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
			} else {
				gc.Cgen(nr, &f0)
				gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
			}
		} else {
			gc.Cgen(nr, &f0)
			if nl.Addable != 0 {
				gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
			} else {
				gc.Cgen(nl, &f0)
				gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
			}
		}

		gmove(&f0, res)
		return
	}

	// unary
	gc.Cgen(nl, &f0)

	if n.Op != gc.OCONV && n.Op != gc.OPLUS {
		gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
	}
	gmove(&f0, res)
	return
}
/*
 * generate high multiply:
 *	res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node
	var ax gc.Node
	var dx gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	// multiply.
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// copy from byte to full registers
	t := gc.Types[gc.TUINT32]

	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT32]
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	var nt gc.Node
	gc.Tempname(&nt, nl.Type)
	gc.Cgen(nl, &nt)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nr, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gmove(&nt, &n2)
	a := optoas(op, t)
	gins(a, &n2, &n1)
	gc.Regfree(&n2)
	gmove(&n1, res)
	gc.Regfree(&n1)
	return true
}
/*
 * generate high multiply:
 *	res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16,
		gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRAD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TUINT8,
		gc.TUINT16,
		gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := gins(ppc64.ASRD, nil, &n1)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TINT64,
		gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
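// Editorial sketch (hypothetical helper, not part of the original source):
// the "high multiply" computed by cgen_hmul is the upper word of the
// double-width product. For a 32-bit operand it is equivalent to:
func hmul32(a, b int32) int32 {
	return int32((int64(a) * int64(b)) >> 32) // upper 32 bits of the product
}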
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
	var n1 gc.Node

	gc.Regalloc(&n1, t, nil)
	gc.Cgen(n, &n1)
	a := optoas(gc.OCMP, t)
	if a != arm.ACMP {
		var n2 gc.Node
		gc.Nodconst(&n2, t, 0)
		var n3 gc.Node
		gc.Regalloc(&n3, t, nil)
		gmove(&n2, &n3)
		gins(a, &n1, &n3)
		gc.Regfree(&n3)
	} else {
		gins(arm.ATST, &n1, nil)
	}
	a = optoas(o, t)
	gc.Patch(gc.Gbranch(a, t, likely), to)
	gc.Regfree(&n1)
}
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will generate undefined result.
	// Also need to explicitly trap on division by zero;
	// the hardware will silently generate undefined result.
	// DIVW will leave unpredictable result in higher 32-bit,
	// so always use DIVD/DIVDU.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
			check = 0
		}
	}

	if t.Width < 8 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = 0
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl

		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)

	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = ppc64.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
	if check != 0 {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)

			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)

			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
		gmove(&tl, res)
	} else {
		// A%B = A-(A/B*B)
		var tm gc.Node
		gc.Regalloc(&tm, t, nil)

		// patch div to use the 3 register form
		// TODO(minux): add gins3?
		p1.Reg = p1.To.Reg

		p1.To.Reg = tm.Val.U.Reg
		gins(optoas(gc.OMUL, t), &tr, &tm)
		gc.Regfree(&tr)
		gins(optoas(gc.OSUB, t), &tm, &tl)
		gc.Regfree(&tm)
		gmove(&tl, res)
	}

	gc.Regfree(&tl)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
}
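// Editorial sketch (hypothetical helper, not part of the original source):
// this mirrors, in plain Go, the two run-time checks dodiv emits. The
// hardware result for MinInt64 / -1 and for division by zero is undefined
// on ppc64, so the generated code branches to these answers instead.
func checkedDiv(a, b int64) (q, r int64) {
	if b == 0 {
		panic("divide by zero") // dodiv branches to panicdivide here
	}
	if b == -1 {
		return -a, 0 // a/(-1) = -a and a%(-1) = 0, even for MinInt64
	}
	return a / b, a % b
}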
/*
 * attempt to generate 64-bit
 *	res = n
 * (unhandled ops are a fatal compiler error; there is no failure return).
 */
func cgen64(n *gc.Node, res *gc.Node) {
	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))

	case gc.OMINUS:
		gc.Cgen(n.Left, res)
		var hi1 gc.Node
		var lo1 gc.Node
		split64(res, &lo1, &hi1)
		gins(x86.ANEGL, nil, &lo1)
		gins(x86.AADCL, ncon(0), &hi1)
		gins(x86.ANEGL, nil, &hi1)
		splitclean()
		return

	case gc.OCOM:
		gc.Cgen(n.Left, res)
		var lo1 gc.Node
		var hi1 gc.Node
		split64(res, &lo1, &hi1)
		gins(x86.ANOTL, nil, &lo1)
		gins(x86.ANOTL, nil, &hi1)
		splitclean()
		return

		// binary operators.
		// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLROT,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR:
		break
	}

	l := n.Left
	r := n.Right
	if !l.Addable {
		var t1 gc.Node
		gc.Tempname(&t1, l.Type)
		gc.Cgen(l, &t1)
		l = &t1
	}

	if r != nil && !r.Addable {
		var t2 gc.Node
		gc.Tempname(&t2, r.Type)
		gc.Cgen(r, &t2)
		r = &t2
	}

	var ax gc.Node
	gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
	var dx gc.Node
	gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)

	// Setup for binary operation.
	var hi1 gc.Node
	var lo1 gc.Node
	split64(l, &lo1, &hi1)

	var lo2 gc.Node
	var hi2 gc.Node
	if gc.Is64(r.Type) {
		split64(r, &lo2, &hi2)
	}

	// Do op.  Leave result in DX:AX.
	switch n.Op {
	// TODO: Constants
	case gc.OADD:
		gins(x86.AMOVL, &lo1, &ax)
		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.AADDL, &lo2, &ax)
		gins(x86.AADCL, &hi2, &dx)

	// TODO: Constants.
	case gc.OSUB:
		gins(x86.AMOVL, &lo1, &ax)
		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.ASUBL, &lo2, &ax)
		gins(x86.ASBBL, &hi2, &dx)

	// let's call the next two EX and FX.
	case gc.OMUL:
		var ex gc.Node
		gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)

		var fx gc.Node
		gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)

		// load args into DX:AX and EX:CX.
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.AMOVL, &lo2, &cx)
		gins(x86.AMOVL, &hi2, &ex)

		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
		gins(x86.AMOVL, &dx, &fx)

		gins(x86.AORL, &ex, &fx)
		p1 := gc.Gbranch(x86.AJNE, nil, 0)
		gins(x86.AMULL, &cx, nil) // implicit &ax
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// full 64x64 -> 64, from 32x32 -> 64.
		gins(x86.AIMULL, &cx, &dx)

		gins(x86.AMOVL, &ax, &fx)
		gins(x86.AIMULL, &ex, &fx)
		gins(x86.AADDL, &dx, &fx)
		gins(x86.AMOVL, &cx, &dx)
		gins(x86.AMULL, &dx, nil) // implicit &ax
		gins(x86.AADDL, &fx, &dx)
		gc.Patch(p2, gc.Pc)

		gc.Regfree(&ex)
		gc.Regfree(&fx)

	// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v := uint64(gc.Mpgetfix(r.Val.U.Xval))
		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32

			gins(x86.AMOVL, &lo1, &dx)
			gins(x86.AMOVL, &hi1, &ax)
		} else {
			gins(x86.AMOVL, &lo1, &ax)
			gins(x86.AMOVL, &hi1, &dx)
		}

		if v != 0 { // v == 0: the (possibly swapped) load already did the rotate
			gins(x86.AMOVL, &dx, &cx)
			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
			p1.From.Index = x86.REG_AX // double-width shift
			p1.From.Scale = 0
			p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax)
			p1.From.Index = x86.REG_CX // double-width shift
			p1.From.Scale = 0
		}

	case gc.OLSH:
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				splitclean()
				split64(res, &lo2, &hi2)
				gins(x86.AMOVL, ncon(0), &lo2)
				gins(x86.AMOVL, ncon(0), &hi2)
				splitclean()
				return
			}

			if v >= 32 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				split64(res, &lo2, &hi2)
				gmove(&lo1, &hi2)
				if v > 32 {
					gins(x86.ASHLL, ncon(uint32(v-32)), &hi2)
				}

				gins(x86.AMOVL, ncon(0), &lo2)
				splitclean()
				splitclean()
				return
			}

			// general shift
			gins(x86.AMOVL, &lo1, &ax)

			gins(x86.AMOVL, &hi1, &dx)
			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
			p1.From.Index = x86.REG_AX // double-width shift
			p1.From.Scale = 0
			gins(x86.ASHLL, ncon(uint32(v)), &ax)
			break
		}

		// load value into DX:AX.
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)

		// load shift value into register.
		// if high bits are set, zero value.
		var p1 *obj.Prog

		if gc.Is64(r.Type) {
			gins(x86.ACMPL, &hi2, ncon(0))
			p1 = gc.Gbranch(x86.AJNE, nil, +1)
			gins(x86.AMOVL, &lo2, &cx)
		} else {
			cx.Type = gc.Types[gc.TUINT32]
			gmove(r, &cx)
		}

		// if shift count is >=64, zero value
		gins(x86.ACMPL, &cx, ncon(64))

		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		if p1 != nil {
			gc.Patch(p1, gc.Pc)
		}
		gins(x86.AXORL, &dx, &dx)
		gins(x86.AXORL, &ax, &ax)
		gc.Patch(p2, gc.Pc)

		// if shift count is >= 32, zero low.
		gins(x86.ACMPL, &cx, ncon(32))

		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		gins(x86.AMOVL, &ax, &dx)
		gins(x86.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
		gins(x86.AXORL, &ax, &ax)
		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// general shift
		p1 = gins(x86.ASHLL, &cx, &dx)

		p1.From.Index = x86.REG_AX // double-width shift
		p1.From.Scale = 0
		gins(x86.ASHLL, &cx, &ax)
		gc.Patch(p2, gc.Pc)

	case gc.ORSH:
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				splitclean()
				split64(res, &lo2, &hi2)
				if hi1.Type.Etype == gc.TINT32 {
					gmove(&hi1, &lo2)
					gins(x86.ASARL, ncon(31), &lo2)
					gmove(&hi1, &hi2)
					gins(x86.ASARL, ncon(31), &hi2)
				} else {
					gins(x86.AMOVL, ncon(0), &lo2)
					gins(x86.AMOVL, ncon(0), &hi2)
				}

				splitclean()
				return
			}

			if v >= 32 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				split64(res, &lo2, &hi2)
				gmove(&hi1, &lo2)
				if v > 32 {
					gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
				}
				if hi1.Type.Etype == gc.TINT32 {
					gmove(&hi1, &hi2)
					gins(x86.ASARL, ncon(31), &hi2)
				} else {
					gins(x86.AMOVL, ncon(0), &hi2)
				}
				splitclean()
				splitclean()
				return
			}

			// general shift
			gins(x86.AMOVL, &lo1, &ax)

			gins(x86.AMOVL, &hi1, &dx)
			p1 := gins(x86.ASHRL, ncon(uint32(v)), &ax)
			p1.From.Index = x86.REG_DX // double-width shift
			p1.From.Scale = 0
			gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
			break
		}

		// load value into DX:AX.
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)

		// load shift value into register.
		// if high bits are set, zero value.
		var p1 *obj.Prog

		if gc.Is64(r.Type) {
			gins(x86.ACMPL, &hi2, ncon(0))
			p1 = gc.Gbranch(x86.AJNE, nil, +1)
			gins(x86.AMOVL, &lo2, &cx)
		} else {
			cx.Type = gc.Types[gc.TUINT32]
			gmove(r, &cx)
		}

		// if shift count is >=64, zero or sign-extend value
		gins(x86.ACMPL, &cx, ncon(64))

		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		if p1 != nil {
			gc.Patch(p1, gc.Pc)
		}
		if hi1.Type.Etype == gc.TINT32 {
			gins(x86.ASARL, ncon(31), &dx)
			gins(x86.AMOVL, &dx, &ax)
		} else {
			gins(x86.AXORL, &dx, &dx)
			gins(x86.AXORL, &ax, &ax)
		}

		gc.Patch(p2, gc.Pc)

		// if shift count is >= 32, sign-extend hi.
		gins(x86.ACMPL, &cx, ncon(32))

		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		gins(x86.AMOVL, &dx, &ax)
		if hi1.Type.Etype == gc.TINT32 {
			gins(x86.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
			gins(x86.ASARL, ncon(31), &dx)
		} else {
			gins(x86.ASHRL, &cx, &ax)
			gins(x86.AXORL, &dx, &dx)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// general shift
		p1 = gins(x86.ASHRL, &cx, &ax)

		p1.From.Index = x86.REG_DX // double-width shift
		p1.From.Scale = 0
		gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
		gc.Patch(p2, gc.Pc)

	// make constant the right side (it usually is anyway).
	case gc.OXOR,
		gc.OAND,
		gc.OOR:
		if lo1.Op == gc.OLITERAL {
			nswap(&lo1, &lo2)
			nswap(&hi1, &hi2)
		}

		if lo2.Op == gc.OLITERAL {
			// special cases for constants.
			lv := uint32(gc.Mpgetfix(lo2.Val.U.Xval))

			hv := uint32(gc.Mpgetfix(hi2.Val.U.Xval))
			splitclean() // right side
			split64(res, &lo2, &hi2)
			switch n.Op {
			case gc.OXOR:
				gmove(&lo1, &lo2)
				gmove(&hi1, &hi2)
				switch lv {
				case 0:
					break

				case 0xffffffff:
					gins(x86.ANOTL, nil, &lo2)

				default:
					gins(x86.AXORL, ncon(lv), &lo2)
				}

				switch hv {
				case 0:
					break

				case 0xffffffff:
					gins(x86.ANOTL, nil, &hi2)

				default:
					gins(x86.AXORL, ncon(hv), &hi2)
				}

			case gc.OAND:
				switch lv {
				case 0:
					gins(x86.AMOVL, ncon(0), &lo2)

				default:
					gmove(&lo1, &lo2)
					if lv != 0xffffffff {
						gins(x86.AANDL, ncon(lv), &lo2)
					}
				}

				switch hv {
				case 0:
					gins(x86.AMOVL, ncon(0), &hi2)

				default:
					gmove(&hi1, &hi2)
					if hv != 0xffffffff {
						gins(x86.AANDL, ncon(hv), &hi2)
					}
				}

			case gc.OOR:
				switch lv {
				case 0:
					gmove(&lo1, &lo2)

				case 0xffffffff:
					gins(x86.AMOVL, ncon(0xffffffff), &lo2)

				default:
					gmove(&lo1, &lo2)
					gins(x86.AORL, ncon(lv), &lo2)
				}

				switch hv {
				case 0:
					gmove(&hi1, &hi2)

				case 0xffffffff:
					gins(x86.AMOVL, ncon(0xffffffff), &hi2)

				default:
					gmove(&hi1, &hi2)
					gins(x86.AORL, ncon(hv), &hi2)
				}
			}

			splitclean()
			splitclean()
			return
		}

		gins(x86.AMOVL, &lo1, &ax)
		gins(x86.AMOVL, &hi1, &dx)
		gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
		gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
	}

	if gc.Is64(r.Type) {
		splitclean()
	}
	splitclean()

	split64(res, &lo1, &hi1)
	gins(x86.AMOVL, &ax, &lo1)
	gins(x86.AMOVL, &dx, &hi1)
	splitclean()
}
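// Editorial sketch (hypothetical helper, not part of the original source):
// the OLROT case above implements a constant 64-bit rotate on a split
// hi:lo pair. Rotating by c >= 32 is a half-swap followed by a rotate by
// c-32, which is why the code reverses the loads; for 0 < c < 32 each new
// half combines both old halves.
func rotl64(lo, hi uint32, c uint) (uint32, uint32) {
	if c >= 32 {
		lo, hi = hi, lo // the half-swap done "during load" above
		c -= 32
	}
	if c == 0 {
		return lo, hi
	}
	newHi := hi<<c | lo>>(32-c)
	newLo := lo<<c | hi>>(32-c)
	return newLo, newHi
}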
/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case x86.AADDB,
			x86.AADDW,
			x86.AADDL,
			x86.AADDQ,
			x86.ASUBB,
			x86.ASUBW,
			x86.ASUBL,
			x86.ASUBQ,
			x86.AANDB,
			x86.AANDW,
			x86.AANDL,
			x86.AANDQ,
			x86.AORB,
			x86.AORW,
			x86.AORL,
			x86.AORQ,
			x86.AXORB,
			x86.AXORW,
			x86.AXORL,
			x86.AXORQ,
			x86.AINCB,
			x86.AINCW,
			x86.AINCL,
			x86.AINCQ,
			x86.ADECB,
			x86.ADECW,
			x86.ADECL,
			x86.ADECQ,
			x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatal("can't happen")
			}
			gins(movptr, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = obj.TYPE_NONE
		gc.Fixlargeoffset(&n1)
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}
/*
 * generate division.
 * caller must set:
 *	ax = allocated AX register
 *	dx = allocated DX register
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
			check = 0
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = 0
	}

	var t1 gc.Node
	gc.Tempname(&t1, t)
	var t2 gc.Node
	gc.Tempname(&t2, t)
	if t0 != t {
		var t3 gc.Node
		gc.Tempname(&t3, t0)
		var t4 gc.Node
		gc.Tempname(&t4, t0)
		gc.Cgen(nl, &t3)
		gc.Cgen(nr, &t4)

		// Convert.
		gmove(&t3, &t1)

		gmove(&t4, &t2)
	} else {
		gc.Cgen(nl, &t1)
		gc.Cgen(nr, &t2)
	}

	var n1 gc.Node
	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
		gc.Regalloc(&n1, t, res)
	} else {
		gc.Regalloc(&n1, t, nil)
	}
	gmove(&t2, &n1)
	gmove(&t1, ax)
	var p2 *obj.Prog
	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)

		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	if check != 0 {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n1, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, ax)

			gmove(ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)

			gmove(&n4, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	if !gc.Issigned[t.Etype] {
		var nz gc.Node
		gc.Nodconst(&nz, t, 0)
		gmove(&nz, dx)
	} else {
		gins(optoas(gc.OEXTEND, t), nil, nil)
	}
	gins(optoas(op, t), &n1, nil)
	gc.Regfree(&n1)

	if op == gc.ODIV {
		gmove(ax, res)
	} else {
		gmove(dx, res)
	}
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
}
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
	}

	w := int(nl.Type.Width * 8)

	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n2 gc.Node
		gc.Tempname(&n2, nl.Type)
		gc.Cgen(nl, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&n2, &n1)
		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			gins(a, ncon(uint32(w)-1), &n1)

			gins(a, ncon(uint32(w)-1), &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	var oldcx gc.Node
	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
	if reg[x86.REG_CX] > 1 && !gc.Samereg(&cx, res) {
		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
		gmove(&cx, &oldcx)
	}

	var n1 gc.Node
	var nt gc.Node
	if nr.Type.Width > 4 {
		gc.Tempname(&nt, nr.Type)
		n1 = nt
	} else {
		gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
		gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
	}

	var n2 gc.Node
	if gc.Samereg(&cx, res) {
		gc.Regalloc(&n2, nl.Type, nil)
	} else {
		gc.Regalloc(&n2, nl.Type, res)
	}
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
	} else {
		gc.Cgen(nr, &n1)
		gc.Cgen(nl, &n2)
	}

	// test and fix up large shifts
	if bounded {
		if nr.Type.Width > 4 {
			// delayed reg alloc
			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
			var lo gc.Node
			var hi gc.Node
			split64(&nt, &lo, &hi)
			gmove(&lo, &n1)
			splitclean()
		}
	} else {
		var p1 *obj.Prog
		if nr.Type.Width > 4 {
			// delayed reg alloc
			gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

			gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
			var lo gc.Node
			var hi gc.Node
			split64(&nt, &lo, &hi)
			gmove(&lo, &n1)
			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
			p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
			splitclean()
			gc.Patch(p2, gc.Pc)
		} else {
			gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		}

		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gins(a, ncon(uint32(w)-1), &n2)
		} else {
			gmove(ncon(0), &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	if oldcx.Op != 0 {
		gmove(&oldcx, &cx)
	}

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	rcx := int(reg[x86.REG_CX])
	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)

	var oldcx gc.Node
	if rcx > 0 && !gc.Samereg(&cx, res) {
		gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
		gmove(&cx, &oldcx)
	}

	cx.Type = tcount

	var n2 gc.Node
	if gc.Samereg(&cx, res) {
		gc.Regalloc(&n2, nl.Type, nil)
	} else {
		gc.Regalloc(&n2, nl.Type, res)
	}
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gins(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	if oldcx.Op != 0 {
		cx.Type = gc.Types[gc.TUINT64]
		gmove(&oldcx, &cx)
		gc.Regfree(&oldcx)
	}

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
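// Editorial sketch (hypothetical helper, not part of the original source):
// the large-shift fixup above exists because Go defines shifts whose count
// is >= the operand width, while the CPU instruction only uses the low bits
// of the count. In plain Go, the answer being substituted is:
func shiftRight(x int32, count uint64) int32 {
	if count >= 32 {
		return x >> 31 // signed: all sign bits (an unsigned value would become 0)
	}
	return x >> count
}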
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatal("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// most negative int divided by -1 correctly.
	// The hardware will trap.
	// Also the byte divide instruction needs AH,
	// which we otherwise don't have to deal with.
	// Easiest way to avoid for int8, int16: use int32.
	// For int32 and int64, use explicit test.
	// Could use int64 hw for int32.
	t := nl.Type

	t0 := t
	check := 0
	if gc.Issigned[t.Etype] {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
			check = 0
		}
	}

	if t.Width < 4 {
		if gc.Issigned[t.Etype] {
			t = gc.Types[gc.TINT32]
		} else {
			t = gc.Types[gc.TUINT32]
		}
		check = 0
	}

	a := optoas(op, t)

	var n3 gc.Node
	gc.Regalloc(&n3, t0, nil)
	var ax gc.Node
	var oldax gc.Node
	if nl.Ullman >= nr.Ullman {
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
		gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
		gc.Cgen(nr, &n3)
		gc.Regfree(&ax)
	} else {
		gc.Cgen(nr, &n3)
		savex(x86.REG_AX, &ax, &oldax, res, t0)
		gc.Cgen(nl, &ax)
	}

	if t != t0 {
		// Convert
		ax1 := ax

		n31 := n3
		ax.Type = t
		n3.Type = t
		gmove(&ax1, &ax)
		gmove(&n31, &n3)
	}

	var n4 gc.Node
	if gc.Nacl {
		// Native Client does not relay the divide-by-zero trap
		// to the executing program, so we must insert a check
		// for ourselves.
		gc.Nodconst(&n4, t, 0)

		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if panicdiv == nil {
			panicdiv = gc.Sysfunc("panicdivide")
		}
		gc.Ginscall(panicdiv, -1)
		gc.Patch(p1, gc.Pc)
	}

	var p2 *obj.Prog
	if check != 0 {
		gc.Nodconst(&n4, t, -1)
		gins(optoas(gc.OCMP, t), &n3, &n4)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &ax)

			gmove(&ax, res)
		} else {
			// a % (-1) is 0.
			gc.Nodconst(&n4, t, 0)

			gmove(&n4, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	var olddx gc.Node
	var dx gc.Node
	savex(x86.REG_DX, &dx, &olddx, res, t)
	if !gc.Issigned[t.Etype] {
		gc.Nodconst(&n4, t, 0)
		gmove(&n4, &dx)
	} else {
		gins(optoas(gc.OEXTEND, t), nil, nil)
	}
	gins(a, &n3, nil)
	gc.Regfree(&n3)
	if op == gc.ODIV {
		gmove(&ax, res)
	} else {
		gmove(&dx, res)
	}
	restx(&dx, &olddx)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
	restx(&ax, &oldax)
}
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER

	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %P\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %P\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
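// Editorial sketch (hypothetical helper, not part of the original source):
// clearfat's split of the object into q 4-byte "quads" plus c trailing
// bytes corresponds to this plain-Go loop structure.
func clearBytes(b []byte) {
	q, c := len(b)/4, len(b)%4
	for i := 0; i < q*4; i += 4 {
		b[i], b[i+1], b[i+2], b[i+3] = 0, 0, 0, 0 // one 4-byte quad per store
	}
	for i := len(b) - c; i < len(b); i++ {
		b[i] = 0 // trailing bytes, one at a time
	}
}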
/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := gc.Mpgetfix(n.Val.U.Xval)
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatal("can't happen")
			}
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Name = obj.NAME_NONE
		n1.Type = n.Type
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	a := optoas(op, nl.Type)

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc >= uint64(nl.Type.Width*8) {
			// large shift gets 2 shifts by width-1
			var n3 gc.Node
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)

			gins(a, &n3, &n1)
			gins(a, &n3, &n1)
		} else {
			gins(a, nr, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nl.Ullman >= gc.UINF {
		var n4 gc.Node
		gc.Tempname(&n4, nl.Type)
		gc.Cgen(nl, &n4)
		nl = &n4
	}

	if nr.Ullman >= gc.UINF {
		var n5 gc.Node
		gc.Tempname(&n5, nr.Type)
		gc.Cgen(nr, &n5)
		nr = &n5
	}

	// Allow either uint32 or uint64 as shift type,
	// to avoid unnecessary conversion from uint32 to uint64
	// just to do the comparison.
	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]

	if tcount.Etype < gc.TUINT32 {
		tcount = gc.Types[gc.TUINT32]
	}

	var n1 gc.Node
	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
	var n3 gc.Node
	gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX

	var n2 gc.Node
	gc.Regalloc(&n2, nl.Type, res)

	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &n2)
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
	} else {
		gc.Cgen(nr, &n1)
		gmove(&n1, &n3)
		gc.Cgen(nl, &n2)
	}

	gc.Regfree(&n3)

	// test and fix up large shifts
	if !bounded {
		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
		gcmp(optoas(gc.OCMP, tcount), &n1, &n3)
		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
			gins(a, &n3, &n2)
		} else {
			gc.Nodconst(&n3, nl.Type, 0)
			gmove(&n3, &n2)
		}

		gc.Patch(p1, gc.Pc)
	}

	gins(a, &n1, &n2)

	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
	var a int

	nl := n.Left
	nr := n.Right
	switch n.Op {
	default:
		gc.Dump("cgen_floatsse", n)
		gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
		return

	case gc.OMINUS,
		gc.OCOM:
		nr = gc.Nodintconst(-1)
		gc.Convlit(&nr, n.Type)
		a = foptoas(gc.OMUL, nl.Type, 0)
		goto sbop

	// symmetric binary
	case gc.OADD,
		gc.OMUL:
		a = foptoas(int(n.Op), nl.Type, 0)

		goto sbop

	// asymmetric binary
	case gc.OSUB,
		gc.OMOD,
		gc.ODIV:
		a = foptoas(int(n.Op), nl.Type, 0)

		goto abop
	}

sbop: // symmetric binary
	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
		r := nl
		nl = nr
		nr = r
	}

abop: // asymmetric binary
	if nl.Ullman >= nr.Ullman {
		var nt gc.Node
		gc.Tempname(&nt, nl.Type)
		gc.Cgen(nl, &nt)
		var n2 gc.Node
		gc.Mgen(nr, &n2, nil)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&nt, &n1)
		gins(a, &n2, &n1)
		gmove(&n1, res)
		gc.Regfree(&n1)
		gc.Mfree(&n2)
	} else {
		var n2 gc.Node
		gc.Regalloc(&n2, nr.Type, res)
		gc.Cgen(nr, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, nil)
		gc.Cgen(nl, &n1)
		gins(a, &n2, &n1)
		gc.Regfree(&n2)
		gmove(&n1, res)
		gc.Regfree(&n1)
	}

	return
}
func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
	nl := n.Left
	nr := n.Right
	a := int(n.Op)
	if true_ == 0 {
		// brcom is not valid on floats when NaN is involved.
		p1 := gc.Gbranch(obj.AJMP, nil, 0)

		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// No need to avoid re-genning ninit.
		bgen_float(n, 1, -likely, p2)

		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
		gc.Patch(p2, gc.Pc)
		return
	}

	var tmp gc.Node
	var et int
	var n2 gc.Node
	var ax gc.Node
	if !gc.Thearch.Use387 {
		if nl.Addable == 0 {
			var n1 gc.Node
			gc.Tempname(&n1, nl.Type)
			gc.Cgen(nl, &n1)
			nl = &n1
		}

		if nr.Addable == 0 {
			var tmp gc.Node
			gc.Tempname(&tmp, nr.Type)
			gc.Cgen(nr, &tmp)
			nr = &tmp
		}

		var n2 gc.Node
		gc.Regalloc(&n2, nr.Type, nil)
		gmove(nr, &n2)
		nr = &n2

		if nl.Op != gc.OREGISTER {
			var n3 gc.Node
			gc.Regalloc(&n3, nl.Type, nil)
			gmove(nl, &n3)
			nl = &n3
		}

		if a == gc.OGE || a == gc.OGT {
			// only < and <= work right with NaN; reverse if needed
			r := nr

			nr = nl
			nl = r
			a = gc.Brrev(a)
		}

		gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
		if nl.Op == gc.OREGISTER {
			gc.Regfree(nl)
		}
		gc.Regfree(nr)
		goto ret
	} else {
		goto x87
	}

x87:
	a = gc.Brrev(a) // because the args are stacked
	if a == gc.OGE || a == gc.OGT {
		// only < and <= work right with NaN; reverse if needed
		r := nr

		nr = nl
		nl = r
		a = gc.Brrev(a)
	}

	gc.Nodreg(&tmp, nr.Type, x86.REG_F0)
	gc.Nodreg(&n2, nr.Type, x86.REG_F0+1)
	gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)
	et = gc.Simsimtype(nr.Type)
	if et == gc.TFLOAT64 {
		if nl.Ullman > nr.Ullman {
			gc.Cgen(nl, &tmp)
			gc.Cgen(nr, &tmp)
			gins(x86.AFXCHD, &tmp, &n2)
		} else {
			gc.Cgen(nr, &tmp)
			gc.Cgen(nl, &tmp)
		}

		gins(x86.AFUCOMIP, &tmp, &n2)
		gins(x86.AFMOVDP, &tmp, &tmp) // annoying pop but still better than STSW+SAHF
	} else {
		// TODO(rsc): The moves back and forth to memory
		// here are for truncating the value to 32 bits.
		// This handles 32-bit comparison but presumably
		// all the other ops have the same problem.
		// We need to figure out what the right general
		// solution is, besides telling people to use float64.
		var t1 gc.Node
		gc.Tempname(&t1, gc.Types[gc.TFLOAT32])

		var t2 gc.Node
		gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
		gc.Cgen(nr, &t1)
		gc.Cgen(nl, &t2)
		gmove(&t2, &tmp)
		gins(x86.AFCOMFP, &t1, &tmp)
		gins(x86.AFSTSW, nil, &ax)
		gins(x86.ASAHF, nil, nil)
	}

	goto ret

ret:
	if a == gc.OEQ {
		// neither NE nor P
		p1 := gc.Gbranch(x86.AJNE, nil, -likely)

		p2 := gc.Gbranch(x86.AJPS, nil, -likely)
		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
		gc.Patch(p1, gc.Pc)
		gc.Patch(p2, gc.Pc)
	} else if a == gc.ONE {
		// either NE or P
		gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)

		gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
	} else {
		gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to)
	}
}
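// Editorial sketch (hypothetical helper, not part of the original source):
// bgen_float cannot simply complement a branch condition because with NaN
// operands a comparison and its complement are both false, i.e.
// !(a < b) is not the same as (a >= b).
func nanCompare() (bool, bool) {
	zero := 0.0
	nan := zero / zero       // runtime NaN without importing math
	return nan < 1, nan >= 1 // both false: negating the branch would be wrong
}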
/*
 * attempt to generate 64-bit
 *	res = n
 * (unhandled ops are a fatal compiler error; there is no failure return).
 */
func cgen64(n *gc.Node, res *gc.Node) {
	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	l := n.Left
	var t1 gc.Node
	if l.Addable == 0 {
		gc.Tempname(&t1, l.Type)
		gc.Cgen(l, &t1)
		l = &t1
	}

	var hi1 gc.Node
	var lo1 gc.Node
	split64(l, &lo1, &hi1)
	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))

	case gc.OMINUS:
		var lo2 gc.Node
		var hi2 gc.Node
		split64(res, &lo2, &hi2)

		gc.Regalloc(&t1, lo1.Type, nil)
		var al gc.Node
		gc.Regalloc(&al, lo1.Type, nil)
		var ah gc.Node
		gc.Regalloc(&ah, hi1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)

		gmove(ncon(0), &t1)
		p1 := gins(arm.ASUB, &al, &t1)
		p1.Scond |= arm.C_SBIT
		gins(arm.AMOVW, &t1, &lo2)

		gmove(ncon(0), &t1)
		gins(arm.ASBC, &ah, &t1)
		gins(arm.AMOVW, &t1, &hi2)

		gc.Regfree(&t1)
		gc.Regfree(&al)
		gc.Regfree(&ah)
		splitclean()
		splitclean()
		return

	case gc.OCOM:
		gc.Regalloc(&t1, lo1.Type, nil)
		gmove(ncon(^uint32(0)), &t1)

		var lo2 gc.Node
		var hi2 gc.Node
		split64(res, &lo2, &hi2)
		var n1 gc.Node
		gc.Regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &lo2)

		gins(arm.AMOVW, &hi1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &hi2)

		gc.Regfree(&t1)
		gc.Regfree(&n1)
		splitclean()
		splitclean()
		return

		// binary operators.
		// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR,
		gc.OLROT:
		break
	}

	// setup for binary operators
	r := n.Right

	if r != nil && r.Addable == 0 {
		var t2 gc.Node
		gc.Tempname(&t2, r.Type)
		gc.Cgen(r, &t2)
		r = &t2
	}

	var hi2 gc.Node
	var lo2 gc.Node
	if gc.Is64(r.Type) {
		split64(r, &lo2, &hi2)
	}

	var al gc.Node
	gc.Regalloc(&al, lo1.Type, nil)
	var ah gc.Node
	gc.Regalloc(&ah, hi1.Type, nil)

	// Do op.  Leave result in ah:al.
	switch n.Op {
	default:
		gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))

	// TODO: Constants
	case gc.OADD:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi2, &bh)
		gins(arm.AMOVW, &lo2, &bl)
		p1 := gins(arm.AADD, &bl, &al)
		p1.Scond |= arm.C_SBIT
		gins(arm.AADC, &bh, &ah)
		gc.Regfree(&bl)
		gc.Regfree(&bh)

	// TODO: Constants.
	case gc.OSUB:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &bl)
		gins(arm.AMOVW, &hi2, &bh)
		p1 := gins(arm.ASUB, &bl, &al)
		p1.Scond |= arm.C_SBIT
		gins(arm.ASBC, &bh, &ah)
		gc.Regfree(&bl)
		gc.Regfree(&bh)

	// TODO(kaib): this can be done with 4 regs and does not need 6
	case gc.OMUL:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		var cl gc.Node
		gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
		var ch gc.Node
		gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)

		// load args into bh:bl and ch:cl.
		gins(arm.AMOVW, &hi1, &bh)

		gins(arm.AMOVW, &lo1, &bl)
		gins(arm.AMOVW, &hi2, &ch)
		gins(arm.AMOVW, &lo2, &cl)

		// bl * cl -> ah al
		p1 := gins(arm.AMULLU, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Val.U.Reg
		p1.Reg = cl.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(al.Val.U.Reg)

		//print("%P\n", p1);

		// bl * ch + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Val.U.Reg
		p1.Reg = ch.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(ah.Val.U.Reg)

		//print("%P\n", p1);

		// bh * cl + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bh.Val.U.Reg
		p1.Reg = cl.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(ah.Val.U.Reg)

		//print("%P\n", p1);

		gc.Regfree(&bh)
		gc.Regfree(&bl)
		gc.Regfree(&ch)
		gc.Regfree(&cl)

	// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v := uint64(gc.Mpgetfix(r.Val.U.Xval))

		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32

			gins(arm.AMOVW, &hi1, &bl)
			gins(arm.AMOVW, &lo1, &bh)
		} else {
			gins(arm.AMOVW, &hi1, &bh)
			gins(arm.AMOVW, &lo1, &bl)
		}

		if v == 0 {
			gins(arm.AMOVW, &bh, &ah)
			gins(arm.AMOVW, &bl, &al)
		} else {
			// rotate by 1 <= v <= 31
			//	MOVW	bl<<v, al
			//	MOVW	bh<<v, ah
			//	OR	bl>>(32-v), ah
			//	OR	bh>>(32-v), al
			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
		}

		gc.Regfree(&bl)
		gc.Regfree(&bh)

	case gc.OLSH:
		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		var p6 *obj.Prog
		var s gc.Node
		var n1 gc.Node
		var creg gc.Node
		var p1 *obj.Prog
		var p2 *obj.Prog
		var p3 *obj.Prog
		var p4 *obj.Prog
		var p5 *obj.Prog
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
				// here and below (verify it optimizes to EOR)
				gins(arm.AEOR, &al, &al)

				gins(arm.AEOR, &ah, &ah)
			} else if v > 32 {
				gins(arm.AEOR, &al, &al)

				//	MOVW	bl<<(v-32), ah
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
			} else if v == 32 {
				gins(arm.AEOR, &al, &al)
				gins(arm.AMOVW, &bl, &ah)
			} else if v > 0 {
				//	MOVW	bl<<v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

				//	MOVW	bh<<v, ah
				gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)

				//	OR	bl>>(32-v), ah
				gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto olsh_break
		}

		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) {
			// shift is >= 1<<32
			var cl gc.Node
			var ch gc.Node
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		bl<<s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW.LO		bh<<s, ah
		p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		s, creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	OR.LO		bl>>creg, ah
		p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)

		p1.Scond = arm.C_SCOND_LO

		//	BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bl, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	EOR.LO	al, al
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW.LO		creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO	creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW	bl<<s, ah
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO
		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		gins(arm.AEOR, &al, &al)
		gins(arm.AEOR, &ah, &ah)

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		gc.Regfree(&s)
		gc.Regfree(&creg)

	olsh_break:
		gc.Regfree(&bl)
		gc.Regfree(&bh)

	case gc.ORSH:
		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		var p4 *obj.Prog
		var p5 *obj.Prog
		var n1 gc.Node
		var p6 *obj.Prog
		var s gc.Node
		var p1 *obj.Prog
		var p2 *obj.Prog
		var creg gc.Node
		var p3 *obj.Prog
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->31, al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)

					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &al, &al)
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 32 {
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)

					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					//	MOVW	bh>>(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)

					gins(arm.AEOR, &ah, &ah)
				}
			} else if v == 32 {
				gins(arm.AMOVW, &bh, &al)
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 0 {
				//	MOVW	bl>>v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)

				//	OR	bh<<(32-v), al
				gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)

				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
				} else {
					//	MOVW	bh>>v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
				}
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto orsh_break
		}

		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) {
			// shift is >= 1<<32
			var ch gc.Node
			var cl gc.Node
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			var p1 *obj.Prog
			if bh.Type.Etype == gc.TINT32 {
				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
			} else {
				p1 = gins(arm.AEOR, &ah, &ah)
			}
			p1.Scond = arm.C_SCOND_NE
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		bl>>s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		s, creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	OR.LO		bh<<(32-s), al
		p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
		} else {
			//	MOVW	bh>>s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
		}

		p1.Scond = arm.C_SCOND_LO

		//	BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AMOVW, &bh, &al)

		p1.Scond = arm.C_SCOND_EQ
		if bh.Type.Etype == gc.TINT32 {
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
		} else {
			gins(arm.AEOR, &ah, &ah)
		}
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->(s-32), al
			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		} else {
			//	MOVW	bh>>(s-32), al
			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		}

		//	BLO	end
		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// s >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->31, al
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
		} else {
			gins(arm.AEOR, &al, &al)
		}

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		gc.Regfree(&s)
		gc.Regfree(&creg)

	orsh_break:
		gc.Regfree(&bl)
		gc.Regfree(&bh)

	// TODO(kaib): literal optimizations
	// make constant the right side (it usually is anyway).
	//	if(lo1.op == OLITERAL) {
	//		nswap(&lo1, &lo2);
	//		nswap(&hi1, &hi2);
	//	}
	//	if(lo2.op == OLITERAL) {
	//		// special cases for constants.
	//		lv = mpgetfix(lo2.val.u.xval);
	//		hv = mpgetfix(hi2.val.u.xval);
	//		splitclean();	// right side
	//		split64(res, &lo2, &hi2);
	//		switch(n->op) {
	//		case OXOR:
	//			gmove(&lo1, &lo2);
	//			gmove(&hi1, &hi2);
	//			switch(lv) {
	//			case 0:
	//				break;
	//			case 0xffffffffu:
	//				gins(ANOTL, N, &lo2);
	//				break;
	//			default:
	//				gins(AXORL, ncon(lv), &lo2);
	//				break;
	//			}
	//			switch(hv) {
	//			case 0:
	//				break;
	//			case 0xffffffffu:
	//				gins(ANOTL, N, &hi2);
	//				break;
	//			default:
	//				gins(AXORL, ncon(hv), &hi2);
	//				break;
	//			}
	//			break;
	//		case OAND:
	//			switch(lv) {
	//			case 0:
	//				gins(AMOVL, ncon(0), &lo2);
	//				break;
	//			default:
	//				gmove(&lo1, &lo2);
	//				if(lv != 0xffffffffu)
	//					gins(AANDL, ncon(lv), &lo2);
	//				break;
	//			}
	//			switch(hv) {
	//			case 0:
	//				gins(AMOVL, ncon(0), &hi2);
	//				break;
	//			default:
	//				gmove(&hi1, &hi2);
	//				if(hv != 0xffffffffu)
	//					gins(AANDL, ncon(hv), &hi2);
	//				break;
	//			}
	//			break;
	//		case OOR:
	//			switch(lv) {
	//			case 0:
	//				gmove(&lo1, &lo2);
	//				break;
	//			case 0xffffffffu:
	//				gins(AMOVL, ncon(0xffffffffu), &lo2);
	//				break;
	//			default:
	//				gmove(&lo1, &lo2);
	//				gins(AORL, ncon(lv), &lo2);
	//				break;
	//			}
	//			switch(hv) {
	//			case 0:
	//				gmove(&hi1, &hi2);
	//				break;
	//			case 0xffffffffu:
	//				gins(AMOVL, ncon(0xffffffffu), &hi2);
	//				break;
	//			default:
	//				gmove(&hi1, &hi2);
	//				gins(AORL, ncon(hv), &hi2);
	//				break;
	//			}
	//			break;
	//		}
	//		splitclean();
	//		splitclean();
	//		goto out;
	//	}
	case gc.OXOR,
		gc.OAND,
		gc.OOR:
		var n1 gc.Node
		gc.Regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &al)
		gins(arm.AMOVW, &hi2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
		gc.Regfree(&n1)
	}

	if gc.Is64(r.Type) {
		splitclean()
	}
	splitclean()

	split64(res, &lo1, &hi1)
	gins(arm.AMOVW, &al, &lo1)
	gins(arm.AMOVW, &ah, &hi1)
	splitclean()

	//out:
	gc.Regfree(&al)
	gc.Regfree(&ah)
}