func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if t.IsInteger() && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if t.IsInteger() && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		rawgins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}

	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if t.IsInteger() && n1.Op == gc.OLITERAL && n1.Int64() == 0 && n2.Op != gc.OLITERAL {
		// Reverse comparison to place the constant zero last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if t.IsInteger() && n2.Op == gc.OLITERAL && n2.Int64() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}

	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
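// Illustrative sketch, not part of the compiler: swapping the operands of a
// comparison (so the constant ends up on the right) requires reversing the
// condition, which is what gc.Brrev provides in the two functions above.
// The type and function below are hypothetical stand-ins that model that
// mapping for the ordered integer comparisons.
type relOp int

const (
	opEQ relOp = iota // ==
	opNE              // !=
	opLT              // <
	opLE              // <=
	opGT              // >
	opGE              // >=
)

// brrev returns the condition that holds for (b op a) whenever (a op b) holds.
func brrev(op relOp) relOp {
	switch op {
	case opLT:
		return opGT
	case opLE:
		return opGE
	case opGT:
		return opLT
	case opGE:
		return opLE
	}
	return op // == and != are symmetric
}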
/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatalf("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatalf("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				gc.Cgen(n.Name.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

		case gc.OINDREG:
			// nothing to do
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int64()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
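// Illustrative sketch, not part of the compiler: the OLITERAL case above
// splits a 64-bit constant exactly like this helper does — the low half is
// the value truncated to 32 bits, the high half is the value shifted right
// by 32. On the 32-bit little-endian targets this code serves, the high
// word of an in-memory 64-bit value sits 4 bytes above the low word, which
// is what hi.Xoffset += 4 expresses on the non-constant path.
func splitConst64(v int64) (lo uint32, hi uint32) {
	lo = uint32(v)       // low 32 bits
	hi = uint32(v >> 32) // high 32 bits (sign bits for negative v)
	return
}

// For example, splitConst64(-1) yields (0xffffffff, 0xffffffff).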
func intLiteral(n *gc.Node) (x int64, ok bool) {
	switch {
	case n == nil:
		return
	case gc.Isconst(n, gc.CTINT):
		return n.Int64(), true
	case gc.Isconst(n, gc.CTBOOL):
		return int64(obj.Bool2int(n.Bool())), true
	}
	return
}
/*
 * generate division.
 * generates one of:
 *	res = nl / nr
 *	res = nl % nr
 * according to op.
 */
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// Have to be careful about handling
	// the most negative int divided by -1 correctly.
	// The hardware will generate an undefined result.
	// Also need to explicitly trap on division by zero;
	// the hardware will silently generate an undefined result.
	// DIVW leaves an unpredictable result in the upper 32 bits,
	// so always use DIVD/DIVDU.
	t := nl.Type

	t0 := t
	check := 0
	if t.IsSigned() {
		check = 1
		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
			check = 0
		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
			check = 0
		}
	}

	if t.Width < 8 {
		if t.IsSigned() {
			t = gc.Types[gc.TINT64]
		} else {
			t = gc.Types[gc.TUINT64]
		}
		check = 0
	}

	a := optoas(gc.ODIV, t)

	var tl gc.Node
	gc.Regalloc(&tl, t0, nil)
	var tr gc.Node
	gc.Regalloc(&tr, t0, nil)
	if nl.Ullman >= nr.Ullman {
		gc.Cgen(nl, &tl)
		gc.Cgen(nr, &tr)
	} else {
		gc.Cgen(nr, &tr)
		gc.Cgen(nl, &tl)
	}

	if t != t0 {
		// Convert
		tl2 := tl
		tr2 := tr
		tl.Type = t
		tr.Type = t
		gmove(&tl2, &tl)
		gmove(&tr2, &tr)
	}

	// Handle divide-by-zero panic.
	p1 := gins(optoas(gc.OCMP, t), &tr, nil)
	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = s390x.REGZERO
	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
	if panicdiv == nil {
		panicdiv = gc.Sysfunc("panicdivide")
	}
	gc.Ginscall(panicdiv, -1)
	gc.Patch(p1, gc.Pc)

	var p2 *obj.Prog
	if check != 0 {
		var nm1 gc.Node
		gc.Nodconst(&nm1, t, -1)
		gins(optoas(gc.OCMP, t), &tr, &nm1)
		p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
		if op == gc.ODIV {
			// a / (-1) is -a.
			gins(optoas(gc.OMINUS, t), nil, &tl)
			gmove(&tl, res)
		} else {
			// a % (-1) is 0.
			var nz gc.Node
			gc.Nodconst(&nz, t, 0)
			gmove(&nz, res)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
	}

	p1 = gins(a, &tr, &tl)
	if op == gc.ODIV {
		gc.Regfree(&tr)
		gmove(&tl, res)
	} else {
		// A%B = A-(A/B*B)
		var tm gc.Node
		gc.Regalloc(&tm, t, nil)

		// patch div to use the 3 register form
		// TODO(minux): add gins3?
		p1.Reg = p1.To.Reg
		p1.To.Reg = tm.Reg
		gins(optoas(gc.OMUL, t), &tr, &tm)
		gc.Regfree(&tr)
		gins(optoas(gc.OSUB, t), &tm, &tl)
		gc.Regfree(&tm)
		gmove(&tl, res)
	}

	gc.Regfree(&tl)
	if check != 0 {
		gc.Patch(p2, gc.Pc)
	}
}
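// Illustrative sketch, not part of the compiler: the identities the special
// cases above rely on, written out in plain Go. For the most negative value
// of a signed type, negation overflows back to the same value, so
// "a / -1 == -a" and "a % -1 == 0" hold for every a (including math.MinInt64),
// which is why the generated code can branch to a simple negate or a zero
// instead of letting the hardware divide produce an undefined result. The
// remainder path uses A%B == A - (A/B)*B. Division by zero panics here just
// as the emitted panicdivide call does.
func divModIdentities(a, b int64) (q, r int64) {
	if b == -1 {
		return -a, 0 // -a wraps for the most negative a, matching two's complement
	}
	q = a / b
	r = a - q*b // same as a % b
	return
}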
/* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool { if n.Type == nil { return false } *a = obj.Addr{} switch n.Op { case gc.OLITERAL: if !gc.Isconst(n, gc.CTINT) { break } v := n.Int64() if v >= 32000 || v <= -32000 { break } switch as { default: return false case arm.AADD, arm.ASUB, arm.AAND, arm.AORR, arm.AEOR, arm.AMOVB, arm.AMOVBS, arm.AMOVBU, arm.AMOVH, arm.AMOVHS, arm.AMOVHU, arm.AMOVW: break } cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY gc.Naddr(a, n) return true case gc.ODOT, gc.ODOTPTR: cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY var nn *gc.Node var oary [10]int64 o := gc.Dotoffset(n, oary[:], &nn) if nn == nil { sudoclean() return false } if nn.Addable && o == 1 && oary[0] >= 0 { // directly addressable set of DOTs n1 := *nn n1.Type = n.Type n1.Xoffset += oary[0] gc.Naddr(a, &n1) return true } gc.Regalloc(reg, gc.Types[gc.Tptr], nil) n1 := *reg n1.Op = gc.OINDREG if oary[0] >= 0 { gc.Agen(nn, reg) n1.Xoffset = oary[0] } else { gc.Cgen(nn, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[0] + 1) } for i := 1; i < o; i++ { if oary[i] >= 0 { gc.Fatalf("can't happen") } gins(arm.AMOVW, &n1, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[i] + 1) } a.Type = obj.TYPE_NONE a.Name = obj.NAME_NONE n1.Type = n.Type gc.Naddr(a, &n1) return true case gc.OINDEX: return false } return false }
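// Illustrative sketch, not part of the compiler: how the offset list filled
// in by gc.Dotoffset appears to be consumed above. The first entry, if
// non-negative, is a plain byte offset from the address of the base node; a
// negative entry o stands for "the current value is a pointer, and the next
// field lives at offset -(o+1) from it". Every entry after the first must be
// negative, so the loop above emits one load (AMOVW) and one nil check per
// remaining entry. A toy walker over a caller-supplied load function, under
// those assumptions:
func walkDotChain(base int64, oary []int64, load func(addr int64) int64) int64 {
	off := oary[0]
	if off < 0 {
		off = -(oary[0] + 1) // base is already a pointer value
	}
	addr := base
	for _, o := range oary[1:] {
		if o >= 0 {
			panic("can't happen") // mirrors the gc.Fatalf above
		}
		addr = load(addr + off) // follow the embedded pointer
		off = -(o + 1)
	}
	return addr + off // final field address
}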
/* * generate move: * t = f * hard part is conversions. */ func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands var a obj.As if gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node f.Convconst(&con, t.Type) f = &con ft = tt // so big switch will choose a simple mov // some constants can't move directly to memory. if gc.Ismem(t) { // float constants come from memory. if gc.Isfloat[tt] { goto hard } // 64-bit immediates are really 32-bit sign-extended // unless moving into a register. if gc.Isint[tt] { if i := con.Int64(); int64(int32(i)) != i { goto hard } } } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: gc.Dump("f", f) gc.Dump("t", t) gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong)) /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT64<<16 | gc.TINT8, gc.TUINT64<<16 | gc.TINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = x86.AMOVB case gc.TINT16<<16 | gc.TINT16, // same size gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT64<<16 | gc.TINT16, gc.TUINT64<<16 | gc.TINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = x86.AMOVW case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = x86.AMOVL case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: a = x86.AMOVQL case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: a = x86.AMOVQ /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16: a = x86.AMOVBWSX goto rdst case gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = x86.AMOVBLSX goto rdst case gc.TINT8<<16 | gc.TINT64, gc.TINT8<<16 | gc.TUINT64: a = x86.AMOVBQSX goto rdst case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16: a = x86.AMOVBWZX goto rdst case gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = x86.AMOVBLZX goto rdst case gc.TUINT8<<16 | gc.TINT64, gc.TUINT8<<16 | gc.TUINT64: a = x86.AMOVBQZX goto rdst case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = x86.AMOVWLSX goto rdst case gc.TINT16<<16 | gc.TINT64, gc.TINT16<<16 | gc.TUINT64: a = x86.AMOVWQSX goto rdst case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = x86.AMOVWLZX goto rdst case 
gc.TUINT16<<16 | gc.TINT64, gc.TUINT16<<16 | gc.TUINT64: a = x86.AMOVWQZX goto rdst case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: a = x86.AMOVLQSX goto rdst // AMOVL into a register zeros the top of the register, // so this is not always necessary, but if we rely on AMOVL // the optimizer is almost certain to screw with us. case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: a = x86.AMOVLQZX goto rdst /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT32: a = x86.ACVTTSS2SL goto rdst case gc.TFLOAT64<<16 | gc.TINT32: a = x86.ACVTTSD2SL goto rdst case gc.TFLOAT32<<16 | gc.TINT64: a = x86.ACVTTSS2SQ goto rdst case gc.TFLOAT64<<16 | gc.TINT64: a = x86.ACVTTSD2SQ goto rdst // convert via int32. case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: cvt = gc.Types[gc.TINT32] goto hard // convert via int64. case gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32: cvt = gc.Types[gc.TINT64] goto hard // algorithm is: // if small enough, use native float64 -> int64 conversion. // otherwise, subtract 2^63, convert, and add it back. case gc.TFLOAT32<<16 | gc.TUINT64, gc.TFLOAT64<<16 | gc.TUINT64: a := x86.ACVTTSS2SQ if ft == gc.TFLOAT64 { a = x86.ACVTTSD2SQ } bignodes() var r1 gc.Node gc.Regalloc(&r1, gc.Types[ft], nil) var r2 gc.Node gc.Regalloc(&r2, gc.Types[tt], t) var r3 gc.Node gc.Regalloc(&r3, gc.Types[ft], nil) var r4 gc.Node gc.Regalloc(&r4, gc.Types[tt], nil) gins(optoas(gc.OAS, f.Type), f, &r1) gins(optoas(gc.OCMP, f.Type), &bigf, &r1) p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1) gins(a, &r1, &r2) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) gins(optoas(gc.OAS, f.Type), &bigf, &r3) gins(optoas(gc.OSUB, f.Type), &r3, &r1) gins(a, &r1, &r2) gins(x86.AMOVQ, &bigi, &r4) gins(x86.AXORQ, &r4, &r2) gc.Patch(p2, gc.Pc) gmove(&r2, t) gc.Regfree(&r4) gc.Regfree(&r3) gc.Regfree(&r2) gc.Regfree(&r1) return /* * integer to float */ case gc.TINT32<<16 | gc.TFLOAT32: a = x86.ACVTSL2SS goto rdst case gc.TINT32<<16 | gc.TFLOAT64: a = x86.ACVTSL2SD goto rdst case gc.TINT64<<16 | gc.TFLOAT32: a = x86.ACVTSQ2SS goto rdst case gc.TINT64<<16 | gc.TFLOAT64: a = x86.ACVTSQ2SD goto rdst // convert via int32 case gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hard // convert via int64. case gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT64] goto hard // algorithm is: // if small enough, use native int64 -> uint64 conversion. // otherwise, halve (rounding to odd?), convert, and double. 
case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: a := x86.ACVTSQ2SS if tt == gc.TFLOAT64 { a = x86.ACVTSQ2SD } var zero gc.Node gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0) var one gc.Node gc.Nodconst(&one, gc.Types[gc.TUINT64], 1) var r1 gc.Node gc.Regalloc(&r1, f.Type, f) var r2 gc.Node gc.Regalloc(&r2, t.Type, t) var r3 gc.Node gc.Regalloc(&r3, f.Type, nil) var r4 gc.Node gc.Regalloc(&r4, f.Type, nil) gmove(f, &r1) gins(x86.ACMPQ, &r1, &zero) p1 := gc.Gbranch(x86.AJLT, nil, +1) gins(a, &r1, &r2) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) gmove(&r1, &r3) gins(x86.ASHRQ, &one, &r3) gmove(&r1, &r4) gins(x86.AANDL, &one, &r4) gins(x86.AORQ, &r4, &r3) gins(a, &r3, &r2) gins(optoas(gc.OADD, t.Type), &r2, &r2) gc.Patch(p2, gc.Pc) gmove(&r2, t) gc.Regfree(&r4) gc.Regfree(&r3) gc.Regfree(&r2) gc.Regfree(&r1) return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = x86.AMOVSS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = x86.AMOVSD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = x86.ACVTSS2SD goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = x86.ACVTSD2SS goto rdst } gins(a, f, t) return // requires register destination rdst: { var r1 gc.Node gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: var r1 gc.Node gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return }
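// Illustrative sketch, not part of the compiler: the two branchy conversions
// in gmove above, written as plain Go. float -> uint64 uses the signed
// conversion when the value fits below 2^63, and otherwise subtracts 2^63
// before converting and adds it back afterwards (the generated code does the
// add-back by XORing in the sign bit). uint64 -> float converts directly when
// the top bit is clear, and otherwise halves with round-to-odd, converts, and
// doubles so the final rounding still comes out right.
func float64ToUint64(f float64) uint64 {
	const big = 1 << 63
	if f < big {
		return uint64(int64(f)) // fits in the signed range
	}
	return uint64(int64(f-big)) ^ (1 << 63) // subtract 2^63, convert, add it back
}

func uint64ToFloat64(v uint64) float64 {
	if v>>63 == 0 {
		return float64(int64(v)) // top bit clear: signed conversion works directly
	}
	half := v>>1 | v&1 // halve, rounding to odd, so the final rounding is unaffected
	return float64(int64(half)) * 2
}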
/* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool { if n.Type == nil { return false } *a = obj.Addr{} switch n.Op { case gc.OLITERAL: if !gc.Isconst(n, gc.CTINT) { break } v := n.Int64() if v >= 32000 || v <= -32000 { break } switch as { default: return false case x86.AADDB, x86.AADDW, x86.AADDL, x86.AADDQ, x86.ASUBB, x86.ASUBW, x86.ASUBL, x86.ASUBQ, x86.AANDB, x86.AANDW, x86.AANDL, x86.AANDQ, x86.AORB, x86.AORW, x86.AORL, x86.AORQ, x86.AXORB, x86.AXORW, x86.AXORL, x86.AXORQ, x86.AINCB, x86.AINCW, x86.AINCL, x86.AINCQ, x86.ADECB, x86.ADECW, x86.ADECL, x86.ADECQ, x86.AMOVB, x86.AMOVW, x86.AMOVL, x86.AMOVQ: break } cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY gc.Naddr(a, n) return true case gc.ODOT, gc.ODOTPTR: cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY var nn *gc.Node var oary [10]int64 o := gc.Dotoffset(n, oary[:], &nn) if nn == nil { sudoclean() return false } if nn.Addable && o == 1 && oary[0] >= 0 { // directly addressable set of DOTs n1 := *nn n1.Type = n.Type n1.Xoffset += oary[0] gc.Naddr(a, &n1) return true } gc.Regalloc(reg, gc.Types[gc.Tptr], nil) n1 := *reg n1.Op = gc.OINDREG if oary[0] >= 0 { gc.Agen(nn, reg) n1.Xoffset = oary[0] } else { gc.Cgen(nn, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[0] + 1) } for i := 1; i < o; i++ { if oary[i] >= 0 { gc.Fatalf("can't happen") } gins(movptr, &n1, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[i] + 1) } a.Type = obj.TYPE_NONE a.Index = x86.REG_NONE gc.Fixlargeoffset(&n1) gc.Naddr(a, &n1) return true case gc.OINDEX: return false } return false }
/* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool { if n.Type == nil { return false } *a = obj.Addr{} switch n.Op { case gc.OLITERAL: if !gc.Isconst(n, gc.CTINT) { return false } v := n.Int64() switch as { default: return false // operations that can cope with a 32-bit immediate // TODO(mundaym): logical operations can work on high bits case s390x.AADD, s390x.AADDC, s390x.ASUB, s390x.AMULLW, s390x.AAND, s390x.AOR, s390x.AXOR, s390x.ASLD, s390x.ASLW, s390x.ASRAW, s390x.ASRAD, s390x.ASRW, s390x.ASRD, s390x.AMOVB, s390x.AMOVBZ, s390x.AMOVH, s390x.AMOVHZ, s390x.AMOVW, s390x.AMOVWZ, s390x.AMOVD: if int64(int32(v)) != v { return false } // for comparisons avoid immediates unless they can // fit into a int8/uint8 // this favours combined compare and branch instructions case s390x.ACMP: if int64(int8(v)) != v { return false } case s390x.ACMPU: if int64(uint8(v)) != v { return false } } cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY gc.Naddr(a, n) return true case gc.ODOT, gc.ODOTPTR: cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY var nn *gc.Node var oary [10]int64 o := gc.Dotoffset(n, oary[:], &nn) if nn == nil { sudoclean() return false } if nn.Addable && o == 1 && oary[0] >= 0 { // directly addressable set of DOTs n1 := *nn n1.Type = n.Type n1.Xoffset += oary[0] // check that the offset fits into a 12-bit displacement if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 { sudoclean() return false } gc.Naddr(a, &n1) return true } gc.Regalloc(reg, gc.Types[gc.Tptr], nil) n1 := *reg n1.Op = gc.OINDREG if oary[0] >= 0 { gc.Agen(nn, reg) n1.Xoffset = oary[0] } else { gc.Cgen(nn, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[0] + 1) } for i := 1; i < o; i++ { if oary[i] >= 0 { gc.Fatalf("can't happen") } gins(s390x.AMOVD, &n1, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[i] + 1) } a.Type = obj.TYPE_NONE a.Index = 0 // check that the offset fits into a 12-bit displacement if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 { tmp := n1 tmp.Op = gc.OREGISTER tmp.Type = gc.Types[gc.Tptr] tmp.Xoffset = 0 gc.Cgen_checknil(&tmp) ginscon(s390x.AADD, n1.Xoffset, &tmp) n1.Xoffset = 0 } gc.Naddr(a, &n1) return true } return false }
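// Illustrative sketch, not part of the compiler: the fit tests used in the
// s390x sudoaddable above, pulled out as helpers. Truncating and widening
// back is the usual way to ask "does this value survive in N bits"; the
// displacement test mirrors the 12-bit unsigned displacement limit that the
// ODOT path checks twice.
func fitsInInt32(v int64) bool { return int64(int32(v)) == v }
func fitsInInt8(v int64) bool  { return int64(int8(v)) == v }
func fitsInUint8(v int64) bool { return int64(uint8(v)) == v }

// fitsInDisp12 reports whether an offset can be used as a 12-bit displacement,
// keeping the same 8-byte margin as the checks above.
func fitsInDisp12(off int64) bool { return off >= 0 && off < (1<<12)-8 }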
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { a := optoas(op, nl.Type) if nr.Op == gc.OLITERAL { var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gc.Cgen(nl, &n1) sc := uint64(nr.Int64()) if sc >= uint64(nl.Type.Width*8) { // large shift gets 2 shifts by width-1 var n3 gc.Node gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n1) gins(a, &n3, &n1) } else { gins(a, nr, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } if nl.Ullman >= gc.UINF { var n4 gc.Node gc.Tempname(&n4, nl.Type) gc.Cgen(nl, &n4) nl = &n4 } if nr.Ullman >= gc.UINF { var n5 gc.Node gc.Tempname(&n5, nr.Type) gc.Cgen(nr, &n5) nr = &n5 } rcx := gc.GetReg(x86.REG_CX) var n1 gc.Node gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) // Allow either uint32 or uint64 as shift type, // to avoid unnecessary conversion from uint32 to uint64 // just to do the comparison. tcount := gc.Types[gc.Simtype[nr.Type.Etype]] if tcount.Etype < gc.TUINT32 { tcount = gc.Types[gc.TUINT32] } gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX var n3 gc.Node gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX) var oldcx gc.Node if rcx > 0 && !gc.Samereg(&cx, res) { gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil) gmove(&cx, &oldcx) } cx.Type = tcount var n2 gc.Node if gc.Samereg(&cx, res) { gc.Regalloc(&n2, nl.Type, nil) } else { gc.Regalloc(&n2, nl.Type, res) } if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &n2) gc.Cgen(nr, &n1) gmove(&n1, &n3) } else { gc.Cgen(nr, &n1) gmove(&n1, &n3) gc.Cgen(nl, &n2) } gc.Regfree(&n3) // test and fix up large shifts if !bounded { gc.Nodconst(&n3, tcount, nl.Type.Width*8) gins(optoas(gc.OCMP, tcount), &n1, &n3) p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1) if op == gc.ORSH && nl.Type.IsSigned() { gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n2) } else { gc.Nodconst(&n3, nl.Type, 0) gmove(&n3, &n2) } gc.Patch(p1, gc.Pc) } gins(a, &n1, &n2) if oldcx.Op != 0 { cx.Type = gc.Types[gc.TUINT64] gmove(&oldcx, &cx) gc.Regfree(&oldcx) } gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
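// Illustrative sketch, not part of the compiler: the result the "test and fix
// up large shifts" block above has to produce. The hardware shift
// instructions only use the low bits of the count, but Go defines shifts by
// counts >= the operand width, so for an unbounded count the generated code
// compares the count with the width and substitutes the defined answer: zero
// for left shifts and unsigned right shifts, and a shift by width-1 (all sign
// bits) for signed right shifts. In Go source the explicit branches below are
// redundant (the language already guarantees this); they only spell out what
// the back end must emit.
func shr32Signed(x int32, count uint32) int32 {
	if count >= 32 {
		return x >> 31 // all copies of the sign bit
	}
	return x >> count
}

func shl32(x uint32, count uint32) uint32 {
	if count >= 32 {
		return 0
	}
	return x << count
}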
// generate move: // t = f // hard part is conversions. func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong)) } ft := int(gc.Simsimtype(f.Type)) tt := int(gc.Simsimtype(t.Type)) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } var a obj.As // cannot have two memory operands if gc.Ismem(f) && gc.Ismem(t) { if gmvc(f, t) { return } goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node f.Convconst(&con, t.Type) f = &con ft = tt // so big switch will choose a simple mov // some constants can't move directly to memory. if gc.Ismem(t) { // float constants come from memory. if t.Type.IsFloat() { goto hard } // all immediates are 16-bit sign-extended // unless moving into a register. if t.Type.IsInteger() { if i := con.Int64(); int64(int16(i)) != i { goto hard } } // immediate moves to memory have a 12-bit unsigned displacement if t.Xoffset < 0 || t.Xoffset >= 4096-8 { goto hard } } } // a float-to-int or int-to-float conversion requires the source operand in a register if gc.Ismem(f) && ((f.Type.IsFloat() && t.Type.IsInteger()) || (f.Type.IsInteger() && t.Type.IsFloat())) { cvt = f.Type goto hard } // a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register if gc.Ismem(f) && f.Type.IsFloat() && t.Type.IsFloat() && (ft != tt) { cvt = f.Type goto hard } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong)) // integer copy and truncate case gc.TINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT64<<16 | gc.TINT8, gc.TUINT64<<16 | gc.TINT8: a = s390x.AMOVB case gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = s390x.AMOVBZ case gc.TINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, gc.TUINT32<<16 | gc.TINT16, gc.TINT64<<16 | gc.TINT16, gc.TUINT64<<16 | gc.TINT16: a = s390x.AMOVH case gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = s390x.AMOVHZ case gc.TINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TINT32, gc.TINT64<<16 | gc.TINT32, gc.TUINT64<<16 | gc.TINT32: a = s390x.AMOVW case gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TUINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: a = s390x.AMOVWZ case gc.TINT64<<16 | gc.TINT64, gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: a = s390x.AMOVD // sign extend int8 case gc.TINT8<<16 | gc.TINT16, gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32, gc.TINT8<<16 | gc.TINT64, gc.TINT8<<16 | gc.TUINT64: a = s390x.AMOVB goto rdst // sign extend uint8 case gc.TUINT8<<16 | gc.TINT16, gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32, gc.TUINT8<<16 | gc.TINT64, gc.TUINT8<<16 | gc.TUINT64: a = 
s390x.AMOVBZ goto rdst // sign extend int16 case gc.TINT16<<16 | gc.TINT32, gc.TINT16<<16 | gc.TUINT32, gc.TINT16<<16 | gc.TINT64, gc.TINT16<<16 | gc.TUINT64: a = s390x.AMOVH goto rdst // zero extend uint16 case gc.TUINT16<<16 | gc.TINT32, gc.TUINT16<<16 | gc.TUINT32, gc.TUINT16<<16 | gc.TINT64, gc.TUINT16<<16 | gc.TUINT64: a = s390x.AMOVHZ goto rdst // sign extend int32 case gc.TINT32<<16 | gc.TINT64, gc.TINT32<<16 | gc.TUINT64: a = s390x.AMOVW goto rdst // zero extend uint32 case gc.TUINT32<<16 | gc.TINT64, gc.TUINT32<<16 | gc.TUINT64: a = s390x.AMOVWZ goto rdst // float to integer case gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT32<<16 | gc.TUINT16: cvt = gc.Types[gc.TUINT32] goto hard case gc.TFLOAT32<<16 | gc.TUINT32: a = s390x.ACLFEBR goto rdst case gc.TFLOAT32<<16 | gc.TUINT64: a = s390x.ACLGEBR goto rdst case gc.TFLOAT64<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TUINT16: cvt = gc.Types[gc.TUINT32] goto hard case gc.TFLOAT64<<16 | gc.TUINT32: a = s390x.ACLFDBR goto rdst case gc.TFLOAT64<<16 | gc.TUINT64: a = s390x.ACLGDBR goto rdst case gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TINT16: cvt = gc.Types[gc.TINT32] goto hard case gc.TFLOAT32<<16 | gc.TINT32: a = s390x.ACFEBRA goto rdst case gc.TFLOAT32<<16 | gc.TINT64: a = s390x.ACGEBRA goto rdst case gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TINT16: cvt = gc.Types[gc.TINT32] goto hard case gc.TFLOAT64<<16 | gc.TINT32: a = s390x.ACFDBRA goto rdst case gc.TFLOAT64<<16 | gc.TINT64: a = s390x.ACGDBRA goto rdst // integer to float case gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT32: cvt = gc.Types[gc.TUINT32] goto hard case gc.TUINT32<<16 | gc.TFLOAT32: a = s390x.ACELFBR goto rdst case gc.TUINT64<<16 | gc.TFLOAT32: a = s390x.ACELGBR goto rdst case gc.TUINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TUINT32<<16 | gc.TFLOAT64: a = s390x.ACDLFBR goto rdst case gc.TUINT64<<16 | gc.TFLOAT64: a = s390x.ACDLGBR goto rdst case gc.TINT8<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT32: cvt = gc.Types[gc.TINT32] goto hard case gc.TINT32<<16 | gc.TFLOAT32: a = s390x.ACEFBRA goto rdst case gc.TINT64<<16 | gc.TFLOAT32: a = s390x.ACEGBRA goto rdst case gc.TINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TINT32<<16 | gc.TFLOAT64: a = s390x.ACDFBRA goto rdst case gc.TINT64<<16 | gc.TFLOAT64: a = s390x.ACDGBRA goto rdst // float to float case gc.TFLOAT32<<16 | gc.TFLOAT32: a = s390x.AFMOVS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = s390x.AFMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = s390x.ALDEBR goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = s390x.ALEDBR goto rdst } gins(a, f, t) return // requires register destination rdst: if t != nil && t.Op == gc.OREGISTER { gins(a, f, t) return } else { var r1 gc.Node gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: var r1 gc.Node gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return }
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { if nl.Type.Width > 4 { gc.Fatalf("cgen_shift %v", nl.Type) } w := int(nl.Type.Width * 8) if op == gc.OLROT { v := nr.Int64() var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) if w == 32 { gc.Cgen(nl, &n1) gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1) } else { var n2 gc.Node gc.Regalloc(&n2, nl.Type, nil) gc.Cgen(nl, &n2) gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1) gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1) gc.Regfree(&n2) // Ensure sign/zero-extended result. gins(optoas(gc.OAS, nl.Type), &n1, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } if nr.Op == gc.OLITERAL { var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gc.Cgen(nl, &n1) sc := uint64(nr.Int64()) if sc == 0 { } else // nothing to do if sc >= uint64(nl.Type.Width*8) { if op == gc.ORSH && nl.Type.IsSigned() { gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1) } else { gins(arm.AEOR, &n1, &n1) } } else { if op == gc.ORSH && nl.Type.IsSigned() { gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1) } else if op == gc.ORSH { gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1) // OLSH } else { gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1) } } if w < 32 && op == gc.OLSH { gins(optoas(gc.OAS, nl.Type), &n1, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } tr := nr.Type var t gc.Node var n1 gc.Node var n2 gc.Node var n3 gc.Node if tr.Width > 4 { var nt gc.Node gc.Tempname(&nt, nr.Type) if nl.Ullman >= nr.Ullman { gc.Regalloc(&n2, nl.Type, res) gc.Cgen(nl, &n2) gc.Cgen(nr, &nt) n1 = nt } else { gc.Cgen(nr, &nt) gc.Regalloc(&n2, nl.Type, res) gc.Cgen(nl, &n2) } var hi gc.Node var lo gc.Node split64(&nt, &lo, &hi) gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil) gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil) gmove(&lo, &n1) gmove(&hi, &n3) splitclean() gins(arm.ATST, &n3, nil) gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w)) p1 := gins(arm.AMOVW, &t, &n1) p1.Scond = arm.C_SCOND_NE tr = gc.Types[gc.TUINT32] gc.Regfree(&n3) } else { if nl.Ullman >= nr.Ullman { gc.Regalloc(&n2, nl.Type, res) gc.Cgen(nl, &n2) gc.Regalloc(&n1, nr.Type, nil) gc.Cgen(nr, &n1) } else { gc.Regalloc(&n1, nr.Type, nil) gc.Cgen(nr, &n1) gc.Regalloc(&n2, nl.Type, res) gc.Cgen(nl, &n2) } } // test for shift being 0 gins(arm.ATST, &n1, nil) p3 := gc.Gbranch(arm.ABEQ, nil, -1) // test and fix up large shifts // TODO: if(!bounded), don't emit some of this. gc.Regalloc(&n3, tr, nil) gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w)) gmove(&t, &n3) gins(arm.ACMP, &n1, &n3) if op == gc.ORSH { var p1 *obj.Prog var p2 *obj.Prog if nl.Type.IsSigned() { p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2) p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2) } else { p1 = gins(arm.AEOR, &n2, &n2) p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2) } p1.Scond = arm.C_SCOND_HS p2.Scond = arm.C_SCOND_LO } else { p1 := gins(arm.AEOR, &n2, &n2) p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2) p1.Scond = arm.C_SCOND_HS p2.Scond = arm.C_SCOND_LO } gc.Regfree(&n3) gc.Patch(p3, gc.Pc) // Left-shift of smaller word must be sign/zero-extended. if w < 32 && op == gc.OLSH { gins(optoas(gc.OAS, nl.Type), &n2, &n2) } gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
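// Illustrative sketch, not part of the compiler: the rotate-by-constant cases
// in the ARM cgen_shift above. For a full 32-bit word, a left rotate by v is
// ARM's rotate-right by 32-v (the single SHIFT_RR move); for narrower widths
// the code builds the rotate from a left shift OR'd with a logical right
// shift, then re-extends the result to the narrow type.
func rotl32(x uint32, v uint) uint32 {
	return x<<v | x>>(32-v) // v in 1..31
}

func rotl16(x uint16, v uint) uint16 {
	return x<<v | x>>(16-v) // v in 1..15; the compiler re-extends after the OR
}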
/* * attempt to generate 64-bit * res = n * return 1 on success, 0 if op not handled. */ func cgen64(n *gc.Node, res *gc.Node) { if res.Op != gc.OINDREG && res.Op != gc.ONAME { gc.Dump("n", n) gc.Dump("res", res) gc.Fatalf("cgen64 %v of %v", n.Op, res.Op) } switch n.Op { default: gc.Fatalf("cgen64 %v", n.Op) case gc.OMINUS: gc.Cgen(n.Left, res) var hi1 gc.Node var lo1 gc.Node split64(res, &lo1, &hi1) gins(x86.ANEGL, nil, &lo1) gins(x86.AADCL, ncon(0), &hi1) gins(x86.ANEGL, nil, &hi1) splitclean() return case gc.OCOM: gc.Cgen(n.Left, res) var lo1 gc.Node var hi1 gc.Node split64(res, &lo1, &hi1) gins(x86.ANOTL, nil, &lo1) gins(x86.ANOTL, nil, &hi1) splitclean() return // binary operators. // common setup below. case gc.OADD, gc.OSUB, gc.OMUL, gc.OLROT, gc.OLSH, gc.ORSH, gc.OAND, gc.OOR, gc.OXOR: break } l := n.Left r := n.Right if !l.Addable { var t1 gc.Node gc.Tempname(&t1, l.Type) gc.Cgen(l, &t1) l = &t1 } if r != nil && !r.Addable { var t2 gc.Node gc.Tempname(&t2, r.Type) gc.Cgen(r, &t2) r = &t2 } var ax gc.Node gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX) var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX) var dx gc.Node gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX) // Setup for binary operation. var hi1 gc.Node var lo1 gc.Node split64(l, &lo1, &hi1) var lo2 gc.Node var hi2 gc.Node if gc.Is64(r.Type) { split64(r, &lo2, &hi2) } // Do op. Leave result in DX:AX. switch n.Op { // TODO: Constants case gc.OADD: gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(x86.AADDL, &lo2, &ax) gins(x86.AADCL, &hi2, &dx) // TODO: Constants. case gc.OSUB: gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(x86.ASUBL, &lo2, &ax) gins(x86.ASBBL, &hi2, &dx) case gc.OMUL: // let's call the next three EX, FX and GX var ex, fx, gx gc.Node gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil) gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil) gc.Regalloc(&gx, gc.Types[gc.TPTR32], nil) // load args into DX:AX and EX:GX. gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(x86.AMOVL, &lo2, &gx) gins(x86.AMOVL, &hi2, &ex) // if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply. gins(x86.AMOVL, &dx, &fx) gins(x86.AORL, &ex, &fx) p1 := gc.Gbranch(x86.AJNE, nil, 0) gins(x86.AMULL, &gx, nil) // implicit &ax p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // full 64x64 -> 64, from 32x32 -> 64. gins(x86.AIMULL, &gx, &dx) gins(x86.AMOVL, &ax, &fx) gins(x86.AIMULL, &ex, &fx) gins(x86.AADDL, &dx, &fx) gins(x86.AMOVL, &gx, &dx) gins(x86.AMULL, &dx, nil) // implicit &ax gins(x86.AADDL, &fx, &dx) gc.Patch(p2, gc.Pc) gc.Regfree(&ex) gc.Regfree(&fx) gc.Regfree(&gx) // We only rotate by a constant c in [0,64). 
// if c >= 32: // lo, hi = hi, lo // c -= 32 // if c == 0: // no-op // else: // t = hi // shld hi:lo, c // shld lo:t, c case gc.OLROT: v := uint64(r.Int64()) if v >= 32 { // reverse during load to do the first 32 bits of rotate v -= 32 gins(x86.AMOVL, &lo1, &dx) gins(x86.AMOVL, &hi1, &ax) } else { gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) } if v == 0 { } else // done { gins(x86.AMOVL, &dx, &cx) p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx) p1.From.Index = x86.REG_AX // double-width shift p1.From.Scale = 0 p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax) p1.From.Index = x86.REG_CX // double-width shift p1.From.Scale = 0 } case gc.OLSH: if r.Op == gc.OLITERAL { v := uint64(r.Int64()) if v >= 64 { if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo2, &hi2) gins(x86.AMOVL, ncon(0), &lo2) gins(x86.AMOVL, ncon(0), &hi2) splitclean() return } if v >= 32 { if gc.Is64(r.Type) { splitclean() } split64(res, &lo2, &hi2) gmove(&lo1, &hi2) if v > 32 { gins(x86.ASHLL, ncon(uint32(v-32)), &hi2) } gins(x86.AMOVL, ncon(0), &lo2) splitclean() splitclean() return } // general shift gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx) p1.From.Index = x86.REG_AX // double-width shift p1.From.Scale = 0 gins(x86.ASHLL, ncon(uint32(v)), &ax) break } // load value into DX:AX. gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) // load shift value into register. // if high bits are set, zero value. var p1 *obj.Prog if gc.Is64(r.Type) { gins(x86.ACMPL, &hi2, ncon(0)) p1 = gc.Gbranch(x86.AJNE, nil, +1) gins(x86.AMOVL, &lo2, &cx) } else { cx.Type = gc.Types[gc.TUINT32] gmove(r, &cx) } // if shift count is >=64, zero value gins(x86.ACMPL, &cx, ncon(64)) p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) if p1 != nil { gc.Patch(p1, gc.Pc) } gins(x86.AXORL, &dx, &dx) gins(x86.AXORL, &ax, &ax) gc.Patch(p2, gc.Pc) // if shift count is >= 32, zero low. gins(x86.ACMPL, &cx, ncon(32)) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) gins(x86.AMOVL, &ax, &dx) gins(x86.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count gins(x86.AXORL, &ax, &ax) p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // general shift p1 = gins(x86.ASHLL, &cx, &dx) p1.From.Index = x86.REG_AX // double-width shift p1.From.Scale = 0 gins(x86.ASHLL, &cx, &ax) gc.Patch(p2, gc.Pc) case gc.ORSH: if r.Op == gc.OLITERAL { v := uint64(r.Int64()) if v >= 64 { if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo2, &hi2) if hi1.Type.Etype == gc.TINT32 { gmove(&hi1, &lo2) gins(x86.ASARL, ncon(31), &lo2) gmove(&hi1, &hi2) gins(x86.ASARL, ncon(31), &hi2) } else { gins(x86.AMOVL, ncon(0), &lo2) gins(x86.AMOVL, ncon(0), &hi2) } splitclean() return } if v >= 32 { if gc.Is64(r.Type) { splitclean() } split64(res, &lo2, &hi2) gmove(&hi1, &lo2) if v > 32 { gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2) } if hi1.Type.Etype == gc.TINT32 { gmove(&hi1, &hi2) gins(x86.ASARL, ncon(31), &hi2) } else { gins(x86.AMOVL, ncon(0), &hi2) } splitclean() splitclean() return } // general shift gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) p1 := gins(x86.ASHRL, ncon(uint32(v)), &ax) p1.From.Index = x86.REG_DX // double-width shift p1.From.Scale = 0 gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx) break } // load value into DX:AX. gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) // load shift value into register. // if high bits are set, zero value. 
var p1 *obj.Prog if gc.Is64(r.Type) { gins(x86.ACMPL, &hi2, ncon(0)) p1 = gc.Gbranch(x86.AJNE, nil, +1) gins(x86.AMOVL, &lo2, &cx) } else { cx.Type = gc.Types[gc.TUINT32] gmove(r, &cx) } // if shift count is >=64, zero or sign-extend value gins(x86.ACMPL, &cx, ncon(64)) p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) if p1 != nil { gc.Patch(p1, gc.Pc) } if hi1.Type.Etype == gc.TINT32 { gins(x86.ASARL, ncon(31), &dx) gins(x86.AMOVL, &dx, &ax) } else { gins(x86.AXORL, &dx, &dx) gins(x86.AXORL, &ax, &ax) } gc.Patch(p2, gc.Pc) // if shift count is >= 32, sign-extend hi. gins(x86.ACMPL, &cx, ncon(32)) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) gins(x86.AMOVL, &dx, &ax) if hi1.Type.Etype == gc.TINT32 { gins(x86.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count gins(x86.ASARL, ncon(31), &dx) } else { gins(x86.ASHRL, &cx, &ax) gins(x86.AXORL, &dx, &dx) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // general shift p1 = gins(x86.ASHRL, &cx, &ax) p1.From.Index = x86.REG_DX // double-width shift p1.From.Scale = 0 gins(optoas(gc.ORSH, hi1.Type), &cx, &dx) gc.Patch(p2, gc.Pc) // make constant the right side (it usually is anyway). case gc.OXOR, gc.OAND, gc.OOR: if lo1.Op == gc.OLITERAL { nswap(&lo1, &lo2) nswap(&hi1, &hi2) } if lo2.Op == gc.OLITERAL { // special cases for constants. lv := uint32(lo2.Int64()) hv := uint32(hi2.Int64()) splitclean() // right side split64(res, &lo2, &hi2) switch n.Op { case gc.OXOR: gmove(&lo1, &lo2) gmove(&hi1, &hi2) switch lv { case 0: break case 0xffffffff: gins(x86.ANOTL, nil, &lo2) default: gins(x86.AXORL, ncon(lv), &lo2) } switch hv { case 0: break case 0xffffffff: gins(x86.ANOTL, nil, &hi2) default: gins(x86.AXORL, ncon(hv), &hi2) } case gc.OAND: switch lv { case 0: gins(x86.AMOVL, ncon(0), &lo2) default: gmove(&lo1, &lo2) if lv != 0xffffffff { gins(x86.AANDL, ncon(lv), &lo2) } } switch hv { case 0: gins(x86.AMOVL, ncon(0), &hi2) default: gmove(&hi1, &hi2) if hv != 0xffffffff { gins(x86.AANDL, ncon(hv), &hi2) } } case gc.OOR: switch lv { case 0: gmove(&lo1, &lo2) case 0xffffffff: gins(x86.AMOVL, ncon(0xffffffff), &lo2) default: gmove(&lo1, &lo2) gins(x86.AORL, ncon(lv), &lo2) } switch hv { case 0: gmove(&hi1, &hi2) case 0xffffffff: gins(x86.AMOVL, ncon(0xffffffff), &hi2) default: gmove(&hi1, &hi2) gins(x86.AORL, ncon(hv), &hi2) } } splitclean() splitclean() return } gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(optoas(n.Op, lo1.Type), &lo2, &ax) gins(optoas(n.Op, lo1.Type), &hi2, &dx) } if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo1, &hi1) gins(x86.AMOVL, &ax, &lo1) gins(x86.AMOVL, &dx, &hi1) splitclean() }
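// Illustrative sketch, not part of the compiler: 64-bit addition and
// multiplication built from 32-bit halves, as the ADDL/ADCL and MULL/IMULL
// sequences in cgen64 above do. The carry out of the low words feeds the
// high words, and the full product is the 32x32->64 low product plus the two
// cross products folded into the high word.
func add64(lo1, hi1, lo2, hi2 uint32) (lo, hi uint32) {
	lo = lo1 + lo2
	carry := uint32(0)
	if lo < lo1 { // unsigned overflow of the low words
		carry = 1
	}
	hi = hi1 + hi2 + carry
	return
}

func mul64(lo1, hi1, lo2, hi2 uint32) (lo, hi uint32) {
	full := uint64(lo1) * uint64(lo2) // 32x32 -> 64
	lo = uint32(full)
	hi = uint32(full>>32) + hi1*lo2 + lo1*hi2 // cross terms land in the high word
	return
}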
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { if nl.Type.Width > 4 { gc.Fatalf("cgen_shift %v", nl.Type) } w := int(nl.Type.Width * 8) a := optoas(op, nl.Type) if nr.Op == gc.OLITERAL { var n2 gc.Node gc.Tempname(&n2, nl.Type) gc.Cgen(nl, &n2) var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gmove(&n2, &n1) sc := uint64(nr.Int64()) if sc >= uint64(nl.Type.Width*8) { // large shift gets 2 shifts by width-1 gins(a, ncon(uint32(w)-1), &n1) gins(a, ncon(uint32(w)-1), &n1) } else { gins(a, nr, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } var oldcx gc.Node var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX) if gc.GetReg(x86.REG_CX) > 1 && !gc.Samereg(&cx, res) { gc.Tempname(&oldcx, gc.Types[gc.TUINT32]) gmove(&cx, &oldcx) } var n1 gc.Node var nt gc.Node if nr.Type.Width > 4 { gc.Tempname(&nt, nr.Type) n1 = nt } else { gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX } var n2 gc.Node if gc.Samereg(&cx, res) { gc.Regalloc(&n2, nl.Type, nil) } else { gc.Regalloc(&n2, nl.Type, res) } if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &n2) gc.Cgen(nr, &n1) } else { gc.Cgen(nr, &n1) gc.Cgen(nl, &n2) } // test and fix up large shifts if bounded { if nr.Type.Width > 4 { // delayed reg alloc gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX var lo gc.Node var hi gc.Node split64(&nt, &lo, &hi) gmove(&lo, &n1) splitclean() } } else { var p1 *obj.Prog if nr.Type.Width > 4 { // delayed reg alloc gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX var lo gc.Node var hi gc.Node split64(&nt, &lo, &hi) gmove(&lo, &n1) gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0)) p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1) gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w))) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) splitclean() gc.Patch(p2, gc.Pc) } else { gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w))) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) } if op == gc.ORSH && nl.Type.IsSigned() { gins(a, ncon(uint32(w)-1), &n2) } else { gmove(ncon(0), &n2) } gc.Patch(p1, gc.Pc) } gins(a, &n1, &n2) if oldcx.Op != 0 { gmove(&oldcx, &cx) } gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
/* * generate division. * caller must set: * ax = allocated AX register * dx = allocated DX register * generates one of: * res = nl / nr * res = nl % nr * according to op. */ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) { // Have to be careful about handling // most negative int divided by -1 correctly. // The hardware will trap. // Also the byte divide instruction needs AH, // which we otherwise don't have to deal with. // Easiest way to avoid for int8, int16: use int32. // For int32 and int64, use explicit test. // Could use int64 hw for int32. t := nl.Type t0 := t check := false if t.IsSigned() { check = true if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -1<<uint64(t.Width*8-1) { check = false } else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 { check = false } } if t.Width < 4 { if t.IsSigned() { t = gc.Types[gc.TINT32] } else { t = gc.Types[gc.TUINT32] } check = false } var t1 gc.Node gc.Tempname(&t1, t) var t2 gc.Node gc.Tempname(&t2, t) if t0 != t { var t3 gc.Node gc.Tempname(&t3, t0) var t4 gc.Node gc.Tempname(&t4, t0) gc.Cgen(nl, &t3) gc.Cgen(nr, &t4) // Convert. gmove(&t3, &t1) gmove(&t4, &t2) } else { gc.Cgen(nl, &t1) gc.Cgen(nr, &t2) } var n1 gc.Node if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) { gc.Regalloc(&n1, t, res) } else { gc.Regalloc(&n1, t, nil) } gmove(&t2, &n1) gmove(&t1, ax) var p2 *obj.Prog var n4 gc.Node if gc.Nacl { // Native Client does not relay the divide-by-zero trap // to the executing program, so we must insert a check // for ourselves. gc.Nodconst(&n4, t, 0) gins(optoas(gc.OCMP, t), &n1, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") } gc.Ginscall(panicdiv, -1) gc.Patch(p1, gc.Pc) } if check { gc.Nodconst(&n4, t, -1) gins(optoas(gc.OCMP, t), &n1, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if op == gc.ODIV { // a / (-1) is -a. gins(optoas(gc.OMINUS, t), nil, ax) gmove(ax, res) } else { // a % (-1) is 0. gc.Nodconst(&n4, t, 0) gmove(&n4, res) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) } if !t.IsSigned() { var nz gc.Node gc.Nodconst(&nz, t, 0) gmove(&nz, dx) } else { gins(optoas(gc.OEXTEND, t), nil, nil) } gins(optoas(op, t), &n1, nil) gc.Regfree(&n1) if op == gc.ODIV { gmove(ax, res) } else { gmove(dx, res) } if check { gc.Patch(p2, gc.Pc) } }
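// Illustrative sketch, not part of the compiler: the x86 divide instructions
// take their dividend in the DX:AX register pair and return the quotient in
// AX and the remainder in DX. That is why the code above zeroes DX for
// unsigned operands, emits OEXTEND (CDQ, sign-extending AX into DX) for
// signed ones, and then reads the result back from AX or DX depending on
// whether op is a division or a remainder. The helper shows the same widening
// of the dividend in plain Go for the 32-bit case.
func dividend64(ax int32, signed bool) int64 {
	if signed {
		return int64(ax) // DX holds copies of AX's sign bit
	}
	return int64(uint32(ax)) // DX holds zero
}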
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { a := optoas(op, nl.Type) if nr.Op == gc.OLITERAL { var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gc.Cgen(nl, &n1) sc := uint64(nr.Int64()) if sc >= uint64(nl.Type.Width*8) { // large shift gets 2 shifts by width-1 var n3 gc.Node gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n1) gins(a, &n3, &n1) } else { gins(a, nr, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } if nl.Ullman >= gc.UINF { var n4 gc.Node gc.Tempname(&n4, nl.Type) gc.Cgen(nl, &n4) nl = &n4 } if nr.Ullman >= gc.UINF { var n5 gc.Node gc.Tempname(&n5, nr.Type) gc.Cgen(nr, &n5) nr = &n5 } // Allow either uint32 or uint64 as shift type, // to avoid unnecessary conversion from uint32 to uint64 // just to do the comparison. tcount := gc.Types[gc.Simtype[nr.Type.Etype]] if tcount.Etype < gc.TUINT32 { tcount = gc.Types[gc.TUINT32] } var n1 gc.Node gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX var n3 gc.Node gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX var n2 gc.Node gc.Regalloc(&n2, nl.Type, res) if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &n2) gc.Cgen(nr, &n1) gmove(&n1, &n3) } else { gc.Cgen(nr, &n1) gmove(&n1, &n3) gc.Cgen(nl, &n2) } gc.Regfree(&n3) // test and fix up large shifts if !bounded { var rtmp gc.Node gc.Nodreg(&rtmp, tcount, mips.REGTMP) gc.Nodconst(&n3, tcount, nl.Type.Width*8) gins3(mips.ASGTU, &n3, &n1, &rtmp) p1 := ginsbranch(mips.ABNE, nil, &rtmp, nil, 0) if op == gc.ORSH && nl.Type.IsSigned() { gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n2) } else { gc.Nodconst(&n3, nl.Type, 0) gmove(&n3, &n2) } gc.Patch(p1, gc.Pc) } gins(a, &n1, &n2) gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
/* * generate division. * generates one of: * res = nl / nr * res = nl % nr * according to op. */ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) { // Have to be careful about handling // most negative int divided by -1 correctly. // The hardware will trap. // Also the byte divide instruction needs AH, // which we otherwise don't have to deal with. // Easiest way to avoid for int8, int16: use int32. // For int32 and int64, use explicit test. // Could use int64 hw for int32. t := nl.Type t0 := t check := false if t.IsSigned() { check = true if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) { check = false } else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 { check = false } } if t.Width < 4 { if t.IsSigned() { t = gc.Types[gc.TINT32] } else { t = gc.Types[gc.TUINT32] } check = false } a := optoas(op, t) var n3 gc.Node gc.Regalloc(&n3, t0, nil) var ax gc.Node var oldax gc.Node if nl.Ullman >= nr.Ullman { savex(x86.REG_AX, &ax, &oldax, res, t0) gc.Cgen(nl, &ax) gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen gc.Cgen(nr, &n3) gc.Regfree(&ax) } else { gc.Cgen(nr, &n3) savex(x86.REG_AX, &ax, &oldax, res, t0) gc.Cgen(nl, &ax) } if t != t0 { // Convert ax1 := ax n31 := n3 ax.Type = t n3.Type = t gmove(&ax1, &ax) gmove(&n31, &n3) } var n4 gc.Node if gc.Nacl { // Native Client does not relay the divide-by-zero trap // to the executing program, so we must insert a check // for ourselves. gc.Nodconst(&n4, t, 0) gins(optoas(gc.OCMP, t), &n3, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") } gc.Ginscall(panicdiv, -1) gc.Patch(p1, gc.Pc) } var p2 *obj.Prog if check { gc.Nodconst(&n4, t, -1) gins(optoas(gc.OCMP, t), &n3, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if op == gc.ODIV { // a / (-1) is -a. gins(optoas(gc.OMINUS, t), nil, &ax) gmove(&ax, res) } else { // a % (-1) is 0. gc.Nodconst(&n4, t, 0) gmove(&n4, res) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) } var olddx gc.Node var dx gc.Node savex(x86.REG_DX, &dx, &olddx, res, t) if !t.IsSigned() { gc.Nodconst(&n4, t, 0) gmove(&n4, &dx) } else { gins(optoas(gc.OEXTEND, t), nil, nil) } gins(a, &n3, nil) gc.Regfree(&n3) if op == gc.ODIV { gmove(&ax, res) } else { gmove(&dx, res) } restx(&dx, &olddx) if check { gc.Patch(p2, gc.Pc) } restx(&ax, &oldax) }