/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	t := nl.Type
	a := optoas(gc.OHMUL, t)
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	var ax gc.Node
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n1, &ax)
	gins(a, &n2, nil)
	gc.Regfree(&n2)
	gc.Regfree(&n1)

	var dx gc.Node
	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
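// The effect of OHMUL can be hard to see from the generated instructions
// alone. As an illustrative sketch (not part of the compiler), this is what
// the high multiply computes for 32-bit operands: the upper half of the
// full-width product, i.e. (nl*nr) >> width.
func hmul32Reference(x, y int32) int32 {
	// Widen to 64 bits, multiply, and keep only the high 32 bits.
	full := int64(x) * int64(y)
	return int32(full >> 32)
}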
/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}
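// cgenindex above keeps only the low 32 bits of a 64-bit index and branches
// to the panic path whenever the high word is nonzero. A rough pure-Go model
// of that check, illustrative only and not compiler code:
func indexFitsIn32Bits(i int64) (lo int32, ok bool) {
	hi := int32(i >> 32)
	// Negative or oversized indices leave a nonzero high word and fail.
	return int32(i), hi == 0
}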
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		rawgins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	var r1, r2, g1, g2 gc.Node
	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}
func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}
/*
 * generate high multiply
 *  res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg

		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatal("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
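// For 8- and 16-bit operands the ARM code above forms the full product in a
// 32-bit register and then shifts it right by the operand width, arithmetic
// (SHIFT_AR) for signed types and logical (SHIFT_LR) for unsigned ones.
// An illustrative sketch of the signed 16-bit case (not compiler code):
func hmul16Reference(x, y int16) int16 {
	prod := int32(x) * int32(y) // full 32-bit product
	return int16(prod >> 16)    // arithmetic shift right by the operand width
}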
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)

	default:
		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	}

	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
		// cannot have 64-bit immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(x86.AMOVQ, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}
/*
 * generate
 *	as n, $c (CMP/CMPU)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatal("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			rawgins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	rawgins(ppc64.AMOVD, &n1, &ntmp)
	rawgins(as, n2, &ntmp)
	gc.Regfree(&ntmp)
}
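// ginscon2 only encodes the constant as an immediate when it fits the
// instruction's field; otherwise it loads it into a register first. A sketch
// of the range test used above, assuming big plays the role of ppc64.BIG
// (the largest value a signed 16-bit immediate can hold); illustrative only:
func fitsCmpImmediate(signedCmp bool, c, big int64) bool {
	if signedCmp {
		return -big <= c && c <= big // ACMP takes a signed immediate
	}
	return 0 <= c && c <= 2*big // ACMPU takes an unsigned immediate
}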
/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := (*gc.Node)(nl)
		nl = nr
		nr = tmp
	}

	t := (*gc.Type)(nl.Type)
	w := int(int(t.Width * 8))
	var n1 gc.Node
	gc.Cgenr(nl, &n1, res)
	var n2 gc.Node
	gc.Cgenr(nr, &n2, nil)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16,
		gc.TINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TUINT8,
		gc.TUINT16,
		gc.TUINT32:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(w)

	case gc.TINT64,
		gc.TUINT64:
		if gc.Issigned[t.Etype] {
			gins(ppc64.AMULHD, &n2, &n1)
		} else {
			gins(ppc64.AMULHDU, &n2, &n1)
		}

	default:
		gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
func restx(x *gc.Node, oldx *gc.Node) {
	if oldx.Op != 0 {
		x.Type = gc.Types[gc.TINT64]
		reg[x.Reg] = uint8(oldx.Ostk)
		gmove(oldx, x)
		gc.Regfree(oldx)
	}
}
func restx(x *gc.Node, oldx *gc.Node) {
	gc.Regfree(x)

	if oldx.Op != 0 {
		x.Type = gc.Types[gc.TINT32]
		gmove(oldx, x)
	}
}
func splitclean() {
	if nsclean <= 0 {
		gc.Fatal("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		gc.Regfree(&sclean[nsclean])
	}
}
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	gmove(&n1, &n2)
	gins(as, &n2, n)
	gc.Regfree(&n2)
}
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	// generate operands in "8-bit" registers.
	var n1b gc.Node
	gc.Regalloc(&n1b, nl.Type, res)
	gc.Cgen(nl, &n1b)
	var n2b gc.Node
	gc.Regalloc(&n2b, nr.Type, nil)
	gc.Cgen(nr, &n2b)

	// perform full-width multiplication.
	t := gc.Types[gc.TUINT64]
	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT64]
	}
	var n1 gc.Node
	gc.Nodreg(&n1, t, int(n1b.Reg))
	var n2 gc.Node
	gc.Nodreg(&n2, t, int(n2b.Reg))
	a := optoas(op, t)
	gins(a, &n2, &n1)

	// truncate.
	gmove(&n1, res)

	gc.Regfree(&n1b)
	gc.Regfree(&n2b)
	return true
}
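// cgen_bmul relies on the fact that the low 8 bits of a full-width product
// equal the 8-bit product, so multiplying in wider registers and truncating
// afterwards is safe. An illustrative check of that identity (not compiler
// code):
func byteMulViaFullWidth(a, b int8) int8 {
	wide := int64(a) * int64(b) // full-width multiply
	return int8(wide)           // truncation yields the same low byte as an 8-bit multiply
}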
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
	var n1 gc.Node

	gc.Regalloc(&n1, t, nil)
	gc.Cgen(n, &n1)
	a := optoas(gc.OCMP, t)
	if a != arm.ACMP {
		var n2 gc.Node
		gc.Nodconst(&n2, t, 0)
		var n3 gc.Node
		gc.Regalloc(&n3, t, nil)
		gmove(&n2, &n3)
		gins(a, &n1, &n3)
		gc.Regfree(&n3)
	} else {
		gins(arm.ATST, &n1, nil)
	}
	a = optoas(o, t)
	gc.Patch(gc.Gbranch(a, t, likely), to)
	gc.Regfree(&n1)
}
// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Regalloc(&n1, res.Type, res)
	mov := optoas(gc.OAS, gc.Types[gc.Tptr])
	p := gins(mov, nil, &n1)
	p.From.Type = obj.TYPE_REG
	p.From.Reg = x86.REG_TLS
	p = gins(mov, nil, &n1)
	p.From = p.To
	p.From.Type = obj.TYPE_MEM
	p.From.Index = x86.REG_TLS
	p.From.Scale = 1
	gmove(&n1, res)
	gc.Regfree(&n1)
}
/*
 * generate byte multiply:
 *	res = nl * nr
 * there is no 2-operand byte multiply instruction so
 * we do a full-width multiplication and truncate afterwards.
 */
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
	if optoas(op, nl.Type) != x86.AIMULB {
		return false
	}

	// copy from byte to full registers
	t := gc.Types[gc.TUINT32]
	if gc.Issigned[nl.Type.Etype] {
		t = gc.Types[gc.TINT32]
	}

	// largest ullman on left.
	if nl.Ullman < nr.Ullman {
		tmp := nl
		nl = nr
		nr = tmp
	}

	var nt gc.Node
	gc.Tempname(&nt, nl.Type)
	gc.Cgen(nl, &nt)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nr, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gmove(&nt, &n2)
	a := optoas(op, t)
	gins(a, &n2, &n1)
	gc.Regfree(&n2)
	gmove(&n1, res)
	gc.Regfree(&n1)
	return true
}
/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		rawgins(ppc64.AMOVD, &n1, &ntmp)
		rawgins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	rawgins(as, &n1, n2)
}
/*
 * generate high multiply:
 *   res = (nl*nr) >> width
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node
	var ax gc.Node
	var dx gc.Node

	t := nl.Type
	a := optoas(gc.OHMUL, t)

	// gen nl in n1.
	gc.Tempname(&n1, t)
	gc.Cgen(nl, &n1)

	// gen nr in n2.
	gc.Regalloc(&n2, t, res)
	gc.Cgen(nr, &n2)

	// multiply.
	gc.Nodreg(&ax, t, x86.REG_AX)
	gmove(&n2, &ax)
	gins(a, &n1, nil)
	gc.Regfree(&n2)

	if t.Width == 1 {
		// byte multiply behaves differently.
		gc.Nodreg(&ax, t, x86.REG_AH)
		gc.Nodreg(&dx, t, x86.REG_DX)
		gmove(&ax, &dx)
	}

	gc.Nodreg(&dx, t, x86.REG_DX)
	gmove(&dx, res)
}
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { if nl.Type.Width > 4 { gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0)) } w := int(nl.Type.Width * 8) a := optoas(op, nl.Type) if nr.Op == gc.OLITERAL { var n2 gc.Node gc.Tempname(&n2, nl.Type) gc.Cgen(nl, &n2) var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gmove(&n2, &n1) sc := uint64(gc.Mpgetfix(nr.Val.U.Xval)) if sc >= uint64(nl.Type.Width*8) { // large shift gets 2 shifts by width-1 gins(a, ncon(uint32(w)-1), &n1) gins(a, ncon(uint32(w)-1), &n1) } else { gins(a, nr, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } var oldcx gc.Node var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX) if reg[x86.REG_CX] > 1 && !gc.Samereg(&cx, res) { gc.Tempname(&oldcx, gc.Types[gc.TUINT32]) gmove(&cx, &oldcx) } var n1 gc.Node var nt gc.Node if nr.Type.Width > 4 { gc.Tempname(&nt, nr.Type) n1 = nt } else { gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX } var n2 gc.Node if gc.Samereg(&cx, res) { gc.Regalloc(&n2, nl.Type, nil) } else { gc.Regalloc(&n2, nl.Type, res) } if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &n2) gc.Cgen(nr, &n1) } else { gc.Cgen(nr, &n1) gc.Cgen(nl, &n2) } // test and fix up large shifts if bounded { if nr.Type.Width > 4 { // delayed reg alloc gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX var lo gc.Node var hi gc.Node split64(&nt, &lo, &hi) gmove(&lo, &n1) splitclean() } } else { var p1 *obj.Prog if nr.Type.Width > 4 { // delayed reg alloc gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX var lo gc.Node var hi gc.Node split64(&nt, &lo, &hi) gmove(&lo, &n1) gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0)) p2 := gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1) gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w))) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) splitclean() gc.Patch(p2, gc.Pc) } else { gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w))) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) } if op == gc.ORSH && gc.Issigned[nl.Type.Etype] { gins(a, ncon(uint32(w)-1), &n2) } else { gmove(ncon(0), &n2) } gc.Patch(p1, gc.Pc) } gins(a, &n1, &n2) if oldcx.Op != 0 { gmove(&oldcx, &cx) } gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
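// In cgen_shift above, a count >= the operand width cannot be handed to the
// hardware directly (x86 masks the count), so the generated code tests the
// count and substitutes the Go-defined result: zero for left shifts and
// unsigned right shifts, and a sign fill (two shifts by width-1 in the
// constant case) for signed right shifts. A pure-Go reference model of the
// signed fixup, illustrative only:
func shiftRight32Reference(x int32, count uint64) int32 {
	const width = 32
	if count >= width {
		// Shifting by width-1 twice leaves only copies of the sign bit.
		return (x >> (width - 1)) >> (width - 1)
	}
	return x >> count
}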
func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } if gc.Isfloat[ft] || gc.Isfloat[tt] { floatmove(f, t) return } // cannot have two integer memory operands; // except 64-bit, which always copies via registers anyway. var r1 gc.Node var a int if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = gc.Simsimtype(con.Type) } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TUINT8: a = x86.AMOVB case gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = x86.AMOVB goto rsrc case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVB, &r1, t) splitclean() return case gc.TINT16<<16 | gc.TINT16, // same size gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TUINT16: a = x86.AMOVW case gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = x86.AMOVW goto rsrc case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVW, &r1, t) splitclean() return case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = x86.AMOVL case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVL, &r1, t) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) if f.Op == gc.OLITERAL { gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) } else { var r1 gc.Node gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX) var r2 gc.Node gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX) gins(x86.AMOVL, &flo, &r1) gins(x86.AMOVL, &fhi, &r2) gins(x86.AMOVL, &r1, &tlo) gins(x86.AMOVL, &r2, &thi) } splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, 
// sign extend int8 gc.TINT8<<16 | gc.TUINT16: a = x86.AMOVBWSX goto rdst case gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = x86.AMOVBLSX goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16: a = x86.AMOVBWZX goto rdst case gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = x86.AMOVBLZX goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = x86.AMOVWLSX goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = x86.AMOVWLZX goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) var flo gc.Node gc.Nodreg(&flo, tlo.Type, x86.REG_AX) var fhi gc.Node gc.Nodreg(&fhi, thi.Type, x86.REG_DX) gmove(f, &flo) gins(x86.ACDQ, nil, nil) gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) gins(x86.AMOVL, ncon(0), &thi) splitclean() return } gins(a, f, t) return // requires register source rsrc: gc.Regalloc(&r1, f.Type, t) gmove(f, &r1) gins(a, &r1, t) gc.Regfree(&r1) return // requires register destination rdst: { gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return }
/* * generate division. * caller must set: * ax = allocated AX register * dx = allocated DX register * generates one of: * res = nl / nr * res = nl % nr * according to op. */ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) { // Have to be careful about handling // most negative int divided by -1 correctly. // The hardware will trap. // Also the byte divide instruction needs AH, // which we otherwise don't have to deal with. // Easiest way to avoid for int8, int16: use int32. // For int32 and int64, use explicit test. // Could use int64 hw for int32. t := nl.Type t0 := t check := 0 if gc.Issigned[t.Etype] { check = 1 if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) { check = 0 } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { check = 0 } } if t.Width < 4 { if gc.Issigned[t.Etype] { t = gc.Types[gc.TINT32] } else { t = gc.Types[gc.TUINT32] } check = 0 } var t1 gc.Node gc.Tempname(&t1, t) var t2 gc.Node gc.Tempname(&t2, t) if t0 != t { var t3 gc.Node gc.Tempname(&t3, t0) var t4 gc.Node gc.Tempname(&t4, t0) gc.Cgen(nl, &t3) gc.Cgen(nr, &t4) // Convert. gmove(&t3, &t1) gmove(&t4, &t2) } else { gc.Cgen(nl, &t1) gc.Cgen(nr, &t2) } var n1 gc.Node if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) { gc.Regalloc(&n1, t, res) } else { gc.Regalloc(&n1, t, nil) } gmove(&t2, &n1) gmove(&t1, ax) var p2 *obj.Prog var n4 gc.Node if gc.Nacl { // Native Client does not relay the divide-by-zero trap // to the executing program, so we must insert a check // for ourselves. gc.Nodconst(&n4, t, 0) gins(optoas(gc.OCMP, t), &n1, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") } gc.Ginscall(panicdiv, -1) gc.Patch(p1, gc.Pc) } if check != 0 { gc.Nodconst(&n4, t, -1) gins(optoas(gc.OCMP, t), &n1, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if op == gc.ODIV { // a / (-1) is -a. gins(optoas(gc.OMINUS, t), nil, ax) gmove(ax, res) } else { // a % (-1) is 0. gc.Nodconst(&n4, t, 0) gmove(&n4, res) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) } if !gc.Issigned[t.Etype] { var nz gc.Node gc.Nodconst(&nz, t, 0) gmove(&nz, dx) } else { gins(optoas(gc.OEXTEND, t), nil, nil) } gins(optoas(op, t), &n1, nil) gc.Regfree(&n1) if op == gc.ODIV { gmove(ax, res) } else { gmove(dx, res) } if check != 0 { gc.Patch(p2, gc.Pc) } }
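// dodiv guards the one case where the hardware divide traps: the most
// negative value divided by -1. The substituted results match the language
// definition, sketched here for int32 (illustrative only, not compiler code):
func divModByMinusOne(a int32) (quo, rem int32) {
	// a / -1 is -a (which wraps for the minimum int32), and a % -1 is 0,
	// so no actual divide instruction is needed for this case.
	return -a, 0
}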
func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { gc.Dump("\nclearfat", nl) } w := uint32(nl.Type.Width) // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := w % 4 // bytes q := w / 4 // quads if q < 4 { // Write sequence of MOV 0, off(base) instead of using STOSL. // The hope is that although the code will be slightly longer, // the MOVs will have no dependencies and pipeline better // than the unrolled STOSL loop. // NOTE: Must use agen, not igen, so that optimizer sees address // being taken. We are not writing on field boundaries. var n1 gc.Node gc.Regalloc(&n1, gc.Types[gc.Tptr], nil) gc.Agen(nl, &n1) n1.Op = gc.OINDREG var z gc.Node gc.Nodconst(&z, gc.Types[gc.TUINT64], 0) for { tmp14 := q q-- if tmp14 <= 0 { break } n1.Type = z.Type gins(x86.AMOVL, &z, &n1) n1.Xoffset += 4 } gc.Nodconst(&z, gc.Types[gc.TUINT8], 0) for { tmp15 := c c-- if tmp15 <= 0 { break } n1.Type = z.Type gins(x86.AMOVB, &z, &n1) n1.Xoffset++ } gc.Regfree(&n1) return } var n1 gc.Node gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI) gc.Agen(nl, &n1) gconreg(x86.AMOVL, 0, x86.REG_AX) if q > 128 || (q >= 4 && gc.Nacl) { gconreg(x86.AMOVL, int64(q), x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+ } else if q >= 4 { p := gins(obj.ADUFFZERO, nil, nil) p.To.Type = obj.TYPE_ADDR p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) // 1 and 128 = magic constants: see ../../runtime/asm_386.s p.To.Offset = 1 * (128 - int64(q)) } else { for q > 0 { gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+ q-- } } for c > 0 { gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+ c-- } }
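// clearfat splits the object's width into word-sized stores plus a byte
// tail; for the 32-bit version above the decomposition is w = 4*q + c.
// A small sketch of that arithmetic (illustrative only):
func splitClearWidth(w uint32) (q, c uint32) {
	return w / 4, w % 4 // q four-byte stores, then c single-byte stores
}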
func clearfat(nl *gc.Node) { /* clear a fat object */ if gc.Debug['g'] != 0 { gc.Dump("\nclearfat", nl) } w := nl.Type.Width // Avoid taking the address for simple enough types. if gc.Componentgen(nil, nl) { return } c := w % 8 // bytes q := w / 8 // quads if q < 4 { // Write sequence of MOV 0, off(base) instead of using STOSQ. // The hope is that although the code will be slightly longer, // the MOVs will have no dependencies and pipeline better // than the unrolled STOSQ loop. // NOTE: Must use agen, not igen, so that optimizer sees address // being taken. We are not writing on field boundaries. var n1 gc.Node gc.Agenr(nl, &n1, nil) n1.Op = gc.OINDREG var z gc.Node gc.Nodconst(&z, gc.Types[gc.TUINT64], 0) for { tmp14 := q q-- if tmp14 <= 0 { break } n1.Type = z.Type gins(x86.AMOVQ, &z, &n1) n1.Xoffset += 8 } if c >= 4 { gc.Nodconst(&z, gc.Types[gc.TUINT32], 0) n1.Type = z.Type gins(x86.AMOVL, &z, &n1) n1.Xoffset += 4 c -= 4 } gc.Nodconst(&z, gc.Types[gc.TUINT8], 0) for { tmp15 := c c-- if tmp15 <= 0 { break } n1.Type = z.Type gins(x86.AMOVB, &z, &n1) n1.Xoffset++ } gc.Regfree(&n1) return } var oldn1 gc.Node var n1 gc.Node savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr]) gc.Agen(nl, &n1) var ax gc.Node var oldax gc.Node savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr]) gconreg(x86.AMOVL, 0, x86.REG_AX) if q > 128 || gc.Nacl { gconreg(movptr, q, x86.REG_CX) gins(x86.AREP, nil, nil) // repeat gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+ } else { if di := dzDI(q); di != 0 { gconreg(addptr, di, x86.REG_DI) } p := gins(obj.ADUFFZERO, nil, nil) p.To.Type = obj.TYPE_ADDR p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) p.To.Offset = dzOff(q) } z := ax di := n1 if w >= 8 && c >= 4 { di.Op = gc.OINDREG z.Type = gc.Types[gc.TINT64] di.Type = z.Type p := gins(x86.AMOVQ, &z, &di) p.To.Scale = 1 p.To.Offset = c - 8 } else if c >= 4 { di.Op = gc.OINDREG z.Type = gc.Types[gc.TINT32] di.Type = z.Type gins(x86.AMOVL, &z, &di) if c > 4 { p := gins(x86.AMOVL, &z, &di) p.To.Scale = 1 p.To.Offset = c - 4 } } else { for c > 0 { gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+ c-- } } restx(&n1, &oldn1) restx(&ax, &oldax) }
/* * generate move: * t = f * hard part is conversions. */ func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong)) } ft := int(gc.Simsimtype(f.Type)) tt := int(gc.Simsimtype(t.Type)) cvt := (*gc.Type)(t.Type) if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands var r2 gc.Node var r1 gc.Node var a int if gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node switch tt { default: gc.Convconst(&con, t.Type, &f.Val) case gc.TINT32, gc.TINT16, gc.TINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val) var r1 gc.Node gc.Regalloc(&r1, con.Type, t) gins(ppc64.AMOVD, &con, &r1) gmove(&r1, t) gc.Regfree(&r1) return case gc.TUINT32, gc.TUINT16, gc.TUINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val) var r1 gc.Node gc.Regalloc(&r1, con.Type, t) gins(ppc64.AMOVD, &con, &r1) gmove(&r1, t) gc.Regfree(&r1) return } f = &con ft = tt // so big switch will choose a simple mov // constants can't move directly to memory. if gc.Ismem(t) { goto hard } } // float constants come from memory. //if(isfloat[tt]) // goto hard; // 64-bit immediates are also from memory. //if(isint[tt]) // goto hard; //// 64-bit immediates are really 32-bit sign-extended //// unless moving into a register. //if(isint[tt]) { // if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0) // goto hard; // if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0) // goto hard; //} // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. 
switch uint32(ft)<<16 | uint32(tt) { default: gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong)) /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT64<<16 | gc.TINT8, gc.TUINT64<<16 | gc.TINT8: a = ppc64.AMOVB case gc.TINT8<<16 | gc.TUINT8, // same size gc.TUINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, // truncate gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = ppc64.AMOVBZ case gc.TINT16<<16 | gc.TINT16, // same size gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT64<<16 | gc.TINT16, gc.TUINT64<<16 | gc.TINT16: a = ppc64.AMOVH case gc.TINT16<<16 | gc.TUINT16, // same size gc.TUINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, // truncate gc.TUINT32<<16 | gc.TUINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = ppc64.AMOVHZ case gc.TINT32<<16 | gc.TINT32, // same size gc.TUINT32<<16 | gc.TINT32, gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32: a = ppc64.AMOVW case gc.TINT32<<16 | gc.TUINT32, // same size gc.TUINT32<<16 | gc.TUINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: a = ppc64.AMOVWZ case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: a = ppc64.AMOVD /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32, gc.TINT8<<16 | gc.TINT64, gc.TINT8<<16 | gc.TUINT64: a = ppc64.AMOVB goto rdst case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32, gc.TUINT8<<16 | gc.TINT64, gc.TUINT8<<16 | gc.TUINT64: a = ppc64.AMOVBZ goto rdst case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32, gc.TINT16<<16 | gc.TINT64, gc.TINT16<<16 | gc.TUINT64: a = ppc64.AMOVH goto rdst case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32, gc.TUINT16<<16 | gc.TINT64, gc.TUINT16<<16 | gc.TUINT64: a = ppc64.AMOVHZ goto rdst case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: a = ppc64.AMOVW goto rdst case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: a = ppc64.AMOVWZ goto rdst //warn("gmove: convert float to int not implemented: %N -> %N\n", f, t); //return; // algorithm is: // if small enough, use native float64 -> int64 conversion. // otherwise, subtract 2^63, convert, and add it back. 
/* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TINT64, gc.TFLOAT64<<16 | gc.TINT64, gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8, gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32, gc.TFLOAT32<<16 | gc.TUINT64, gc.TFLOAT64<<16 | gc.TUINT64: bignodes() var r1 gc.Node gc.Regalloc(&r1, gc.Types[ft], f) gmove(f, &r1) if tt == gc.TUINT64 { gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil) gmove(&bigf, &r2) gins(ppc64.AFCMPU, &r1, &r2) p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) gins(ppc64.AFSUB, &r2, &r1) gc.Patch(p1, gc.Pc) gc.Regfree(&r2) } gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil) var r3 gc.Node gc.Regalloc(&r3, gc.Types[gc.TINT64], t) gins(ppc64.AFCTIDZ, &r1, &r2) p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil)) p1.To.Type = obj.TYPE_MEM p1.To.Reg = ppc64.REGSP p1.To.Offset = -8 p1 = gins(ppc64.AMOVD, nil, &r3) p1.From.Type = obj.TYPE_MEM p1.From.Reg = ppc64.REGSP p1.From.Offset = -8 gc.Regfree(&r2) gc.Regfree(&r1) if tt == gc.TUINT64 { p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP) gins(ppc64.AMOVD, &bigi, &r1) gins(ppc64.AADD, &r1, &r3) gc.Patch(p1, gc.Pc) } gmove(&r3, t) gc.Regfree(&r3) return //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t); //return; // algorithm is: // if small enough, use native int64 -> uint64 conversion. // otherwise, halve (rounding to odd?), convert, and double. /* * integer to float */ case gc.TINT32<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT64, gc.TINT64<<16 | gc.TFLOAT32, gc.TINT64<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64, gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64, gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: bignodes() var r1 gc.Node gc.Regalloc(&r1, gc.Types[gc.TINT64], nil) gmove(f, &r1) if ft == gc.TUINT64 { gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP) gmove(&bigi, &r2) gins(ppc64.ACMPU, &r1, &r2) p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1)) p2.From.Type = obj.TYPE_CONST p2.From.Offset = 1 gc.Patch(p1, gc.Pc) } gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t) p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil)) p1.To.Type = obj.TYPE_MEM p1.To.Reg = ppc64.REGSP p1.To.Offset = -8 p1 = gins(ppc64.AFMOVD, nil, &r2) p1.From.Type = obj.TYPE_MEM p1.From.Reg = ppc64.REGSP p1.From.Offset = -8 gins(ppc64.AFCFID, &r2, &r2) gc.Regfree(&r1) if ft == gc.TUINT64 { p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO) gins(ppc64.AFMUL, &r1, &r2) gc.Patch(p1, gc.Pc) } gmove(&r2, t) gc.Regfree(&r2) return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = ppc64.AFMOVS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = ppc64.AFMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = ppc64.AFMOVS goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = ppc64.AFRSP goto rdst } gins(a, f, t) return // requires register destination rdst: { gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) 
gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return }
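// The float<->uint64 paths in the gmove above follow the algorithm described
// in its comments: values too large for a signed conversion are biased by
// 2^63 before converting and corrected afterwards, and uint64->float halves
// the value first when the top bit is set. A pure-Go sketch of the
// float64->uint64 direction (illustrative only; ignores rounding-mode
// subtleties):
func float64ToUint64Reference(f float64) uint64 {
	const big = 9223372036854775808.0 // 2^63
	if f < big {
		// small enough: native float64 -> int64 conversion
		return uint64(int64(f))
	}
	// otherwise subtract 2^63, convert, and add it back
	return uint64(int64(f-big)) + (1 << 63)
}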
/* * generate shift according to op, one of: * res = nl << nr * res = nl >> nr */ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { a := optoas(op, nl.Type) if nr.Op == gc.OLITERAL { var n1 gc.Node gc.Regalloc(&n1, nl.Type, res) gc.Cgen(nl, &n1) sc := uint64(gc.Mpgetfix(nr.Val.U.Xval)) if sc >= uint64(nl.Type.Width*8) { // large shift gets 2 shifts by width-1 var n3 gc.Node gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n1) gins(a, &n3, &n1) } else { gins(a, nr, &n1) } gmove(&n1, res) gc.Regfree(&n1) return } if nl.Ullman >= gc.UINF { var n4 gc.Node gc.Tempname(&n4, nl.Type) gc.Cgen(nl, &n4) nl = &n4 } if nr.Ullman >= gc.UINF { var n5 gc.Node gc.Tempname(&n5, nr.Type) gc.Cgen(nr, &n5) nr = &n5 } rcx := int(reg[x86.REG_CX]) var n1 gc.Node gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX) // Allow either uint32 or uint64 as shift type, // to avoid unnecessary conversion from uint32 to uint64 // just to do the comparison. tcount := gc.Types[gc.Simtype[nr.Type.Etype]] if tcount.Etype < gc.TUINT32 { tcount = gc.Types[gc.TUINT32] } gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX var n3 gc.Node gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX) var oldcx gc.Node if rcx > 0 && !gc.Samereg(&cx, res) { gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil) gmove(&cx, &oldcx) } cx.Type = tcount var n2 gc.Node if gc.Samereg(&cx, res) { gc.Regalloc(&n2, nl.Type, nil) } else { gc.Regalloc(&n2, nl.Type, res) } if nl.Ullman >= nr.Ullman { gc.Cgen(nl, &n2) gc.Cgen(nr, &n1) gmove(&n1, &n3) } else { gc.Cgen(nr, &n1) gmove(&n1, &n3) gc.Cgen(nl, &n2) } gc.Regfree(&n3) // test and fix up large shifts if !bounded { gc.Nodconst(&n3, tcount, nl.Type.Width*8) gins(optoas(gc.OCMP, tcount), &n1, &n3) p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1) if op == gc.ORSH && gc.Issigned[nl.Type.Etype] { gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) gins(a, &n3, &n2) } else { gc.Nodconst(&n3, nl.Type, 0) gmove(&n3, &n2) } gc.Patch(p1, gc.Pc) } gins(a, &n1, &n2) if oldcx.Op != 0 { cx.Type = gc.Types[gc.TUINT64] gmove(&oldcx, &cx) gc.Regfree(&oldcx) } gmove(&n2, res) gc.Regfree(&n1) gc.Regfree(&n2) }
func cgen_floatsse(n *gc.Node, res *gc.Node) {
	var a int

	nl := n.Left
	nr := n.Right
	switch n.Op {
	default:
		gc.Dump("cgen_floatsse", n)
		gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
		return

	case gc.OMINUS,
		gc.OCOM:
		nr = gc.Nodintconst(-1)
		gc.Convlit(&nr, n.Type)
		a = foptoas(gc.OMUL, nl.Type, 0)
		goto sbop

	// symmetric binary
	case gc.OADD,
		gc.OMUL:
		a = foptoas(int(n.Op), nl.Type, 0)
		goto sbop

	// asymmetric binary
	case gc.OSUB,
		gc.OMOD,
		gc.ODIV:
		a = foptoas(int(n.Op), nl.Type, 0)
		goto abop
	}

sbop: // symmetric binary
	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
		r := nl
		nl = nr
		nr = r
	}

abop: // asymmetric binary
	if nl.Ullman >= nr.Ullman {
		var nt gc.Node
		gc.Tempname(&nt, nl.Type)
		gc.Cgen(nl, &nt)
		var n2 gc.Node
		gc.Mgen(nr, &n2, nil)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&nt, &n1)
		gins(a, &n2, &n1)
		gmove(&n1, res)
		gc.Regfree(&n1)
		gc.Mfree(&n2)
	} else {
		var n2 gc.Node
		gc.Regalloc(&n2, nr.Type, res)
		gc.Cgen(nr, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, nil)
		gc.Cgen(nl, &n1)
		gins(a, &n2, &n1)
		gc.Regfree(&n2)
		gmove(&n1, res)
		gc.Regfree(&n1)
	}

	return
}
func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) { nl := n.Left nr := n.Right a := int(n.Op) if true_ == 0 { // brcom is not valid on floats when NaN is involved. p1 := gc.Gbranch(obj.AJMP, nil, 0) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // No need to avoid re-genning ninit. bgen_float(n, 1, -likely, p2) gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to) gc.Patch(p2, gc.Pc) return } var tmp gc.Node var et int var n2 gc.Node var ax gc.Node if !gc.Thearch.Use387 { if nl.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, nl.Type) gc.Cgen(nl, &n1) nl = &n1 } if nr.Addable == 0 { var tmp gc.Node gc.Tempname(&tmp, nr.Type) gc.Cgen(nr, &tmp) nr = &tmp } var n2 gc.Node gc.Regalloc(&n2, nr.Type, nil) gmove(nr, &n2) nr = &n2 if nl.Op != gc.OREGISTER { var n3 gc.Node gc.Regalloc(&n3, nl.Type, nil) gmove(nl, &n3) nl = &n3 } if a == gc.OGE || a == gc.OGT { // only < and <= work right with NaN; reverse if needed r := nr nr = nl nl = r a = gc.Brrev(a) } gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr) if nl.Op == gc.OREGISTER { gc.Regfree(nl) } gc.Regfree(nr) goto ret } else { goto x87 } x87: a = gc.Brrev(a) // because the args are stacked if a == gc.OGE || a == gc.OGT { // only < and <= work right with NaN; reverse if needed r := nr nr = nl nl = r a = gc.Brrev(a) } gc.Nodreg(&tmp, nr.Type, x86.REG_F0) gc.Nodreg(&n2, nr.Type, x86.REG_F0+1) gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX) et = gc.Simsimtype(nr.Type) if et == gc.TFLOAT64 { if nl.Ullman > nr.Ullman { gc.Cgen(nl, &tmp) gc.Cgen(nr, &tmp) gins(x86.AFXCHD, &tmp, &n2) } else { gc.Cgen(nr, &tmp) gc.Cgen(nl, &tmp) } gins(x86.AFUCOMIP, &tmp, &n2) gins(x86.AFMOVDP, &tmp, &tmp) // annoying pop but still better than STSW+SAHF } else { // TODO(rsc): The moves back and forth to memory // here are for truncating the value to 32 bits. // This handles 32-bit comparison but presumably // all the other ops have the same problem. // We need to figure out what the right general // solution is, besides telling people to use float64. var t1 gc.Node gc.Tempname(&t1, gc.Types[gc.TFLOAT32]) var t2 gc.Node gc.Tempname(&t2, gc.Types[gc.TFLOAT32]) gc.Cgen(nr, &t1) gc.Cgen(nl, &t2) gmove(&t2, &tmp) gins(x86.AFCOMFP, &t1, &tmp) gins(x86.AFSTSW, nil, &ax) gins(x86.ASAHF, nil, nil) } goto ret ret: if a == gc.OEQ { // neither NE nor P p1 := gc.Gbranch(x86.AJNE, nil, -likely) p2 := gc.Gbranch(x86.AJPS, nil, -likely) gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to) gc.Patch(p1, gc.Pc) gc.Patch(p2, gc.Pc) } else if a == gc.ONE { // either NE or P gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to) gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to) } else { gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to) } }
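// bgen_float only compares with the < and <= flag forms because those are
// the ones that come out right when an operand is NaN; >= and > are handled
// by swapping the operands and reversing the operator, and equality needs
// both "not unequal" and "not unordered". A plain-Go sketch of the operand
// swap (illustrative only, not compiler code):
func greaterViaLess(a, b float64) bool {
	// a > b is evaluated as b < a; both are false when either operand is NaN,
	// which is the behavior the generated branches must preserve.
	return b < a
}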
/* * generate division. * generates one of: * res = nl / nr * res = nl % nr * according to op. */ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) { // Have to be careful about handling // most negative int divided by -1 correctly. // The hardware will trap. // Also the byte divide instruction needs AH, // which we otherwise don't have to deal with. // Easiest way to avoid for int8, int16: use int32. // For int32 and int64, use explicit test. // Could use int64 hw for int32. t := nl.Type t0 := t check := 0 if gc.Issigned[t.Etype] { check = 1 if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) { check = 0 } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 { check = 0 } } if t.Width < 4 { if gc.Issigned[t.Etype] { t = gc.Types[gc.TINT32] } else { t = gc.Types[gc.TUINT32] } check = 0 } a := optoas(op, t) var n3 gc.Node gc.Regalloc(&n3, t0, nil) var ax gc.Node var oldax gc.Node if nl.Ullman >= nr.Ullman { savex(x86.REG_AX, &ax, &oldax, res, t0) gc.Cgen(nl, &ax) gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen gc.Cgen(nr, &n3) gc.Regfree(&ax) } else { gc.Cgen(nr, &n3) savex(x86.REG_AX, &ax, &oldax, res, t0) gc.Cgen(nl, &ax) } if t != t0 { // Convert ax1 := ax n31 := n3 ax.Type = t n3.Type = t gmove(&ax1, &ax) gmove(&n31, &n3) } var n4 gc.Node if gc.Nacl { // Native Client does not relay the divide-by-zero trap // to the executing program, so we must insert a check // for ourselves. gc.Nodconst(&n4, t, 0) gins(optoas(gc.OCMP, t), &n3, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if panicdiv == nil { panicdiv = gc.Sysfunc("panicdivide") } gc.Ginscall(panicdiv, -1) gc.Patch(p1, gc.Pc) } var p2 *obj.Prog if check != 0 { gc.Nodconst(&n4, t, -1) gins(optoas(gc.OCMP, t), &n3, &n4) p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) if op == gc.ODIV { // a / (-1) is -a. gins(optoas(gc.OMINUS, t), nil, &ax) gmove(&ax, res) } else { // a % (-1) is 0. gc.Nodconst(&n4, t, 0) gmove(&n4, res) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) } var olddx gc.Node var dx gc.Node savex(x86.REG_DX, &dx, &olddx, res, t) if !gc.Issigned[t.Etype] { gc.Nodconst(&n4, t, 0) gmove(&n4, &dx) } else { gins(optoas(gc.OEXTEND, t), nil, nil) } gins(a, &n3, nil) gc.Regfree(&n3) if op == gc.ODIV { gmove(&ax, res) } else { gmove(&dx, res) } restx(&dx, &olddx) if check != 0 { gc.Patch(p2, gc.Pc) } restx(&ax, &oldax) }