func anyregalloc() bool {
	var j int

	for i := x86.REG_AX; i <= x86.REG_DI; i++ {
		if reg[i] == 0 {
			goto ok
		}
		for j = 0; j < len(resvd); j++ {
			if resvd[j] == i {
				goto ok
			}
		}
		return true
	ok:
	}

	for i := x86.REG_X0; i <= x86.REG_X7; i++ {
		if reg[i] != 0 {
			return true
		}
	}
	return false
}

/*
 * allocate register of type t, leave in n.
 * if o != N, o is desired fixed register.
 * caller must regfree(n).
 */
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
	if t == nil {
		gc.Fatal("regalloc: t nil")
	}
	et := int(gc.Simtype[t.Etype])

	var i int
	switch et {
	case gc.TINT64, gc.TUINT64:
		gc.Fatal("regalloc64")

	case gc.TINT8,
		gc.TUINT8,
		gc.TINT16,
		gc.TUINT16,
		gc.TINT32,
		gc.TUINT32,
		gc.TPTR32,
		gc.TPTR64,
		gc.TBOOL:
		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= x86.REG_AX && i <= x86.REG_DI {
				goto out
			}
		}

		for i = x86.REG_AX; i <= x86.REG_DI; i++ {
			if reg[i] == 0 {
				goto out
			}
		}

		fmt.Printf("registers allocated at\n")
		for i := x86.REG_AX; i <= x86.REG_DI; i++ {
			fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
		}
		gc.Fatal("out of fixed registers")
		goto err

	case gc.TFLOAT32, gc.TFLOAT64:
		if gc.Use_sse == 0 {
			i = x86.REG_F0
			goto out
		}

		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= x86.REG_X0 && i <= x86.REG_X7 {
				goto out
			}
		}

		for i = x86.REG_X0; i <= x86.REG_X7; i++ {
			if reg[i] == 0 {
				goto out
			}
		}
		fmt.Printf("registers allocated at\n")
		for i := x86.REG_X0; i <= x86.REG_X7; i++ {
			fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
		}
		gc.Fatal("out of floating registers")
	}

	gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))

err:
	gc.Nodreg(n, t, 0)
	return

out:
	if i == x86.REG_SP {
		fmt.Printf("alloc SP\n")
	}
	if reg[i] == 0 {
		regpc[i] = uint32(obj.Getcallerpc(&n))
		if i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP {
			gc.Dump("regalloc-o", o)
			gc.Fatal("regalloc %v", obj.Rconv(i))
		}
	}

	reg[i]++
	gc.Nodreg(n, t, i)
}

func regfree(n *gc.Node) {
	if n.Op == gc.ONAME {
		return
	}
	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
		gc.Fatal("regfree: not a register")
	}
	i := int(n.Val.U.Reg)
	if i == x86.REG_SP {
		return
	}
	if i < 0 || i >= len(reg) {
		gc.Fatal("regfree: reg out of range")
	}
	if reg[i] <= 0 {
		gc.Fatal("regfree: reg not allocated")
	}
	reg[i]--
	if reg[i] == 0 && (i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP) {
		gc.Fatal("regfree %v", obj.Rconv(i))
	}
}

/*
 * generate
 *	as $c, reg
 */
func gconreg(as int, c int64, reg int) {
	var n1 gc.Node
	var n2 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
	gins(as, &n1, &n2)
}

/*
 * swap node contents
 */
func nswap(a *gc.Node, b *gc.Node) {
	t := *a
	*a = *b
	*b = t
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */
var ncon_n gc.Node

func ncon(i uint32) *gc.Node {
	if ncon_n.Type == nil {
		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
	}
	gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
	return &ncon_n
}

var sclean [10]gc.Node

var nsclean int
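// Illustrative usage sketch (not part of the original source): split64 and
// splitclean below are used as a matched pair by callers elsewhere in this
// package. Assuming a 64-bit operand n and hypothetical 32-bit destination
// halves dstlo/dsthi, a typical sequence looks like:
//
//	var lo, hi gc.Node
//	split64(&n, &lo, &hi) // lo and hi now refer to the 32-bit halves of n
//	gins(x86.AMOVL, &lo, &dstlo)
//	gins(x86.AMOVL, &hi, &dsthi)
//	splitclean() // frees any register split64 had to allocate via igen
//
// Each split64 must be balanced by exactly one splitclean; the sclean/nsclean
// stack above bounds the number of splits that may be outstanding at once.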
/*
 * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				cgen(n.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

			// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		gc.Convconst(&n1, n.Type, &n.Val)
		i := gc.Mpgetfix(n1.Val.U.Xval)
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}

func splitclean() {
	if nsclean <= 0 {
		gc.Fatal("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		regfree(&sclean[nsclean])
	}
}

/*
 * set up nodes representing fp constants
 */
var zerof gc.Node

var two64f gc.Node

var two63f gc.Node

var bignodes_did int

func bignodes() {
	if bignodes_did != 0 {
		return
	}
	bignodes_did = 1

	two64f = *ncon(0)
	two64f.Type = gc.Types[gc.TFLOAT64]
	two64f.Val.Ctype = gc.CTFLT
	two64f.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.)

	two63f = two64f
	two63f.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.)

	zerof = two64f
	zerof.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovecflt(zerof.Val.U.Fval, 0)
}

func memname(n *gc.Node, t *gc.Type) {
	gc.Tempname(n, t)
	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
	n.Orig.Sym = n.Sym
}

func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	if gc.Isfloat[ft] || gc.Isfloat[tt] {
		floatmove(f, t)
		return
	}

	// cannot have two integer memory operands;
	// except 64-bit, which always copies via registers anyway.
	var r1 gc.Node
	var a int
	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		gc.Convconst(&con, t.Type, &f.Val)
		f = &con
		ft = gc.Simsimtype(con.Type)
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
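	// The switch key packs the source type into the high 16 bits and the
	// destination type into the low 16 bits, so, for example, a move from
	// int32 to uint64 dispatches on gc.TINT32<<16 | gc.TUINT64 and is handled
	// by the sign-extension case below.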
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = x86.AMOVB
		goto rsrc

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVB, &r1, t)
		splitclean()
		return

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = x86.AMOVW
		goto rsrc

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVW, &r1, t)
		splitclean()
		return

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVL, &r1, t)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		if f.Op == gc.OLITERAL {
			gins(x86.AMOVL, &flo, &tlo)
			gins(x86.AMOVL, &fhi, &thi)
		} else {
			var r1 gc.Node
			gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
			var r2 gc.Node
			gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX)
			gins(x86.AMOVL, &flo, &r1)
			gins(x86.AMOVL, &fhi, &r2)
			gins(x86.AMOVL, &r1, &tlo)
			gins(x86.AMOVL, &r2, &thi)
		}

		splitclean()
		splitclean()
		return

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX
		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX
		goto rdst
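		// The 64-bit widenings in this group emit no code directly: they set
		// cvt to a 32-bit intermediate type and jump to hard, which allocates
		// a register of type cvt and recursively gmoves f into it and then on
		// to t, so the 32-bit-to-64-bit cases below supply the final widening.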
	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		var flo gc.Node
		gc.Nodreg(&flo, tlo.Type, x86.REG_AX)
		var fhi gc.Node
		gc.Nodreg(&fhi, thi.Type, x86.REG_DX)
		gmove(f, &flo)
		gins(x86.ACDQ, nil, nil)
		gins(x86.AMOVL, &flo, &tlo)
		gins(x86.AMOVL, &fhi, &thi)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		gins(x86.AMOVL, ncon(0), &thi)
		splitclean()
		return
	}

	gins(a, f, t)
	return

	// requires register source
rsrc:
	regalloc(&r1, f.Type, t)

	gmove(f, &r1)
	gins(a, &r1, t)
	regfree(&r1)
	return

	// requires register destination
rdst:
	{
		regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return
}

func floatmove(f *gc.Node, t *gc.Node) {
	var r1 gc.Node

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	// cannot have two floating point memory operands.
	if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		gc.Convconst(&con, t.Type, &f.Val)
		f = &con
		ft = gc.Simsimtype(con.Type)

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		if gc.Use_sse != 0 {
			floatmove_sse(f, t)
		} else {
			floatmove_387(f, t)
		}
		return
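	// Background for the x87 cases below: Go's float-to-integer conversion
	// truncates toward zero, but the x87 FPU defaults to round-to-nearest.
	// The conversions therefore save the FPU control word with FSTCW, load
	// 0xf7f -- whose rounding-control bits select truncation -- with FLDCW
	// around the FMOV*P store, and restore the saved control word afterwards.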
		// float to very long integer.
	case gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64:
		if f.Op == gc.OREGISTER {
			cvt = f.Type
			goto hardmem
		}

		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if ft == gc.TFLOAT32 {
			gins(x86.AFMOVF, f, &r1)
		} else {
			gins(x86.AFMOVD, f, &r1)
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		if !gc.Ismem(f) {
			cvt = f.Type
			goto hardmem
		}

		bignodes()
		var f0 gc.Node
		gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0)
		var f1 gc.Node
		gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1)
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)

		if ft == gc.TFLOAT32 {
			gins(x86.AFMOVF, f, &f0)
		} else {
			gins(x86.AFMOVD, f, &f0)
		}

		// if 0 > v { answer = 0 }
		gins(x86.AFMOVD, &zerof, &f0)

		gins(x86.AFUCOMIP, &f0, &f1)
		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)

		// if 1<<64 <= v { answer = 0 too }
		gins(x86.AFMOVD, &two64f, &f0)

		gins(x86.AFUCOMIP, &f0, &f1)
		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)
		gins(x86.AMOVL, ncon(0), &tlo)
		gins(x86.AMOVL, ncon(0), &thi)
		splitclean()
		p1 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p2, gc.Pc)

		// in range; algorithm is:
		//	if small enough, use native float64 -> int64 conversion.
		//	otherwise, subtract 2^63, convert, and add it back.

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)

		// actual work
		gins(x86.AFMOVD, &two63f, &f0)

		gins(x86.AFUCOMIP, &f0, &f1)
		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
		gins(x86.AFMOVVP, &f0, t)
		p3 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p2, gc.Pc)
		gins(x86.AFMOVD, &two63f, &f0)
		gins(x86.AFSUBDP, &f0, &f1)
		gins(x86.AFMOVVP, &f0, t)
		split64(t, &tlo, &thi)
		gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63
		gc.Patch(p3, gc.Pc)
		splitclean()

		// restore rounding mode
		gins(x86.AFLDCW, &t1, nil)

		gc.Patch(p1, gc.Pc)
		return

		/*
		 * integer to float
		 */
	case gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var f0 gc.Node
		gc.Nodreg(&f0, t.Type, x86.REG_F0)
		gins(x86.AFMOVV, f, &f0)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &f0, t)
		} else {
			gins(x86.AFMOVDP, &f0, t)
		}
		return

		// algorithm is:
		//	if small enough, use native int64 -> float64 conversion.
		//	otherwise, halve (rounding to odd?), convert, and double.
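		// Concretely, the case below branches on the sign bit of the high
		// word (CMPL thi, $0 / JLT): values below 1<<63 fit in a signed int64
		// and take the native FILD (AFMOVV) path, while larger values are
		// halved first (the SHRL whose From.Index is REG_DX encodes a
		// double-width shift that feeds DX's low bit into AX), converted, and
		// doubled back with FADDDP; the SETCC/ORL pair adjusts the low bit to
		// compensate for the bit lost by the halving, the "rounding to odd"
		// mentioned above.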
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX)

		var dx gc.Node
		gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX)
		var cx gc.Node
		gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
		var t1 gc.Node
		gc.Tempname(&t1, f.Type)
		var tlo gc.Node
		var thi gc.Node
		split64(&t1, &tlo, &thi)
		gmove(f, &t1)
		gins(x86.ACMPL, &thi, ncon(0))
		p1 := gc.Gbranch(x86.AJLT, nil, 0)

		// native
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)

		gins(x86.AFMOVV, &t1, &r1)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		p2 := gc.Gbranch(obj.AJMP, nil, 0)

		// simulated
		gc.Patch(p1, gc.Pc)

		gmove(&tlo, &ax)
		gmove(&thi, &dx)
		p1 = gins(x86.ASHRL, ncon(1), &ax)
		p1.From.Index = x86.REG_DX // double-width shift DX -> AX
		p1.From.Scale = 0
		gins(x86.AMOVL, ncon(0), &cx)
		gins(x86.ASETCC, nil, &cx)
		gins(x86.AORL, &cx, &ax)
		gins(x86.ASHRL, ncon(1), &dx)
		gmove(&dx, &thi)
		gmove(&ax, &tlo)
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
		var r2 gc.Node
		gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1)
		gins(x86.AFMOVV, &t1, &r1)
		gins(x86.AFMOVD, &r1, &r1)
		gins(x86.AFADDDP, &r1, &r2)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		gc.Patch(p2, gc.Pc)
		splitclean()
		return
	}

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return
}

func floatmove_387(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		goto fatal

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if f.Op != gc.OREGISTER {
			if ft == gc.TFLOAT32 {
				gins(x86.AFMOVF, f, &r1)
			} else {
				gins(x86.AFMOVD, f, &r1)
			}
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

		// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		var t1 gc.Node
		gc.Tempname(&t1, gc.Types[gc.TINT32])

		gmove(f, &t1)
		switch tt {
		default:
			gc.Fatal("gmove %v", gc.Nconv(t, 0))

		case gc.TINT8:
			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
			gins(x86.ACMPL, &t1, ncon(0x7f))
			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
			p3 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			gc.Patch(p2, gc.Pc)
			gmove(ncon(-0x80&(1<<32-1)), &t1)
			gc.Patch(p3, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT8:
			gins(x86.ATESTL, ncon(0xffffff00), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT16:
			gins(x86.ATESTL, ncon(0xffff0000), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)
		}

		return
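		// A uint32 result cannot come from a single 387 store: FIST writes
		// signed integers, and values in [1<<31, 1<<32) would overflow a
		// signed 32-bit store. The next case therefore converts through a
		// signed int64 memory temporary (hardmem) and lets the recursive
		// gmove truncate the low 32 bits into the uint32 destination.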
		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

		/*
		 * integer to float
		 */
	case gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64:
		if t.Op != gc.OREGISTER {
			goto hard
		}
		if f.Op == gc.OREGISTER {
			cvt = f.Type
			goto hardmem
		}

		switch ft {
		case gc.TINT16:
			a = x86.AFMOVW

		case gc.TINT32:
			a = x86.AFMOVL

		default:
			a = x86.AFMOVV
		}

		// convert via int32 memory
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]

		goto hardmem

		// convert via int64 memory
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

		// The way the code generator uses floating-point
		// registers, a move from F0 to F0 is intended as a no-op.
		// On the x86, it's not: it pushes a second copy of F0
		// on the floating point stack.  So toss it away here.
		// Also, F0 is the *only* register we ever evaluate
		// into, so we should only see register/register as F0/F0.
		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32,
		gc.TFLOAT64<<16 | gc.TFLOAT64:
		if gc.Ismem(f) && gc.Ismem(t) {
			goto hard
		}
		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
			if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 {
				goto fatal
			}
			return
		}

		a = x86.AFMOVF
		if ft == gc.TFLOAT64 {
			a = x86.AFMOVD
		}
		if gc.Ismem(t) {
			if f.Op != gc.OREGISTER || f.Val.U.Reg != x86.REG_F0 {
				gc.Fatal("gmove %v", gc.Nconv(f, 0))
			}
			a = x86.AFMOVFP
			if ft == gc.TFLOAT64 {
				a = x86.AFMOVDP
			}
		}

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		if gc.Ismem(f) && gc.Ismem(t) {
			goto hard
		}
		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
			if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 {
				goto fatal
			}
			return
		}

		if f.Op == gc.OREGISTER {
			gins(x86.AFMOVDP, f, t)
		} else {
			gins(x86.AFMOVF, f, t)
		}
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		if gc.Ismem(f) && gc.Ismem(t) {
			goto hard
		}
		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
			var r1 gc.Node
			gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
			gins(x86.AFMOVFP, f, &r1)
			gins(x86.AFMOVF, &r1, t)
			return
		}

		if f.Op == gc.OREGISTER {
			gins(x86.AFMOVFP, f, t)
		} else {
			gins(x86.AFMOVD, f, t)
		}
		return
	}

	gins(a, f, t)
	return

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return

	// should not happen
fatal:
	gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))

	return
}

func floatmove_sse(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var cvt *gc.Type
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)

	switch uint32(ft)<<16 | uint32(tt) {
	// should not happen
	default:
		gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
		return

		// convert via int32.
		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]

		goto hard
		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

		// convert via int32 memory
		/*
		 * integer to float
		 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64 memory
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return

	// requires register destination
rdst:
	regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return
}

func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Val.U.Reg != t.Val.U.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
		gc.Fatal("gins MOVF reg, reg")
	}
	if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
		gc.Fatal("gins CVTSD2SS const")
	}
	if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Val.U.Reg == x86.REG_F0 {
		gc.Fatal("gins MOVSD into F0")
	}

	switch as {
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAL:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
		}
	}

	var af obj.Addr
	var at obj.Addr
	if f != nil {
		af = gc.Naddr(f)
	}
	if t != nil {
		at = gc.Naddr(t)
	}
	p := gc.Prog(as)
	if f != nil {
		p.From = af
	}
	if t != nil {
		p.To = at
	}
	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := 0
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4
	}

	if w != 0 && f != nil && (af.Width > int64(w) || at.Width > int64(w)) {
		gc.Dump("bad width from:", f)
		gc.Dump("bad width to:", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatal("bad use of addr: %v", p)
	}

	return p
}

func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}

func sudoclean() {
}

func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	*a = obj.Addr{}
	return false
}
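// Illustrative usage sketch (not part of the original source): gins is the
// low-level instruction emitter used throughout this file, and gconreg/ncon
// wrap common constant operands. Assuming a scratch 32-bit value, clearing a
// freshly allocated register and adding a constant to it might look like:
//
//	var r gc.Node
//	regalloc(&r, gc.Types[gc.TUINT32], nil)  // any free fixed register
//	gins(x86.AMOVL, ncon(0), &r)             // MOVL $0, r
//	gconreg(x86.AADDL, 10, int(r.Val.U.Reg)) // ADDL $10, r
//	regfree(&r)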