/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatalf("split64 %v", n.Type)
	}

	if nsclean >= len(sclean) {
		gc.Fatalf("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				gc.Igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME, gc.OINDREG:
			// nothing
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		n.Convconst(&n1, n.Type)
		i := n1.Int64()
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
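// The OLITERAL case above is easiest to read at the value level. A minimal
// plain-Go sketch of the same split (illustrative only, not part of the
// original source; it assumes the little-endian layout that hi.Xoffset += 4
// encodes for memory operands):
//
//	lo := uint32(i)      // low half is always unsigned
//	hi := int32(i >> 32) // high half keeps the sign for TINT64
//
// For a TUINT64 source the high half would be uint32(i >> 32) instead.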
func bignodes() {
	if bignodes_did {
		return
	}
	bignodes_did = true

	gc.Nodconst(&zerof, gc.Types[gc.TINT64], 0)
	zerof.Convconst(&zerof, gc.Types[gc.TFLOAT64])

	var i big.Int
	i.SetInt64(1)
	i.Lsh(&i, 63)

	var bigi gc.Node
	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	bigi.SetBigInt(&i)
	bigi.Convconst(&two63f, gc.Types[gc.TFLOAT64])

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	i.Lsh(&i, 1)
	bigi.SetBigInt(&i)
	bigi.Convconst(&two64f, gc.Types[gc.TFLOAT64])
}
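// This bignodes variant caches the float64 constants that floatmove below
// compares against: zerof = 0.0, two63f = 2**63, and two64f = 2**64. (The
// bigf/bigi nodes referenced by the mips64/amd64/ppc64 gmove functions come
// from their backends' own analogous bignodes definitions, not shown here.)
// The same values in plain Go, for reference (illustrative only):
//
//	two63f := math.Ldexp(1, 63) // 9.223372036854776e18
//	two64f := math.Ldexp(1, 64) // 1.8446744073709552e19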
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", f, t)
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands;
	// except 64-bit, which always copies via registers anyway.
	var a obj.As

	var r1 gc.Node
	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT32])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm.AMOVW, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = gc.Simsimtype(con.Type)

		// constants can't move directly to memory
		if gc.Ismem(t) && !gc.Is64(t.Type) {
			goto hard
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatalf("gmove %v -> %v", f, t)
		return

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8:
		a = arm.AMOVBS

	case gc.TUINT8<<16 | gc.TUINT8:
		if !gc.Ismem(f) {
			a = arm.AMOVB
			break
		}
		fallthrough

	case gc.TINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = arm.AMOVBU

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8:
		a = arm.AMOVBS
		goto trunc64

	case gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm.AMOVBU
		goto trunc64

	case gc.TINT16<<16 | gc.TINT16: // same size
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16:
		a = arm.AMOVHS

	case gc.TUINT16<<16 | gc.TUINT16:
		if !gc.Ismem(f) {
			a = arm.AMOVH
			break
		}
		fallthrough

	case gc.TINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = arm.AMOVHU

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16:
		a = arm.AMOVHS
		goto trunc64

	case gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm.AMOVHU
		goto trunc64

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = arm.AMOVW

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &r1, t)
		gc.Regfree(&r1)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		var r1 gc.Node
		gc.Regalloc(&r1, flo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, fhi.Type, nil)
		gins(arm.AMOVW, &flo, &r1)
		gins(arm.AMOVW, &fhi, &r2)
		gins(arm.AMOVW, &r1, &tlo)
		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		splitclean()
		return

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = arm.AMOVBS
		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = arm.AMOVBU
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = arm.AMOVHS
		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = arm.AMOVHU
		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		var r1 gc.Node
		gc.Regalloc(&r1, tlo.Type, nil)
		var r2 gc.Node
		gc.Regalloc(&r2, thi.Type, nil)
		gmove(f, &r1)
		p1 := gins(arm.AMOVW, &r1, &r2)
		p1.From.Type = obj.TYPE_SHIFT
		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31
		p1.From.Reg = 0

		//print("gmove: %v\n", p1);
		gins(arm.AMOVW, &r1, &tlo)

		gins(arm.AMOVW, &r2, &thi)
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		var r1 gc.Node
		gc.Regalloc(&r1, thi.Type, nil)
		gins(arm.AMOVW, ncon(0), &r1)
		gins(arm.AMOVW, &r1, &thi)
		gc.Regfree(&r1)
		splitclean()
		return

	//	case CASE(TFLOAT64, TUINT64):
	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TUINT32,

		//	case CASE(TFLOAT32, TUINT64):

		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		fa := arm.AMOVF

		a := arm.AMOVFW
		if ft == gc.TFLOAT64 {
			fa = arm.AMOVD
			a = arm.AMOVDW
		}

		ta := arm.AMOVW
		switch tt {
		case gc.TINT8:
			ta = arm.AMOVBS

		case gc.TUINT8:
			ta = arm.AMOVBU

		case gc.TINT16:
			ta = arm.AMOVHS

		case gc.TUINT16:
			ta = arm.AMOVHU
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)        // load to fpu
		p1 := gins(a, &r1, &r1) // convert to w
		switch tt {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			p1.Scond |= arm.C_UBIT
		}

		gins(arm.AMOVW, &r1, &r2) // copy to cpu
		gins(ta, &r2, t)          // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

	/*
	 * integer to float
	 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		fa := arm.AMOVW

		switch ft {
		case gc.TINT8:
			fa = arm.AMOVBS

		case gc.TUINT8:
			fa = arm.AMOVBU

		case gc.TINT16:
			fa = arm.AMOVHS

		case gc.TUINT16:
			fa = arm.AMOVHU
		}

		a := arm.AMOVWF
		ta := arm.AMOVF
		if tt == gc.TFLOAT64 {
			a = arm.AMOVWD
			ta = arm.AMOVD
		}

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		gins(fa, f, &r1)          // load to cpu
		gins(arm.AMOVW, &r1, &r2) // copy to fpu
		p1 := gins(a, &r2, &r2)   // convert
		switch ft {
		case gc.TUINT8,
			gc.TUINT16,
			gc.TUINT32:
			p1.Scond |= arm.C_UBIT
		}

		gins(ta, &r2, t) // store
		gc.Regfree(&r1)
		gc.Regfree(&r2)
		return

	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		gc.Fatalf("gmove UINT64, TFLOAT not implemented")
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm.AMOVF

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm.AMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVF, f, &r1)
		gins(arm.AMOVFD, &r1, &r1)
		gins(arm.AMOVD, &r1, t)
		gc.Regfree(&r1)
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
		gins(arm.AMOVD, f, &r1)
		gins(arm.AMOVDF, &r1, &r1)
		gins(arm.AMOVF, &r1, t)
		gc.Regfree(&r1)
		return
	}

	gins(a, f, t)
	return

	// TODO(kaib): we almost always require a register dest anyway, this can probably be
	// removed.
	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// truncate 64 bit integer
trunc64:
	var fhi gc.Node
	var flo gc.Node
	split64(f, &flo, &fhi)

	gc.Regalloc(&r1, t.Type, nil)
	gins(a, &flo, &r1)
	gins(a, &r1, t)
	gc.Regfree(&r1)
	splitclean()
	return
}
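// This gmove is the 32-bit ARM version; the parallel gmove definitions that
// follow are the mips64, amd64, arm64, 386, ppc64, and s390x versions, which
// live in separate backend packages in the original tree (hence the repeated
// function name). All of them dispatch on the same packed key: source
// simple-type in the high 16 bits, destination simple-type in the low 16
// bits. For example:
//
//	key := uint32(ft)<<16 | uint32(tt)
//	// gc.TINT32<<16 | gc.TUINT64 selects "sign extend int32 into a
//	// 64-bit destination" in the switch above.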
/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r2 gc.Node
	var r1 gc.Node
	var a obj.As
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(mips.AMOVV, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(mips.AMOVV, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = mips.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8, // truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = mips.AMOVBU

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = mips.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16, // truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = mips.AMOVHU

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = mips.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32, // truncate
		gc.TUINT64<<16 | gc.TUINT32:
		a = mips.AMOVWU

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = mips.AMOVV

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = mips.AMOVB
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = mips.AMOVBU
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = mips.AMOVH
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = mips.AMOVHU
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = mips.AMOVW
		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = mips.AMOVWU
		goto rdst

	//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32,
		gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], nil)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins3(mips.ACMPGED, &r1, &r2, nil)
			p1 := gc.Gbranch(mips.ABFPF, nil, 0)
			gins(mips.ASUBD, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			gc.Regfree(&r2)
		}

		gc.Regalloc(&r2, gc.Types[gc.TINT64], t)
		gins(mips.ATRUNCDV, &r1, &r1)
		gins(mips.AMOVV, &r1, &r2)
		gc.Regfree(&r1)

		if tt == gc.TUINT64 {
			p1 := gc.Gbranch(mips.ABFPF, nil, 0) // use FCR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TINT64], mips.REGTMP)
			gmove(&bigi, &r1)
			gins(mips.AADDVU, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		gc.Regfree(&r2)
		return

	//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (x -> (x>>1)|(x&1)), convert, and double.
	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		bignodes()

		var rtmp gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
		gmove(f, &r1)
		if ft == gc.TUINT64 {
			gc.Nodreg(&rtmp, gc.Types[gc.TUINT64], mips.REGTMP)
			gmove(&bigi, &rtmp)
			gins(mips.AAND, &r1, &rtmp)
			p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
			var r3 gc.Node
			gc.Regalloc(&r3, gc.Types[gc.TUINT64], nil)
			p2 := gins3(mips.AAND, nil, &r1, &r3)
			p2.From.Type = obj.TYPE_CONST
			p2.From.Offset = 1
			p3 := gins(mips.ASRLV, nil, &r1)
			p3.From.Type = obj.TYPE_CONST
			p3.From.Offset = 1
			gins(mips.AOR, &r3, &r1)
			gc.Regfree(&r3)
			gc.Patch(p1, gc.Pc)
		}

		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
		gins(mips.AMOVV, &r1, &r2)
		gins(mips.AMOVVD, &r2, &r2)
		gc.Regfree(&r1)

		if ft == gc.TUINT64 {
			p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], mips.FREGTWO)
			gins(mips.AMULD, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		gc.Regfree(&r2)
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = mips.AMOVF

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = mips.AMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = mips.AMOVFD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = mips.AMOVDF
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
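// The uint64->float branch above follows the halve-and-double recipe from
// its comment. A plain-Go sketch of the same computation (illustrative only,
// not part of the original source):
//
//	func u64toF64(x uint64) float64 {
//		if x < 1<<63 {
//			return float64(int64(x)) // small enough: native conversion
//		}
//		// halve, keeping the low bit so the final doubling rounds right
//		return float64(int64(x>>1|x&1)) * 2
//	}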
/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var a obj.As
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}

			// 64-bit immediates are really 32-bit sign-extended
			// unless moving into a register.
			if gc.Isint[tt] {
				if i := con.Int64(); int64(int32(i)) != i {
					goto hard
				}
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = x86.AMOVQL

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = x86.AMOVQ

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQSX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQZX
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX
		goto rdst

	case gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQSX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQZX
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQSX
		goto rdst

		// AMOVL into a register zeros the top of the register,
		// so this is not always necessary, but if we rely on AMOVL
		// the optimizer is almost certain to screw with us.
	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQZX
		goto rdst

	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = x86.ACVTTSS2SQ
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = x86.ACVTTSD2SQ
		goto rdst

	// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]
		goto hard

	// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]
		goto hard

	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		a := x86.ACVTTSS2SQ
		if ft == gc.TFLOAT64 {
			a = x86.ACVTTSD2SQ
		}
		bignodes()
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], nil)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[ft], nil)
		var r4 gc.Node
		gc.Regalloc(&r4, gc.Types[tt], nil)
		gins(optoas(gc.OAS, f.Type), f, &r1)
		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
		gins(a, &r1, &r2)
		gins(x86.AMOVQ, &bigi, &r4)
		gins(x86.AXORQ, &r4, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSQ2SS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = x86.ACVTSQ2SD
		goto rdst

	// convert via int32
	case gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	// convert via int64.
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]
		goto hard

	// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		a := x86.ACVTSQ2SS
		if tt == gc.TFLOAT64 {
			a = x86.ACVTSQ2SD
		}
		var zero gc.Node
		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
		var one gc.Node
		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
		var r1 gc.Node
		gc.Regalloc(&r1, f.Type, f)
		var r2 gc.Node
		gc.Regalloc(&r2, t.Type, t)
		var r3 gc.Node
		gc.Regalloc(&r3, f.Type, nil)
		var r4 gc.Node
		gc.Regalloc(&r4, f.Type, nil)
		gmove(f, &r1)
		gins(x86.ACMPQ, &r1, &zero)
		p1 := gc.Gbranch(x86.AJLT, nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gmove(&r1, &r3)
		gins(x86.ASHRQ, &one, &r3)
		gmove(&r1, &r4)
		gins(x86.AANDL, &one, &r4)
		gins(x86.AORQ, &r4, &r3)
		gins(a, &r3, &r2)
		gins(optoas(gc.OADD, t.Type), &r2, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	var r1 gc.Node
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
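// The float->uint64 branch above is the "subtract 2**63" algorithm named in
// its comment. A value-level sketch in plain Go (illustrative only, not part
// of the original source):
//
//	func f64toU64(v float64) uint64 {
//		const two63 = 1 << 63 // the bigf/bigi constant from bignodes
//		if v < two63 {
//			return uint64(int64(v)) // in signed range: native conversion
//		}
//		// subtract 2**63, convert, then add 2**63 back; XOR works because
//		// the top bit of the intermediate result is known to be clear.
//		return uint64(int64(v-two63)) ^ 1<<63
//	}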
/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r1 gc.Node
	var a obj.As
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8, // truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16, // truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = arm64.AMOVWU

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = arm64.AMOVD

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = arm64.AMOVB
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = arm64.AMOVBU
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = arm64.AMOVH
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = arm64.AMOVHU
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = arm64.AMOVW
		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = arm64.AMOVWU
		goto rdst

	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = arm64.AFCVTZSSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = arm64.AFCVTZSDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = arm64.AFCVTZSS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = arm64.AFCVTZSD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT32:
		a = arm64.AFCVTZUSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT32:
		a = arm64.AFCVTZUDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT64:
		a = arm64.AFCVTZUS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT64:
		a = arm64.AFCVTZUD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	/*
	 * integer to float
	 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFWS
		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFWD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFD
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFWS
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFWD
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFS
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFD
		goto rdst

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = arm64.AFCVTSD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = arm64.AFCVTDS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	gc.Regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
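// Unlike the amd64, mips64, and ppc64 versions, this arm64 gmove needs no
// bignodes workaround for unsigned 64-bit values: AArch64 has direct
// unsigned conversion instructions (FCVTZU* for float->uint, UCVTF* for
// uint->float), so every float<->integer pair above reduces to a single
// instruction plus a register destination, e.g. (hypothetical trace):
//
//	FCVTZUD	F0, R1	// a = arm64.AFCVTZUD for float64 -> uint64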
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", f, t)
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	if gc.Isfloat[ft] || gc.Isfloat[tt] {
		floatmove(f, t)
		return
	}

	// cannot have two integer memory operands;
	// except 64-bit, which always copies via registers anyway.
	var r1 gc.Node
	var a obj.As
	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = gc.Simsimtype(con.Type)
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatalf("gmove %v -> %v", f, t)
		return

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = x86.AMOVB
		goto rsrc

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVB, &r1, t)
		splitclean()
		return

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = x86.AMOVW
		goto rsrc

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVW, &r1, t)
		splitclean()
		return

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVL, &r1, t)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		if f.Op == gc.OLITERAL {
			gins(x86.AMOVL, &flo, &tlo)
			gins(x86.AMOVL, &fhi, &thi)
		} else {
			// Implementation of conversion-free x = y for int64 or uint64 x.
			// This is generated by the code that copies small values out of closures,
			// and that code has DX live, so avoid DX and just use AX twice.
			var r1 gc.Node
			gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
			gins(x86.AMOVL, &flo, &r1)
			gins(x86.AMOVL, &r1, &tlo)
			gins(x86.AMOVL, &fhi, &r1)
			gins(x86.AMOVL, &r1, &thi)
		}

		splitclean()
		splitclean()
		return

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX
		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		var flo gc.Node
		gc.Nodreg(&flo, tlo.Type, x86.REG_AX)
		var fhi gc.Node
		gc.Nodreg(&fhi, thi.Type, x86.REG_DX)
		gmove(f, &flo)
		gins(x86.ACDQ, nil, nil)
		gins(x86.AMOVL, &flo, &tlo)
		gins(x86.AMOVL, &fhi, &thi)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		gins(x86.AMOVL, ncon(0), &thi)
		splitclean()
		return
	}

	gins(a, f, t)
	return

	// requires register source
rsrc:
	gc.Regalloc(&r1, f.Type, t)

	gmove(f, &r1)
	gins(a, &r1, t)
	gc.Regfree(&r1)
	return

	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
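// The TINT32 -> 64-bit case above leans on the 386 CDQ idiom: with the low
// word already in AX, CDQ sign-extends AX into DX, giving the high word for
// free. The value-level equivalent (illustrative only):
//
//	lo := int32(x)
//	hi := lo >> 31 // arithmetic shift: 0 or -1, exactly what CDQ leaves in DX
//
// The rsrc label exists because the truncating byte/word stores need their
// source in a register first (and on 386 only AX/BX/CX/DX have 8-bit forms).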
func floatmove(f *gc.Node, t *gc.Node) {
	var r1 gc.Node

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	// cannot have two floating point memory operands.
	if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = gc.Simsimtype(con.Type)

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		if gc.Thearch.Use387 {
			floatmove_387(f, t)
		} else {
			floatmove_sse(f, t)
		}
		return

	// float to very long integer.
	case gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64:
		if f.Op == gc.OREGISTER {
			cvt = f.Type
			goto hardmem
		}

		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if ft == gc.TFLOAT32 {
			gins(x86.AFMOVF, f, &r1)
		} else {
			gins(x86.AFMOVD, f, &r1)
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		if !gc.Ismem(f) {
			cvt = f.Type
			goto hardmem
		}

		bignodes()
		var f0 gc.Node
		gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0)
		var f1 gc.Node
		gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1)
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)

		if ft == gc.TFLOAT32 {
			gins(x86.AFMOVF, f, &f0)
		} else {
			gins(x86.AFMOVD, f, &f0)
		}

		// if 0 > v { answer = 0 }
		gins(x86.AFMOVD, &zerof, &f0)

		gins(x86.AFUCOMP, &f0, &f1)
		gins(x86.AFSTSW, nil, &ax)
		gins(x86.ASAHF, nil, nil)
		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)

		// if 1<<64 <= v { answer = 0 too }
		gins(x86.AFMOVD, &two64f, &f0)

		gins(x86.AFUCOMP, &f0, &f1)
		gins(x86.AFSTSW, nil, &ax)
		gins(x86.ASAHF, nil, nil)
		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack

		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)
		gins(x86.AMOVL, ncon(0), &tlo)
		gins(x86.AMOVL, ncon(0), &thi)
		splitclean()
		p1 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p2, gc.Pc)

		// in range; algorithm is:
		//	if small enough, use native float64 -> int64 conversion.
		//	otherwise, subtract 2^63, convert, and add it back.

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)

		// actual work
		gins(x86.AFMOVD, &two63f, &f0)

		gins(x86.AFUCOMP, &f0, &f1)
		gins(x86.AFSTSW, nil, &ax)
		gins(x86.ASAHF, nil, nil)
		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
		gins(x86.AFMOVVP, &f0, t)
		p3 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p2, gc.Pc)
		gins(x86.AFMOVD, &two63f, &f0)
		gins(x86.AFSUBDP, &f0, &f1)
		gins(x86.AFMOVVP, &f0, t)
		split64(t, &tlo, &thi)
		gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63
		gc.Patch(p3, gc.Pc)
		splitclean()

		// restore rounding mode
		gins(x86.AFLDCW, &t1, nil)

		gc.Patch(p1, gc.Pc)
		return

	/*
	 * integer to float
	 */
	case gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var f0 gc.Node
		gc.Nodreg(&f0, t.Type, x86.REG_F0)
		gins(x86.AFMOVV, f, &f0)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &f0, t)
		} else {
			gins(x86.AFMOVDP, &f0, t)
		}
		return

	// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX)

		var dx gc.Node
		gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX)
		var cx gc.Node
		gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
		var t1 gc.Node
		gc.Tempname(&t1, f.Type)
		var tlo gc.Node
		var thi gc.Node
		split64(&t1, &tlo, &thi)
		gmove(f, &t1)
		gins(x86.ACMPL, &thi, ncon(0))
		p1 := gc.Gbranch(x86.AJLT, nil, 0)

		// native
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)

		gins(x86.AFMOVV, &t1, &r1)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		p2 := gc.Gbranch(obj.AJMP, nil, 0)

		// simulated
		gc.Patch(p1, gc.Pc)

		gmove(&tlo, &ax)
		gmove(&thi, &dx)
		p1 = gins(x86.ASHRL, ncon(1), &ax)
		p1.From.Index = x86.REG_DX // double-width shift DX -> AX
		p1.From.Scale = 0
		gins(x86.AMOVL, ncon(0), &cx)
		gins(x86.ASETCC, nil, &cx)
		gins(x86.AORL, &cx, &ax)
		gins(x86.ASHRL, ncon(1), &dx)
		gmove(&dx, &thi)
		gmove(&ax, &tlo)
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
		var r2 gc.Node
		gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1)
		gins(x86.AFMOVV, &t1, &r1)
		gins(x86.AFMOVD, &r1, &r1)
		gins(x86.AFADDDP, &r1, &r2)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		gc.Patch(p2, gc.Pc)
		splitclean()
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return
}
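// The 0xf7f value loaded above is an x87 control word: it keeps the default
// exception masks and precision control but sets the rounding-control field
// (bits 10-11) to 11b, "round toward zero", which Go's float->int conversion
// semantics require. The dance is always the same (illustrative summary):
//
//	FSTCW	t1         // save the current control word
//	MOVW	$0xf7f, t2 // FLDCW has no immediate form, so stage in memory
//	FLDCW	t2         // switch rounding to truncate
//	...                // FMOV*P convert-and-store pops the x87 stack
//	FLDCW	t1         // restore the caller's rounding mode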
/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r2 gc.Node
	var r1 gc.Node
	var a obj.As
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			f.Convconst(&con, t.Type)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			f.Convconst(&con, gc.Types[gc.TUINT64])
			var r1 gc.Node
			gc.Regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			gc.Regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// float constants come from memory.
	//if(isfloat[tt])
	//	goto hard;

	// 64-bit immediates are also from memory.
	//if(isint[tt])
	//	goto hard;
	//// 64-bit immediates are really 32-bit sign-extended
	//// unless moving into a register.
	//if(isint[tt]) {
	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
	//		goto hard;
	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
	//		goto hard;
	//}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

	/*
	 * integer copy and truncate
	 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8, // truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16, // truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = ppc64.AMOVWZ

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = ppc64.AMOVD

	/*
	 * integer up-conversions
	 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVB
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVBZ
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVH
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVHZ
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVW
		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVWZ
		goto rdst

	//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	/*
	 * float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32,
		gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], f)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins(ppc64.AFCMPU, &r1, &r2)
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
			gins(ppc64.AFSUB, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			gc.Regfree(&r2)
		}

		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
		gins(ppc64.AFCTIDZ, &r1, &r2)
		p1 := gins(ppc64.AFMOVD, &r2, nil)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AMOVD, nil, &r3)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		if tt == gc.TUINT64 {
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
			gins(ppc64.AMOVD, &bigi, &r1)
			gins(ppc64.AADD, &r1, &r3)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r3, t)
		gc.Regfree(&r3)
		return

	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		bignodes()

		// The algorithm is:
		//	if small enough, use native int64 -> float64 conversion,
		//	otherwise halve (x -> (x>>1)|(x&1)), convert, and double.
		// Note: could use FCFIDU instead if target supports it.
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
		gmove(f, &r1)
		if ft == gc.TUINT64 {
			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
			gmove(&bigi, &r2)
			gins(ppc64.ACMPU, &r1, &r2)
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
			var r3 gc.Node
			gc.Regalloc(&r3, gc.Types[gc.TUINT64], nil)
			p2 := gins(ppc64.AANDCC, nil, &r3) // andi.
			p2.Reg = r1.Reg
			p2.From.Type = obj.TYPE_CONST
			p2.From.Offset = 1
			p3 := gins(ppc64.ASRD, nil, &r1)
			p3.From.Type = obj.TYPE_CONST
			p3.From.Offset = 1
			gins(ppc64.AOR, &r3, &r1)
			gc.Regfree(&r3)
			gc.Patch(p1, gc.Pc)
		}

		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
		p1 := gins(ppc64.AMOVD, &r1, nil)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AFMOVD, nil, &r2)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gins(ppc64.AFCFID, &r2, &r2)
		gc.Regfree(&r1)
		if ft == gc.TUINT64 {
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
			gins(ppc64.AFMUL, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		gc.Regfree(&r2)
		return

	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = ppc64.AFRSP
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
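// ppc64, as targeted here, has no direct GPR<->FPR move, so both conversion
// paths above bounce the raw bits through a scratch stack slot: MOVD Rn,
// -8(SP) followed by FMOVD -8(SP), Fm on the way in, and the reverse on the
// way out, with FCTIDZ/FCFID doing the actual float<->int64 conversions once
// the value sits in a floating-point register.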
// generate move:
//	t = f
// hard part is conversions.
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	var a obj.As

	// cannot have two memory operands
	if gc.Ismem(f) && gc.Ismem(t) {
		if gmvc(f, t) {
			return
		}
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if t.Type.IsFloat() {
				goto hard
			}

			// all immediates are 16-bit sign-extended
			// unless moving into a register.
			if t.Type.IsInteger() {
				if i := con.Int64(); int64(int16(i)) != i {
					goto hard
				}
			}

			// immediate moves to memory have a 12-bit unsigned displacement
			if t.Xoffset < 0 || t.Xoffset >= 4096-8 {
				goto hard
			}
		}
	}

	// a float-to-int or int-to-float conversion requires the source operand in a register
	if gc.Ismem(f) && ((f.Type.IsFloat() && t.Type.IsInteger()) || (f.Type.IsInteger() && t.Type.IsFloat())) {
		cvt = f.Type
		goto hard
	}

	// a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register
	if gc.Ismem(f) && f.Type.IsFloat() && t.Type.IsFloat() && (ft != tt) {
		cvt = f.Type
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

	// integer copy and truncate
	case gc.TINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = s390x.AMOVB

	case gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = s390x.AMOVBZ

	case gc.TINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = s390x.AMOVH

	case gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = s390x.AMOVHZ

	case gc.TINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		gc.TUINT64<<16 | gc.TINT32:
		a = s390x.AMOVW

	case gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = s390x.AMOVWZ

	case gc.TINT64<<16 | gc.TINT64,
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = s390x.AMOVD

	// sign extend int8
	case gc.TINT8<<16 | gc.TINT16,
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = s390x.AMOVB
		goto rdst

	// zero extend uint8
	case gc.TUINT8<<16 | gc.TINT16,
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = s390x.AMOVBZ
		goto rdst

	// sign extend int16
	case gc.TINT16<<16 | gc.TINT32,
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = s390x.AMOVH
		goto rdst

	// zero extend uint16
	case gc.TUINT16<<16 | gc.TINT32,
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = s390x.AMOVHZ
		goto rdst

	// sign extend int32
	case gc.TINT32<<16 | gc.TINT64,
		gc.TINT32<<16 | gc.TUINT64:
		a = s390x.AMOVW
		goto rdst

	// zero extend uint32
	case gc.TUINT32<<16 | gc.TINT64,
		gc.TUINT32<<16 | gc.TUINT64:
		a = s390x.AMOVWZ
		goto rdst

	// float to integer
	case gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT16:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TFLOAT32<<16 | gc.TUINT32:
		a = s390x.ACLFEBR
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT64:
		a = s390x.ACLGEBR
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT16:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TFLOAT64<<16 | gc.TUINT32:
		a = s390x.ACLFDBR
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT64:
		a = s390x.ACLGDBR
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TINT16:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TFLOAT32<<16 | gc.TINT32:
		a = s390x.ACFEBRA
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = s390x.ACGEBRA
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TINT16:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = s390x.ACFDBRA
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = s390x.ACGDBRA
		goto rdst

	// integer to float
	case gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TUINT32<<16 | gc.TFLOAT32:
		a = s390x.ACELFBR
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT32:
		a = s390x.ACELGBR
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TUINT32]
		goto hard

	case gc.TUINT32<<16 | gc.TFLOAT64:
		a = s390x.ACDLFBR
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT64:
		a = s390x.ACDLGBR
		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TINT32<<16 | gc.TFLOAT32:
		a = s390x.ACEFBRA
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = s390x.ACEGBRA
		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = s390x.ACDFBRA
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = s390x.ACDGBRA
		goto rdst

	// float to float
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = s390x.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = s390x.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = s390x.ALDEBR
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = s390x.ALEDBR
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	if t != nil && t.Op == gc.OREGISTER {
		gins(a, f, t)
		return
	} else {
		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, t)
		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	var r1 gc.Node
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}
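// Two s390x-specific notes on the version above: the gmvc call attempts a
// direct memory-to-memory move (presumably via the MVC instruction) before
// falling back to a register intermediate, and rdst skips the temporary
// register entirely when the destination is already a register, so e.g.
// float64 -> uint32 can be a single conversion instruction (hypothetical
// trace):
//
//	CLFDBR	F0, R3	// a = s390x.ACLFDBR, emitted by gins(a, f, t)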