/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
    if !gc.Is64(n.Type) {
        gc.Cgen(n, res)
        return nil
    }

    var tmp gc.Node
    gc.Tempname(&tmp, gc.Types[gc.TINT64])
    gc.Cgen(n, &tmp)
    var lo gc.Node
    var hi gc.Node
    split64(&tmp, &lo, &hi)
    gmove(&lo, res)
    if bounded {
        splitclean()
        return nil
    }

    var n1 gc.Node
    gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
    var n2 gc.Node
    gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
    var zero gc.Node
    gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
    gmove(&hi, &n1)
    gmove(&zero, &n2)
    gins(arm.ACMP, &n1, &n2)
    gc.Regfree(&n2)
    gc.Regfree(&n1)
    splitclean()
    return gc.Gbranch(arm.ABNE, nil, -1)
}
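// Illustrative sketch (not part of the compiler): callers of cgenindex are
// expected to patch the returned branch onto the out-of-range path. The names
// p, tmp, and boundsFail below are assumptions made only for this example;
// the real callers wire the branch to the panicindex call they emit, using
// gc.Patch as seen elsewhere in this file.
//
//	p := cgenindex(nr, &tmp, bounded)
//	// ... emit the in-range indexing code ...
//	if p != nil {
//		gc.Patch(p, boundsFail) // boundsFail: *obj.Prog at the panic call
//	}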
/*
 * generate an addressable node in res, containing the value of n.
 * n is an array index, and might be any size; res width is <= 32-bit.
 * returns Prog* to patch to panic call.
 */
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
    if !gc.Is64(n.Type) {
        if n.Addable != 0 {
            // nothing to do.
            *res = *n
        } else {
            gc.Tempname(res, gc.Types[gc.TUINT32])
            gc.Cgen(n, res)
        }

        return nil
    }

    var tmp gc.Node
    gc.Tempname(&tmp, gc.Types[gc.TINT64])
    gc.Cgen(n, &tmp)
    var lo gc.Node
    var hi gc.Node
    split64(&tmp, &lo, &hi)
    gc.Tempname(res, gc.Types[gc.TUINT32])
    gmove(&lo, res)
    if bounded {
        splitclean()
        return nil
    }

    var zero gc.Node
    gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
    gins(x86.ACMPL, &hi, &zero)
    splitclean()
    return gc.Gbranch(x86.AJNE, nil, +1)
}
/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
    if !gc.Is64(n.Type) {
        gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
    }

    if nsclean >= len(sclean) {
        gc.Fatal("split64 clean")
    }
    sclean[nsclean].Op = gc.OEMPTY
    nsclean++
    switch n.Op {
    default:
        switch n.Op {
        default:
            var n1 gc.Node
            if !dotaddable(n, &n1) {
                gc.Igen(n, &n1, nil)
                sclean[nsclean-1] = n1
            }

            n = &n1

        case gc.ONAME:
            if n.Class == gc.PPARAMREF {
                var n1 gc.Node
                gc.Cgen(n.Heapaddr, &n1)
                sclean[nsclean-1] = n1
                n = &n1
            }

            // nothing
        case gc.OINDREG:
            break
        }

        *lo = *n
        *hi = *n
        lo.Type = gc.Types[gc.TUINT32]
        if n.Type.Etype == gc.TINT64 {
            hi.Type = gc.Types[gc.TINT32]
        } else {
            hi.Type = gc.Types[gc.TUINT32]
        }
        hi.Xoffset += 4

    case gc.OLITERAL:
        var n1 gc.Node
        gc.Convconst(&n1, n.Type, &n.Val)
        i := gc.Mpgetfix(n1.Val.U.Xval)
        gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
        i >>= 32
        if n.Type.Etype == gc.TINT64 {
            gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
        } else {
            gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
        }
    }
}
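// Illustrative sketch (not part of the compiler): the OLITERAL case above
// reduces to this plain-Go arithmetic. splitConst is a hypothetical helper
// used only to show how the halves are derived; for a signed 64-bit value
// the compiler types the high word as int32 so later comparisons see its sign.
func splitConst(i int64) (lo, hi uint32) {
    lo = uint32(i)       // low 32 bits, always treated as unsigned
    hi = uint32(i >> 32) // high 32 bits; typed TINT32 when the source is TINT64
    return
}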
/*
 * generate division according to op, one of:
 *	res = nl / nr
 *	res = nl % nr
 */
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
    if gc.Is64(nl.Type) {
        gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
    }

    var t *gc.Type
    if gc.Issigned[nl.Type.Etype] {
        t = gc.Types[gc.TINT32]
    } else {
        t = gc.Types[gc.TUINT32]
    }
    var ax gc.Node
    var oldax gc.Node
    savex(x86.REG_AX, &ax, &oldax, res, t)
    var olddx gc.Node
    var dx gc.Node
    savex(x86.REG_DX, &dx, &olddx, res, t)
    dodiv(op, nl, nr, res, &ax, &dx)
    restx(&dx, &olddx)
    restx(&ax, &oldax)
}
func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } if gc.Isfloat[ft] || gc.Isfloat[tt] { floatmove(f, t) return } // cannot have two integer memory operands; // except 64-bit, which always copies via registers anyway. var r1 gc.Node var a int if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = gc.Simsimtype(con.Type) } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TUINT8: a = x86.AMOVB case gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = x86.AMOVB goto rsrc case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVB, &r1, t) splitclean() return case gc.TINT16<<16 | gc.TINT16, // same size gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TUINT16: a = x86.AMOVW case gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = x86.AMOVW goto rsrc case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVW, &r1, t) splitclean() return case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = x86.AMOVL case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVL, &r1, t) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) if f.Op == gc.OLITERAL { gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) } else { var r1 gc.Node gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX) var r2 gc.Node gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX) gins(x86.AMOVL, &flo, &r1) gins(x86.AMOVL, &fhi, &r2) gins(x86.AMOVL, &r1, &tlo) gins(x86.AMOVL, &r2, &thi) } splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, 
// sign extend int8 gc.TINT8<<16 | gc.TUINT16: a = x86.AMOVBWSX goto rdst case gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = x86.AMOVBLSX goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16: a = x86.AMOVBWZX goto rdst case gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = x86.AMOVBLZX goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = x86.AMOVWLSX goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = x86.AMOVWLZX goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) var flo gc.Node gc.Nodreg(&flo, tlo.Type, x86.REG_AX) var fhi gc.Node gc.Nodreg(&fhi, thi.Type, x86.REG_DX) gmove(f, &flo) gins(x86.ACDQ, nil, nil) gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) gins(x86.AMOVL, ncon(0), &thi) splitclean() return } gins(a, f, t) return // requires register source rsrc: gc.Regalloc(&r1, f.Type, t) gmove(f, &r1) gins(a, &r1, t) gc.Regfree(&r1) return // requires register destination rdst: { gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return }
func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands; // except 64-bit, which always copies via registers anyway. var a int var r1 gc.Node if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node switch tt { default: gc.Convconst(&con, t.Type, &f.Val) case gc.TINT16, gc.TINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val) var r1 gc.Node gc.Regalloc(&r1, con.Type, t) gins(arm.AMOVW, &con, &r1) gmove(&r1, t) gc.Regfree(&r1) return case gc.TUINT16, gc.TUINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val) var r1 gc.Node gc.Regalloc(&r1, con.Type, t) gins(arm.AMOVW, &con, &r1) gmove(&r1, t) gc.Regfree(&r1) return } f = &con ft = gc.Simsimtype(con.Type) // constants can't move directly to memory if gc.Ismem(t) && !gc.Is64(t.Type) { goto hard } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8: // same size if !gc.Ismem(f) { a = arm.AMOVB break } fallthrough case gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8: a = arm.AMOVBS case gc.TUINT8<<16 | gc.TUINT8: if !gc.Ismem(f) { a = arm.AMOVB break } fallthrough case gc.TINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = arm.AMOVBU case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8: a = arm.AMOVBS goto trunc64 case gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = arm.AMOVBU goto trunc64 case gc.TINT16<<16 | gc.TINT16: // same size if !gc.Ismem(f) { a = arm.AMOVH break } fallthrough case gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16: a = arm.AMOVHS case gc.TUINT16<<16 | gc.TUINT16: if !gc.Ismem(f) { a = arm.AMOVH break } fallthrough case gc.TINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = arm.AMOVHU case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16: a = arm.AMOVHS goto trunc64 case gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = arm.AMOVHU goto trunc64 case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = arm.AMOVW case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Regalloc(&r1, t.Type, nil) gins(arm.AMOVW, &flo, &r1) gins(arm.AMOVW, &r1, t) gc.Regfree(&r1) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, 
&tlo, &thi) var r1 gc.Node gc.Regalloc(&r1, flo.Type, nil) var r2 gc.Node gc.Regalloc(&r2, fhi.Type, nil) gins(arm.AMOVW, &flo, &r1) gins(arm.AMOVW, &fhi, &r2) gins(arm.AMOVW, &r1, &tlo) gins(arm.AMOVW, &r2, &thi) gc.Regfree(&r1) gc.Regfree(&r2) splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = arm.AMOVBS goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = arm.AMOVBU goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = arm.AMOVHS goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = arm.AMOVHU goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) var r1 gc.Node gc.Regalloc(&r1, tlo.Type, nil) var r2 gc.Node gc.Regalloc(&r2, thi.Type, nil) gmove(f, &r1) p1 := gins(arm.AMOVW, &r1, &r2) p1.From.Type = obj.TYPE_SHIFT p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Reg)&15 // r1->31 p1.From.Reg = 0 //print("gmove: %P\n", p1); gins(arm.AMOVW, &r1, &tlo) gins(arm.AMOVW, &r2, &thi) gc.Regfree(&r1) gc.Regfree(&r2) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) var r1 gc.Node gc.Regalloc(&r1, thi.Type, nil) gins(arm.AMOVW, ncon(0), &r1) gins(arm.AMOVW, &r1, &thi) gc.Regfree(&r1) splitclean() return // case CASE(TFLOAT64, TUINT64): /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TUINT32, // case CASE(TFLOAT32, TUINT64): gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TUINT32: fa := arm.AMOVF a := arm.AMOVFW if ft == gc.TFLOAT64 { fa = arm.AMOVD a = arm.AMOVDW } ta := arm.AMOVW switch tt { case gc.TINT8: ta = arm.AMOVBS case gc.TUINT8: ta = arm.AMOVBU case gc.TINT16: ta = arm.AMOVHS case gc.TUINT16: ta = arm.AMOVHU } var r1 gc.Node gc.Regalloc(&r1, gc.Types[ft], f) var r2 gc.Node gc.Regalloc(&r2, gc.Types[tt], t) gins(fa, f, &r1) // load to fpu p1 := gins(a, &r1, &r1) // convert to w switch tt { case gc.TUINT8, gc.TUINT16, gc.TUINT32: p1.Scond |= arm.C_UBIT } gins(arm.AMOVW, &r1, &r2) // copy to cpu gins(ta, &r2, t) // store gc.Regfree(&r1) gc.Regfree(&r2) return /* * integer to float */ case gc.TINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT64, gc.TINT32<<16 | gc.TFLOAT64, gc.TUINT32<<16 | gc.TFLOAT64: fa := 
arm.AMOVW switch ft { case gc.TINT8: fa = arm.AMOVBS case gc.TUINT8: fa = arm.AMOVBU case gc.TINT16: fa = arm.AMOVHS case gc.TUINT16: fa = arm.AMOVHU } a := arm.AMOVWF ta := arm.AMOVF if tt == gc.TFLOAT64 { a = arm.AMOVWD ta = arm.AMOVD } var r1 gc.Node gc.Regalloc(&r1, gc.Types[ft], f) var r2 gc.Node gc.Regalloc(&r2, gc.Types[tt], t) gins(fa, f, &r1) // load to cpu gins(arm.AMOVW, &r1, &r2) // copy to fpu p1 := gins(a, &r2, &r2) // convert switch ft { case gc.TUINT8, gc.TUINT16, gc.TUINT32: p1.Scond |= arm.C_UBIT } gins(ta, &r2, t) // store gc.Regfree(&r1) gc.Regfree(&r2) return case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: gc.Fatal("gmove UINT64, TFLOAT not implemented") return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = arm.AMOVF case gc.TFLOAT64<<16 | gc.TFLOAT64: a = arm.AMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: var r1 gc.Node gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t) gins(arm.AMOVF, f, &r1) gins(arm.AMOVFD, &r1, &r1) gins(arm.AMOVD, &r1, t) gc.Regfree(&r1) return case gc.TFLOAT64<<16 | gc.TFLOAT32: var r1 gc.Node gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t) gins(arm.AMOVD, f, &r1) gins(arm.AMOVDF, &r1, &r1) gins(arm.AMOVF, &r1, t) gc.Regfree(&r1) return } gins(a, f, t) return // TODO(kaib): we almost always require a register dest anyway, this can probably be // removed. // requires register destination rdst: { gc.Regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) gc.Regfree(&r1) return } // requires register intermediate hard: gc.Regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) gc.Regfree(&r1) return // truncate 64 bit integer trunc64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) gc.Regalloc(&r1, t.Type, nil) gins(a, &flo, &r1) gins(a, &r1, t) gc.Regfree(&r1) splitclean() return }
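// Illustrative sketch (not part of the compiler): the TINT32 -> TINT64
// up-conversion above fills the high word with the sign of the low word via
// an arithmetic shift by 31 (the "r1->31" shifted operand). The same step in
// plain Go, with signExtendHalves as a hypothetical name for the example:
func signExtendHalves(v int32) (lo, hi uint32) {
    lo = uint32(v)
    hi = uint32(v >> 31) // all ones if v < 0, zero otherwise
    return
}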
/* * attempt to generate 64-bit * res = n * return 1 on success, 0 if op not handled. */ func cgen64(n *gc.Node, res *gc.Node) { if res.Op != gc.OINDREG && res.Op != gc.ONAME { gc.Dump("n", n) gc.Dump("res", res) gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0)) } l := n.Left var t1 gc.Node if l.Addable == 0 { gc.Tempname(&t1, l.Type) gc.Cgen(l, &t1) l = &t1 } var hi1 gc.Node var lo1 gc.Node split64(l, &lo1, &hi1) switch n.Op { default: gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0)) case gc.OMINUS: var lo2 gc.Node var hi2 gc.Node split64(res, &lo2, &hi2) gc.Regalloc(&t1, lo1.Type, nil) var al gc.Node gc.Regalloc(&al, lo1.Type, nil) var ah gc.Node gc.Regalloc(&ah, hi1.Type, nil) gins(arm.AMOVW, &lo1, &al) gins(arm.AMOVW, &hi1, &ah) gmove(ncon(0), &t1) p1 := gins(arm.ASUB, &al, &t1) p1.Scond |= arm.C_SBIT gins(arm.AMOVW, &t1, &lo2) gmove(ncon(0), &t1) gins(arm.ASBC, &ah, &t1) gins(arm.AMOVW, &t1, &hi2) gc.Regfree(&t1) gc.Regfree(&al) gc.Regfree(&ah) splitclean() splitclean() return case gc.OCOM: gc.Regalloc(&t1, lo1.Type, nil) gmove(ncon(^uint32(0)), &t1) var lo2 gc.Node var hi2 gc.Node split64(res, &lo2, &hi2) var n1 gc.Node gc.Regalloc(&n1, lo1.Type, nil) gins(arm.AMOVW, &lo1, &n1) gins(arm.AEOR, &t1, &n1) gins(arm.AMOVW, &n1, &lo2) gins(arm.AMOVW, &hi1, &n1) gins(arm.AEOR, &t1, &n1) gins(arm.AMOVW, &n1, &hi2) gc.Regfree(&t1) gc.Regfree(&n1) splitclean() splitclean() return // binary operators. // common setup below. case gc.OADD, gc.OSUB, gc.OMUL, gc.OLSH, gc.ORSH, gc.OAND, gc.OOR, gc.OXOR, gc.OLROT: break } // setup for binary operators r := n.Right if r != nil && r.Addable == 0 { var t2 gc.Node gc.Tempname(&t2, r.Type) gc.Cgen(r, &t2) r = &t2 } var hi2 gc.Node var lo2 gc.Node if gc.Is64(r.Type) { split64(r, &lo2, &hi2) } var al gc.Node gc.Regalloc(&al, lo1.Type, nil) var ah gc.Node gc.Regalloc(&ah, hi1.Type, nil) // Do op. Leave result in ah:al. switch n.Op { default: gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0)) // TODO: Constants case gc.OADD: var bl gc.Node gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil) var bh gc.Node gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil) gins(arm.AMOVW, &hi1, &ah) gins(arm.AMOVW, &lo1, &al) gins(arm.AMOVW, &hi2, &bh) gins(arm.AMOVW, &lo2, &bl) p1 := gins(arm.AADD, &bl, &al) p1.Scond |= arm.C_SBIT gins(arm.AADC, &bh, &ah) gc.Regfree(&bl) gc.Regfree(&bh) // TODO: Constants. case gc.OSUB: var bl gc.Node gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil) var bh gc.Node gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil) gins(arm.AMOVW, &lo1, &al) gins(arm.AMOVW, &hi1, &ah) gins(arm.AMOVW, &lo2, &bl) gins(arm.AMOVW, &hi2, &bh) p1 := gins(arm.ASUB, &bl, &al) p1.Scond |= arm.C_SBIT gins(arm.ASBC, &bh, &ah) gc.Regfree(&bl) gc.Regfree(&bh) // TODO(kaib): this can be done with 4 regs and does not need 6 case gc.OMUL: var bl gc.Node gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil) var bh gc.Node gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil) var cl gc.Node gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil) var ch gc.Node gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil) // load args into bh:bl and bh:bl. 
gins(arm.AMOVW, &hi1, &bh) gins(arm.AMOVW, &lo1, &bl) gins(arm.AMOVW, &hi2, &ch) gins(arm.AMOVW, &lo2, &cl) // bl * cl -> ah al p1 := gins(arm.AMULLU, nil, nil) p1.From.Type = obj.TYPE_REG p1.From.Reg = bl.Val.U.Reg p1.Reg = cl.Val.U.Reg p1.To.Type = obj.TYPE_REGREG p1.To.Reg = ah.Val.U.Reg p1.To.Offset = int64(al.Val.U.Reg) //print("%P\n", p1); // bl * ch + ah -> ah p1 = gins(arm.AMULA, nil, nil) p1.From.Type = obj.TYPE_REG p1.From.Reg = bl.Val.U.Reg p1.Reg = ch.Val.U.Reg p1.To.Type = obj.TYPE_REGREG2 p1.To.Reg = ah.Val.U.Reg p1.To.Offset = int64(ah.Val.U.Reg) //print("%P\n", p1); // bh * cl + ah -> ah p1 = gins(arm.AMULA, nil, nil) p1.From.Type = obj.TYPE_REG p1.From.Reg = bh.Val.U.Reg p1.Reg = cl.Val.U.Reg p1.To.Type = obj.TYPE_REGREG2 p1.To.Reg = ah.Val.U.Reg p1.To.Offset = int64(ah.Val.U.Reg) //print("%P\n", p1); gc.Regfree(&bh) gc.Regfree(&bl) gc.Regfree(&ch) gc.Regfree(&cl) // We only rotate by a constant c in [0,64). // if c >= 32: // lo, hi = hi, lo // c -= 32 // if c == 0: // no-op // else: // t = hi // shld hi:lo, c // shld lo:t, c case gc.OLROT: v := uint64(gc.Mpgetfix(r.Val.U.Xval)) var bl gc.Node gc.Regalloc(&bl, lo1.Type, nil) var bh gc.Node gc.Regalloc(&bh, hi1.Type, nil) if v >= 32 { // reverse during load to do the first 32 bits of rotate v -= 32 gins(arm.AMOVW, &hi1, &bl) gins(arm.AMOVW, &lo1, &bh) } else { gins(arm.AMOVW, &hi1, &bh) gins(arm.AMOVW, &lo1, &bl) } if v == 0 { gins(arm.AMOVW, &bh, &ah) gins(arm.AMOVW, &bl, &al) } else { // rotate by 1 <= v <= 31 // MOVW bl<<v, al // MOVW bh<<v, ah // OR bl>>(32-v), ah // OR bh>>(32-v), al gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al) gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah) gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah) gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al) } gc.Regfree(&bl) gc.Regfree(&bh) case gc.OLSH: var bl gc.Node gc.Regalloc(&bl, lo1.Type, nil) var bh gc.Node gc.Regalloc(&bh, hi1.Type, nil) gins(arm.AMOVW, &hi1, &bh) gins(arm.AMOVW, &lo1, &bl) var p6 *obj.Prog var s gc.Node var n1 gc.Node var creg gc.Node var p1 *obj.Prog var p2 *obj.Prog var p3 *obj.Prog var p4 *obj.Prog var p5 *obj.Prog if r.Op == gc.OLITERAL { v := uint64(gc.Mpgetfix(r.Val.U.Xval)) if v >= 64 { // TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al) // here and below (verify it optimizes to EOR) gins(arm.AEOR, &al, &al) gins(arm.AEOR, &ah, &ah) } else if v > 32 { gins(arm.AEOR, &al, &al) // MOVW bl<<(v-32), ah gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah) } else if v == 32 { gins(arm.AEOR, &al, &al) gins(arm.AMOVW, &bl, &ah) } else if v > 0 { // MOVW bl<<v, al gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al) // MOVW bh<<v, ah gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah) // OR bl>>(32-v), ah gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah) } else { gins(arm.AMOVW, &bl, &al) gins(arm.AMOVW, &bh, &ah) } goto olsh_break } gc.Regalloc(&s, gc.Types[gc.TUINT32], nil) gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil) if gc.Is64(r.Type) { // shift is >= 1<<32 var cl gc.Node var ch gc.Node split64(r, &cl, &ch) gmove(&ch, &s) gins(arm.ATST, &s, nil) p6 = gc.Gbranch(arm.ABNE, nil, 0) gmove(&cl, &s) splitclean() } else { gmove(r, &s) p6 = nil } gins(arm.ATST, &s, nil) // shift == 0 p1 = gins(arm.AMOVW, &bl, &al) p1.Scond = arm.C_SCOND_EQ p1 = gins(arm.AMOVW, &bh, &ah) p1.Scond = arm.C_SCOND_EQ p2 = gc.Gbranch(arm.ABEQ, nil, 0) // shift is < 32 gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32) gmove(&n1, &creg) gins(arm.ACMP, &s, &creg) // MOVW.LO bl<<s, al p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, 
&al) p1.Scond = arm.C_SCOND_LO // MOVW.LO bh<<s, ah p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah) p1.Scond = arm.C_SCOND_LO // SUB.LO s, creg p1 = gins(arm.ASUB, &s, &creg) p1.Scond = arm.C_SCOND_LO // OR.LO bl>>creg, ah p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah) p1.Scond = arm.C_SCOND_LO // BLO end p3 = gc.Gbranch(arm.ABLO, nil, 0) // shift == 32 p1 = gins(arm.AEOR, &al, &al) p1.Scond = arm.C_SCOND_EQ p1 = gins(arm.AMOVW, &bl, &ah) p1.Scond = arm.C_SCOND_EQ p4 = gc.Gbranch(arm.ABEQ, nil, 0) // shift is < 64 gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64) gmove(&n1, &creg) gins(arm.ACMP, &s, &creg) // EOR.LO al, al p1 = gins(arm.AEOR, &al, &al) p1.Scond = arm.C_SCOND_LO // MOVW.LO creg>>1, creg p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg) p1.Scond = arm.C_SCOND_LO // SUB.LO creg, s p1 = gins(arm.ASUB, &creg, &s) p1.Scond = arm.C_SCOND_LO // MOVW bl<<s, ah p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah) p1.Scond = arm.C_SCOND_LO p5 = gc.Gbranch(arm.ABLO, nil, 0) // shift >= 64 if p6 != nil { gc.Patch(p6, gc.Pc) } gins(arm.AEOR, &al, &al) gins(arm.AEOR, &ah, &ah) gc.Patch(p2, gc.Pc) gc.Patch(p3, gc.Pc) gc.Patch(p4, gc.Pc) gc.Patch(p5, gc.Pc) gc.Regfree(&s) gc.Regfree(&creg) olsh_break: gc.Regfree(&bl) gc.Regfree(&bh) case gc.ORSH: var bl gc.Node gc.Regalloc(&bl, lo1.Type, nil) var bh gc.Node gc.Regalloc(&bh, hi1.Type, nil) gins(arm.AMOVW, &hi1, &bh) gins(arm.AMOVW, &lo1, &bl) var p4 *obj.Prog var p5 *obj.Prog var n1 gc.Node var p6 *obj.Prog var s gc.Node var p1 *obj.Prog var p2 *obj.Prog var creg gc.Node var p3 *obj.Prog if r.Op == gc.OLITERAL { v := uint64(gc.Mpgetfix(r.Val.U.Xval)) if v >= 64 { if bh.Type.Etype == gc.TINT32 { // MOVW bh->31, al gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al) // MOVW bh->31, ah gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah) } else { gins(arm.AEOR, &al, &al) gins(arm.AEOR, &ah, &ah) } } else if v > 32 { if bh.Type.Etype == gc.TINT32 { // MOVW bh->(v-32), al gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al) // MOVW bh->31, ah gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah) } else { // MOVW bh>>(v-32), al gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al) gins(arm.AEOR, &ah, &ah) } } else if v == 32 { gins(arm.AMOVW, &bh, &al) if bh.Type.Etype == gc.TINT32 { // MOVW bh->31, ah gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah) } else { gins(arm.AEOR, &ah, &ah) } } else if v > 0 { // MOVW bl>>v, al gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al) // OR bh<<(32-v), al gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al) if bh.Type.Etype == gc.TINT32 { // MOVW bh->v, ah gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah) } else { // MOVW bh>>v, ah gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah) } } else { gins(arm.AMOVW, &bl, &al) gins(arm.AMOVW, &bh, &ah) } goto orsh_break } gc.Regalloc(&s, gc.Types[gc.TUINT32], nil) gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil) if gc.Is64(r.Type) { // shift is >= 1<<32 var ch gc.Node var cl gc.Node split64(r, &cl, &ch) gmove(&ch, &s) gins(arm.ATST, &s, nil) var p1 *obj.Prog if bh.Type.Etype == gc.TINT32 { p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah) } else { p1 = gins(arm.AEOR, &ah, &ah) } p1.Scond = arm.C_SCOND_NE p6 = gc.Gbranch(arm.ABNE, nil, 0) gmove(&cl, &s) splitclean() } else { gmove(r, &s) p6 = nil } gins(arm.ATST, &s, nil) // shift == 0 p1 = gins(arm.AMOVW, &bl, &al) p1.Scond = arm.C_SCOND_EQ p1 = gins(arm.AMOVW, &bh, &ah) p1.Scond = arm.C_SCOND_EQ p2 = gc.Gbranch(arm.ABEQ, nil, 0) // check if shift is < 32 gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32) gmove(&n1, &creg) 
gins(arm.ACMP, &s, &creg) // MOVW.LO bl>>s, al p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al) p1.Scond = arm.C_SCOND_LO // SUB.LO s,creg p1 = gins(arm.ASUB, &s, &creg) p1.Scond = arm.C_SCOND_LO // OR.LO bh<<(32-s), al p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al) p1.Scond = arm.C_SCOND_LO if bh.Type.Etype == gc.TINT32 { // MOVW bh->s, ah p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah) } else { // MOVW bh>>s, ah p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah) } p1.Scond = arm.C_SCOND_LO // BLO end p3 = gc.Gbranch(arm.ABLO, nil, 0) // shift == 32 p1 = gins(arm.AMOVW, &bh, &al) p1.Scond = arm.C_SCOND_EQ if bh.Type.Etype == gc.TINT32 { gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah) } else { gins(arm.AEOR, &ah, &ah) } p4 = gc.Gbranch(arm.ABEQ, nil, 0) // check if shift is < 64 gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64) gmove(&n1, &creg) gins(arm.ACMP, &s, &creg) // MOVW.LO creg>>1, creg p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg) p1.Scond = arm.C_SCOND_LO // SUB.LO creg, s p1 = gins(arm.ASUB, &creg, &s) p1.Scond = arm.C_SCOND_LO if bh.Type.Etype == gc.TINT32 { // MOVW bh->(s-32), al p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al) p1.Scond = arm.C_SCOND_LO } else { // MOVW bh>>(v-32), al p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al) p1.Scond = arm.C_SCOND_LO } // BLO end p5 = gc.Gbranch(arm.ABLO, nil, 0) // s >= 64 if p6 != nil { gc.Patch(p6, gc.Pc) } if bh.Type.Etype == gc.TINT32 { // MOVW bh->31, al gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al) } else { gins(arm.AEOR, &al, &al) } gc.Patch(p2, gc.Pc) gc.Patch(p3, gc.Pc) gc.Patch(p4, gc.Pc) gc.Patch(p5, gc.Pc) gc.Regfree(&s) gc.Regfree(&creg) orsh_break: gc.Regfree(&bl) gc.Regfree(&bh) // TODO(kaib): literal optimizations // make constant the right side (it usually is anyway). // if(lo1.op == OLITERAL) { // nswap(&lo1, &lo2); // nswap(&hi1, &hi2); // } // if(lo2.op == OLITERAL) { // // special cases for constants. 
// lv = mpgetfix(lo2.val.u.xval); // hv = mpgetfix(hi2.val.u.xval); // splitclean(); // right side // split64(res, &lo2, &hi2); // switch(n->op) { // case OXOR: // gmove(&lo1, &lo2); // gmove(&hi1, &hi2); // switch(lv) { // case 0: // break; // case 0xffffffffu: // gins(ANOTL, N, &lo2); // break; // default: // gins(AXORL, ncon(lv), &lo2); // break; // } // switch(hv) { // case 0: // break; // case 0xffffffffu: // gins(ANOTL, N, &hi2); // break; // default: // gins(AXORL, ncon(hv), &hi2); // break; // } // break; // case OAND: // switch(lv) { // case 0: // gins(AMOVL, ncon(0), &lo2); // break; // default: // gmove(&lo1, &lo2); // if(lv != 0xffffffffu) // gins(AANDL, ncon(lv), &lo2); // break; // } // switch(hv) { // case 0: // gins(AMOVL, ncon(0), &hi2); // break; // default: // gmove(&hi1, &hi2); // if(hv != 0xffffffffu) // gins(AANDL, ncon(hv), &hi2); // break; // } // break; // case OOR: // switch(lv) { // case 0: // gmove(&lo1, &lo2); // break; // case 0xffffffffu: // gins(AMOVL, ncon(0xffffffffu), &lo2); // break; // default: // gmove(&lo1, &lo2); // gins(AORL, ncon(lv), &lo2); // break; // } // switch(hv) { // case 0: // gmove(&hi1, &hi2); // break; // case 0xffffffffu: // gins(AMOVL, ncon(0xffffffffu), &hi2); // break; // default: // gmove(&hi1, &hi2); // gins(AORL, ncon(hv), &hi2); // break; // } // break; // } // splitclean(); // splitclean(); // goto out; // } case gc.OXOR, gc.OAND, gc.OOR: var n1 gc.Node gc.Regalloc(&n1, lo1.Type, nil) gins(arm.AMOVW, &lo1, &al) gins(arm.AMOVW, &hi1, &ah) gins(arm.AMOVW, &lo2, &n1) gins(optoas(int(n.Op), lo1.Type), &n1, &al) gins(arm.AMOVW, &hi2, &n1) gins(optoas(int(n.Op), lo1.Type), &n1, &ah) gc.Regfree(&n1) } if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo1, &hi1) gins(arm.AMOVW, &al, &lo1) gins(arm.AMOVW, &ah, &hi1) splitclean() //out: gc.Regfree(&al) gc.Regfree(&ah) }
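// Illustrative sketch (not part of the compiler): the OLROT case above builds
// a 64-bit rotate out of 32-bit registers, exactly as its comment describes.
// rotl64 is a hypothetical helper showing the same steps in plain Go for a
// constant count c in [0,64):
func rotl64(lo, hi uint32, c uint) (rlo, rhi uint32) {
    if c >= 32 {
        lo, hi = hi, lo // swapping the halves is a rotate by 32
        c -= 32
    }
    if c == 0 {
        return lo, hi
    }
    // bits shifted out of one half are ORed into the other half
    rlo = lo<<c | hi>>(32-c)
    rhi = hi<<c | lo>>(32-c)
    return
}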
/* * attempt to generate 64-bit * res = n * return 1 on success, 0 if op not handled. */ func cgen64(n *gc.Node, res *gc.Node) { if res.Op != gc.OINDREG && res.Op != gc.ONAME { gc.Dump("n", n) gc.Dump("res", res) gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0)) } switch n.Op { default: gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0)) case gc.OMINUS: gc.Cgen(n.Left, res) var hi1 gc.Node var lo1 gc.Node split64(res, &lo1, &hi1) gins(x86.ANEGL, nil, &lo1) gins(x86.AADCL, ncon(0), &hi1) gins(x86.ANEGL, nil, &hi1) splitclean() return case gc.OCOM: gc.Cgen(n.Left, res) var lo1 gc.Node var hi1 gc.Node split64(res, &lo1, &hi1) gins(x86.ANOTL, nil, &lo1) gins(x86.ANOTL, nil, &hi1) splitclean() return // binary operators. // common setup below. case gc.OADD, gc.OSUB, gc.OMUL, gc.OLROT, gc.OLSH, gc.ORSH, gc.OAND, gc.OOR, gc.OXOR: break } l := n.Left r := n.Right if !l.Addable { var t1 gc.Node gc.Tempname(&t1, l.Type) gc.Cgen(l, &t1) l = &t1 } if r != nil && !r.Addable { var t2 gc.Node gc.Tempname(&t2, r.Type) gc.Cgen(r, &t2) r = &t2 } var ax gc.Node gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX) var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX) var dx gc.Node gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX) // Setup for binary operation. var hi1 gc.Node var lo1 gc.Node split64(l, &lo1, &hi1) var lo2 gc.Node var hi2 gc.Node if gc.Is64(r.Type) { split64(r, &lo2, &hi2) } // Do op. Leave result in DX:AX. switch n.Op { // TODO: Constants case gc.OADD: gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(x86.AADDL, &lo2, &ax) gins(x86.AADCL, &hi2, &dx) // TODO: Constants. case gc.OSUB: gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(x86.ASUBL, &lo2, &ax) gins(x86.ASBBL, &hi2, &dx) // let's call the next two EX and FX. case gc.OMUL: var ex gc.Node gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil) var fx gc.Node gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil) // load args into DX:AX and EX:CX. gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(x86.AMOVL, &lo2, &cx) gins(x86.AMOVL, &hi2, &ex) // if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply. gins(x86.AMOVL, &dx, &fx) gins(x86.AORL, &ex, &fx) p1 := gc.Gbranch(x86.AJNE, nil, 0) gins(x86.AMULL, &cx, nil) // implicit &ax p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // full 64x64 -> 64, from 32x32 -> 64. gins(x86.AIMULL, &cx, &dx) gins(x86.AMOVL, &ax, &fx) gins(x86.AIMULL, &ex, &fx) gins(x86.AADDL, &dx, &fx) gins(x86.AMOVL, &cx, &dx) gins(x86.AMULL, &dx, nil) // implicit &ax gins(x86.AADDL, &fx, &dx) gc.Patch(p2, gc.Pc) gc.Regfree(&ex) gc.Regfree(&fx) // We only rotate by a constant c in [0,64). 
// if c >= 32: // lo, hi = hi, lo // c -= 32 // if c == 0: // no-op // else: // t = hi // shld hi:lo, c // shld lo:t, c case gc.OLROT: v := uint64(gc.Mpgetfix(r.Val.U.Xval)) if v >= 32 { // reverse during load to do the first 32 bits of rotate v -= 32 gins(x86.AMOVL, &lo1, &dx) gins(x86.AMOVL, &hi1, &ax) } else { gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) } if v == 0 { } else // done { gins(x86.AMOVL, &dx, &cx) p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx) p1.From.Index = x86.REG_AX // double-width shift p1.From.Scale = 0 p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax) p1.From.Index = x86.REG_CX // double-width shift p1.From.Scale = 0 } case gc.OLSH: if r.Op == gc.OLITERAL { v := uint64(gc.Mpgetfix(r.Val.U.Xval)) if v >= 64 { if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo2, &hi2) gins(x86.AMOVL, ncon(0), &lo2) gins(x86.AMOVL, ncon(0), &hi2) splitclean() return } if v >= 32 { if gc.Is64(r.Type) { splitclean() } split64(res, &lo2, &hi2) gmove(&lo1, &hi2) if v > 32 { gins(x86.ASHLL, ncon(uint32(v-32)), &hi2) } gins(x86.AMOVL, ncon(0), &lo2) splitclean() splitclean() return } // general shift gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx) p1.From.Index = x86.REG_AX // double-width shift p1.From.Scale = 0 gins(x86.ASHLL, ncon(uint32(v)), &ax) break } // load value into DX:AX. gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) // load shift value into register. // if high bits are set, zero value. var p1 *obj.Prog if gc.Is64(r.Type) { gins(x86.ACMPL, &hi2, ncon(0)) p1 = gc.Gbranch(x86.AJNE, nil, +1) gins(x86.AMOVL, &lo2, &cx) } else { cx.Type = gc.Types[gc.TUINT32] gmove(r, &cx) } // if shift count is >=64, zero value gins(x86.ACMPL, &cx, ncon(64)) p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) if p1 != nil { gc.Patch(p1, gc.Pc) } gins(x86.AXORL, &dx, &dx) gins(x86.AXORL, &ax, &ax) gc.Patch(p2, gc.Pc) // if shift count is >= 32, zero low. gins(x86.ACMPL, &cx, ncon(32)) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) gins(x86.AMOVL, &ax, &dx) gins(x86.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count gins(x86.AXORL, &ax, &ax) p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // general shift p1 = gins(x86.ASHLL, &cx, &dx) p1.From.Index = x86.REG_AX // double-width shift p1.From.Scale = 0 gins(x86.ASHLL, &cx, &ax) gc.Patch(p2, gc.Pc) case gc.ORSH: if r.Op == gc.OLITERAL { v := uint64(gc.Mpgetfix(r.Val.U.Xval)) if v >= 64 { if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo2, &hi2) if hi1.Type.Etype == gc.TINT32 { gmove(&hi1, &lo2) gins(x86.ASARL, ncon(31), &lo2) gmove(&hi1, &hi2) gins(x86.ASARL, ncon(31), &hi2) } else { gins(x86.AMOVL, ncon(0), &lo2) gins(x86.AMOVL, ncon(0), &hi2) } splitclean() return } if v >= 32 { if gc.Is64(r.Type) { splitclean() } split64(res, &lo2, &hi2) gmove(&hi1, &lo2) if v > 32 { gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2) } if hi1.Type.Etype == gc.TINT32 { gmove(&hi1, &hi2) gins(x86.ASARL, ncon(31), &hi2) } else { gins(x86.AMOVL, ncon(0), &hi2) } splitclean() splitclean() return } // general shift gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) p1 := gins(x86.ASHRL, ncon(uint32(v)), &ax) p1.From.Index = x86.REG_DX // double-width shift p1.From.Scale = 0 gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx) break } // load value into DX:AX. gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) // load shift value into register. // if high bits are set, zero value. 
var p1 *obj.Prog if gc.Is64(r.Type) { gins(x86.ACMPL, &hi2, ncon(0)) p1 = gc.Gbranch(x86.AJNE, nil, +1) gins(x86.AMOVL, &lo2, &cx) } else { cx.Type = gc.Types[gc.TUINT32] gmove(r, &cx) } // if shift count is >=64, zero or sign-extend value gins(x86.ACMPL, &cx, ncon(64)) p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) if p1 != nil { gc.Patch(p1, gc.Pc) } if hi1.Type.Etype == gc.TINT32 { gins(x86.ASARL, ncon(31), &dx) gins(x86.AMOVL, &dx, &ax) } else { gins(x86.AXORL, &dx, &dx) gins(x86.AXORL, &ax, &ax) } gc.Patch(p2, gc.Pc) // if shift count is >= 32, sign-extend hi. gins(x86.ACMPL, &cx, ncon(32)) p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1) gins(x86.AMOVL, &dx, &ax) if hi1.Type.Etype == gc.TINT32 { gins(x86.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count gins(x86.ASARL, ncon(31), &dx) } else { gins(x86.ASHRL, &cx, &ax) gins(x86.AXORL, &dx, &dx) } p2 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) // general shift p1 = gins(x86.ASHRL, &cx, &ax) p1.From.Index = x86.REG_DX // double-width shift p1.From.Scale = 0 gins(optoas(gc.ORSH, hi1.Type), &cx, &dx) gc.Patch(p2, gc.Pc) // make constant the right side (it usually is anyway). case gc.OXOR, gc.OAND, gc.OOR: if lo1.Op == gc.OLITERAL { nswap(&lo1, &lo2) nswap(&hi1, &hi2) } if lo2.Op == gc.OLITERAL { // special cases for constants. lv := uint32(gc.Mpgetfix(lo2.Val.U.Xval)) hv := uint32(gc.Mpgetfix(hi2.Val.U.Xval)) splitclean() // right side split64(res, &lo2, &hi2) switch n.Op { case gc.OXOR: gmove(&lo1, &lo2) gmove(&hi1, &hi2) switch lv { case 0: break case 0xffffffff: gins(x86.ANOTL, nil, &lo2) default: gins(x86.AXORL, ncon(lv), &lo2) } switch hv { case 0: break case 0xffffffff: gins(x86.ANOTL, nil, &hi2) default: gins(x86.AXORL, ncon(hv), &hi2) } case gc.OAND: switch lv { case 0: gins(x86.AMOVL, ncon(0), &lo2) default: gmove(&lo1, &lo2) if lv != 0xffffffff { gins(x86.AANDL, ncon(lv), &lo2) } } switch hv { case 0: gins(x86.AMOVL, ncon(0), &hi2) default: gmove(&hi1, &hi2) if hv != 0xffffffff { gins(x86.AANDL, ncon(hv), &hi2) } } case gc.OOR: switch lv { case 0: gmove(&lo1, &lo2) case 0xffffffff: gins(x86.AMOVL, ncon(0xffffffff), &lo2) default: gmove(&lo1, &lo2) gins(x86.AORL, ncon(lv), &lo2) } switch hv { case 0: gmove(&hi1, &hi2) case 0xffffffff: gins(x86.AMOVL, ncon(0xffffffff), &hi2) default: gmove(&hi1, &hi2) gins(x86.AORL, ncon(hv), &hi2) } } splitclean() splitclean() return } gins(x86.AMOVL, &lo1, &ax) gins(x86.AMOVL, &hi1, &dx) gins(optoas(int(n.Op), lo1.Type), &lo2, &ax) gins(optoas(int(n.Op), lo1.Type), &hi2, &dx) } if gc.Is64(r.Type) { splitclean() } splitclean() split64(res, &lo1, &hi1) gins(x86.AMOVL, &ax, &lo1) gins(x86.AMOVL, &dx, &hi1) splitclean() }
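// Illustrative sketch (not part of the compiler): the OMUL case above forms a
// 64x64 -> 64 product from 32x32 -> 64 pieces. With x = xh:xl and y = yh:yl,
// only three partial products matter, because anything above bit 63 is
// discarded. mul64 is a hypothetical helper showing the arithmetic in plain Go:
func mul64(xl, xh, yl, yh uint32) (lo, hi uint32) {
    full := uint64(xl) * uint64(yl) // 32x32 -> 64 (the MULL/MULLU step)
    lo = uint32(full)
    hi = uint32(full>>32) + xl*yh + xh*yl // cross terms only affect the high word
    return
}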
func anyregalloc() bool { var j int for i := 0; i < len(reg); i++ { if reg[i] == 0 { goto ok } for j = 0; j < len(resvd); j++ { if resvd[j] == i { goto ok } } return true ok: } return false } var regpc [REGALLOC_FMAX + 1]uint32 /* * allocate register of type t, leave in n. * if o != N, o is desired fixed register. * caller must regfree(n). */ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) { if false && gc.Debug['r'] != 0 { fixfree := 0 for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ { if reg[i] == 0 { fixfree++ } } floatfree := 0 for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ { if reg[i] == 0 { floatfree++ } } fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree) } if t == nil { gc.Fatal("regalloc: t nil") } et := int(gc.Simtype[t.Etype]) if gc.Is64(t) { gc.Fatal("regalloc: 64 bit type %v") } var i int switch et { case gc.TINT8, gc.TUINT8, gc.TINT16, gc.TUINT16, gc.TINT32, gc.TUINT32, gc.TPTR32, gc.TBOOL: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= REGALLOC_R0 && i <= REGALLOC_RMAX { goto out } } for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ { if reg[i] == 0 { regpc[i] = uint32(obj.Getcallerpc(&n)) goto out } } fmt.Printf("registers allocated at\n") for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ { fmt.Printf("%d %p\n", i, regpc[i]) } gc.Fatal("out of fixed registers") goto err case gc.TFLOAT32, gc.TFLOAT64: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= REGALLOC_F0 && i <= REGALLOC_FMAX { goto out } } for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ { if reg[i] == 0 { goto out } } gc.Fatal("out of floating point registers") goto err case gc.TCOMPLEX64, gc.TCOMPLEX128: gc.Tempname(n, t) return } gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0)) err: gc.Nodreg(n, t, arm.REG_R0) return out: reg[i]++ gc.Nodreg(n, t, i) } func regfree(n *gc.Node) { if false && gc.Debug['r'] != 0 { fixfree := 0 for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ { if reg[i] == 0 { fixfree++ } } floatfree := 0 for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ { if reg[i] == 0 { floatfree++ } } fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree) } if n.Op == gc.ONAME { return } if n.Op != gc.OREGISTER && n.Op != gc.OINDREG { gc.Fatal("regfree: not a register") } i := int(n.Val.U.Reg) if i == arm.REGSP { return } if i < 0 || i >= len(reg) || i >= len(regpc) { gc.Fatal("regfree: reg out of range") } if reg[i] <= 0 { gc.Fatal("regfree: reg %v not allocated", obj.Rconv(i)) } reg[i]-- if reg[i] == 0 { regpc[i] = 0 } } /* * return constant i node. * overwritten by next call, but useful in calls to gins. */ var ncon_n gc.Node func ncon(i uint32) *gc.Node { if ncon_n.Type == nil { gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0) } gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i)) return &ncon_n } var sclean [10]gc.Node var nsclean int /* * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves. 
*/ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) { if !gc.Is64(n.Type) { gc.Fatal("split64 %v", gc.Tconv(n.Type, 0)) } if nsclean >= len(sclean) { gc.Fatal("split64 clean") } sclean[nsclean].Op = gc.OEMPTY nsclean++ switch n.Op { default: switch n.Op { default: var n1 gc.Node if !dotaddable(n, &n1) { igen(n, &n1, nil) sclean[nsclean-1] = n1 } n = &n1 case gc.ONAME: if n.Class == gc.PPARAMREF { var n1 gc.Node cgen(n.Heapaddr, &n1) sclean[nsclean-1] = n1 n = &n1 } // nothing case gc.OINDREG: break } *lo = *n *hi = *n lo.Type = gc.Types[gc.TUINT32] if n.Type.Etype == gc.TINT64 { hi.Type = gc.Types[gc.TINT32] } else { hi.Type = gc.Types[gc.TUINT32] } hi.Xoffset += 4 case gc.OLITERAL: var n1 gc.Node gc.Convconst(&n1, n.Type, &n.Val) i := gc.Mpgetfix(n1.Val.U.Xval) gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i))) i >>= 32 if n.Type.Etype == gc.TINT64 { gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i))) } else { gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i))) } } } func splitclean() { if nsclean <= 0 { gc.Fatal("splitclean") } nsclean-- if sclean[nsclean].Op != gc.OEMPTY { regfree(&sclean[nsclean]) } } func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands; // except 64-bit, which always copies via registers anyway. var a int var r1 gc.Node if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node switch tt { default: gc.Convconst(&con, t.Type, &f.Val) case gc.TINT16, gc.TINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val) var r1 gc.Node regalloc(&r1, con.Type, t) gins(arm.AMOVW, &con, &r1) gmove(&r1, t) regfree(&r1) return case gc.TUINT16, gc.TUINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val) var r1 gc.Node regalloc(&r1, con.Type, t) gins(arm.AMOVW, &con, &r1) gmove(&r1, t) regfree(&r1) return } f = &con ft = gc.Simsimtype(con.Type) // constants can't move directly to memory if gc.Ismem(t) && !gc.Is64(t.Type) { goto hard } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. 
switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8: // same size if !gc.Ismem(f) { a = arm.AMOVB break } fallthrough case gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8: a = arm.AMOVBS case gc.TUINT8<<16 | gc.TUINT8: if !gc.Ismem(f) { a = arm.AMOVB break } fallthrough case gc.TINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = arm.AMOVBU case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8: a = arm.AMOVBS goto trunc64 case gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = arm.AMOVBU goto trunc64 case gc.TINT16<<16 | gc.TINT16: // same size if !gc.Ismem(f) { a = arm.AMOVH break } fallthrough case gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16: a = arm.AMOVHS case gc.TUINT16<<16 | gc.TUINT16: if !gc.Ismem(f) { a = arm.AMOVH break } fallthrough case gc.TINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = arm.AMOVHU case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16: a = arm.AMOVHS goto trunc64 case gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = arm.AMOVHU goto trunc64 case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = arm.AMOVW case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node regalloc(&r1, t.Type, nil) gins(arm.AMOVW, &flo, &r1) gins(arm.AMOVW, &r1, t) regfree(&r1) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) var r1 gc.Node regalloc(&r1, flo.Type, nil) var r2 gc.Node regalloc(&r2, fhi.Type, nil) gins(arm.AMOVW, &flo, &r1) gins(arm.AMOVW, &fhi, &r2) gins(arm.AMOVW, &r1, &tlo) gins(arm.AMOVW, &r2, &thi) regfree(&r1) regfree(&r2) splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = arm.AMOVBS goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = arm.AMOVBU goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = arm.AMOVHS goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = arm.AMOVHU goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var tlo gc.Node var thi 
gc.Node split64(t, &tlo, &thi) var r1 gc.Node regalloc(&r1, tlo.Type, nil) var r2 gc.Node regalloc(&r2, thi.Type, nil) gmove(f, &r1) p1 := gins(arm.AMOVW, &r1, &r2) p1.From.Type = obj.TYPE_SHIFT p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Val.U.Reg)&15 // r1->31 p1.From.Reg = 0 //print("gmove: %P\n", p1); gins(arm.AMOVW, &r1, &tlo) gins(arm.AMOVW, &r2, &thi) regfree(&r1) regfree(&r2) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) var r1 gc.Node regalloc(&r1, thi.Type, nil) gins(arm.AMOVW, ncon(0), &r1) gins(arm.AMOVW, &r1, &thi) regfree(&r1) splitclean() return // case CASE(TFLOAT64, TUINT64): /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TUINT32, // case CASE(TFLOAT32, TUINT64): gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TUINT32: fa := arm.AMOVF a := arm.AMOVFW if ft == gc.TFLOAT64 { fa = arm.AMOVD a = arm.AMOVDW } ta := arm.AMOVW switch tt { case gc.TINT8: ta = arm.AMOVBS case gc.TUINT8: ta = arm.AMOVBU case gc.TINT16: ta = arm.AMOVHS case gc.TUINT16: ta = arm.AMOVHU } var r1 gc.Node regalloc(&r1, gc.Types[ft], f) var r2 gc.Node regalloc(&r2, gc.Types[tt], t) gins(fa, f, &r1) // load to fpu p1 := gins(a, &r1, &r1) // convert to w switch tt { case gc.TUINT8, gc.TUINT16, gc.TUINT32: p1.Scond |= arm.C_UBIT } gins(arm.AMOVW, &r1, &r2) // copy to cpu gins(ta, &r2, t) // store regfree(&r1) regfree(&r2) return /* * integer to float */ case gc.TINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT64, gc.TINT32<<16 | gc.TFLOAT64, gc.TUINT32<<16 | gc.TFLOAT64: fa := arm.AMOVW switch ft { case gc.TINT8: fa = arm.AMOVBS case gc.TUINT8: fa = arm.AMOVBU case gc.TINT16: fa = arm.AMOVHS case gc.TUINT16: fa = arm.AMOVHU } a := arm.AMOVWF ta := arm.AMOVF if tt == gc.TFLOAT64 { a = arm.AMOVWD ta = arm.AMOVD } var r1 gc.Node regalloc(&r1, gc.Types[ft], f) var r2 gc.Node regalloc(&r2, gc.Types[tt], t) gins(fa, f, &r1) // load to cpu gins(arm.AMOVW, &r1, &r2) // copy to fpu p1 := gins(a, &r2, &r2) // convert switch ft { case gc.TUINT8, gc.TUINT16, gc.TUINT32: p1.Scond |= arm.C_UBIT } gins(ta, &r2, t) // store regfree(&r1) regfree(&r2) return case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: gc.Fatal("gmove UINT64, TFLOAT not implemented") return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = arm.AMOVF case gc.TFLOAT64<<16 | gc.TFLOAT64: a = arm.AMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: var r1 gc.Node regalloc(&r1, gc.Types[gc.TFLOAT64], t) gins(arm.AMOVF, f, &r1) gins(arm.AMOVFD, &r1, &r1) gins(arm.AMOVD, &r1, t) regfree(&r1) return case gc.TFLOAT64<<16 | gc.TFLOAT32: var r1 gc.Node regalloc(&r1, gc.Types[gc.TFLOAT64], t) gins(arm.AMOVD, f, &r1) gins(arm.AMOVDF, &r1, &r1) gins(arm.AMOVF, &r1, t) regfree(&r1) return } gins(a, f, t) return // TODO(kaib): we almost always require a register dest anyway, this can probably be // removed. 
// requires register destination rdst: { regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // truncate 64 bit integer trunc64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) regalloc(&r1, t.Type, nil) gins(a, &flo, &r1) gins(a, &r1, t) regfree(&r1) splitclean() return } func samaddr(f *gc.Node, t *gc.Node) bool { if f.Op != t.Op { return false } switch f.Op { case gc.OREGISTER: if f.Val.U.Reg != t.Val.U.Reg { break } return true } return false } /* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { // Node nod; // int32 v; if f != nil && f.Op == gc.OINDEX { gc.Fatal("gins OINDEX not implemented") } // regalloc(&nod, ®node, Z); // v = constnode.vconst; // cgen(f->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // regfree(&nod); if t != nil && t.Op == gc.OINDEX { gc.Fatal("gins OINDEX not implemented") } // regalloc(&nod, ®node, Z); // v = constnode.vconst; // cgen(t->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // regfree(&nod); var af obj.Addr var at obj.Addr if f != nil { af = gc.Naddr(f) } if t != nil { at = gc.Naddr(t) } p := gc.Prog(as) if f != nil { p.From = af } if t != nil { p.To = at } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } return p } /* * insert n into reg slot of p */ func raddr(n *gc.Node, p *obj.Prog) { var a obj.Addr a = gc.Naddr(n) if a.Type != obj.TYPE_REG { if n != nil { gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0)) } else { gc.Fatal("bad in raddr: <null>") } p.Reg = 0 } else { p.Reg = a.Reg } } /* generate a comparison TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites. */ func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog { if lhs.Op != gc.OREGISTER { gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0)) } p := gins(as, rhs, nil) raddr(lhs, p) return p } /* generate a constant shift * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal. */ func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog { if sval <= 0 || sval > 32 { gc.Fatal("bad shift value: %d", sval) } sval = sval & 0x1f p := gins(as, nil, rhs) p.From.Type = obj.TYPE_SHIFT p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Val.U.Reg)&15 return p } /* generate a register shift */ func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog { p := gins(as, nil, rhs) p.From.Type = obj.TYPE_SHIFT p.From.Offset = int64(stype) | (int64(reg.Val.U.Reg)&15)<<8 | 1<<4 | int64(lhs.Val.U.Reg)&15 return p } /* * return Axxx for Oxxx on type t. 
*/ func optoas(op int, t *gc.Type) int { if t == nil { gc.Fatal("optoas: t is nil") } a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0), gc.Tconv(gc.Types[t.Etype], 0), gc.Tconv(gc.Types[gc.Simtype[t.Etype]], 0)) /* case CASE(OADDR, TPTR32): a = ALEAL; break; case CASE(OADDR, TPTR64): a = ALEAQ; break; */ // TODO(kaib): make sure the conditional branches work on all edge cases case gc.OEQ<<16 | gc.TBOOL, gc.OEQ<<16 | gc.TINT8, gc.OEQ<<16 | gc.TUINT8, gc.OEQ<<16 | gc.TINT16, gc.OEQ<<16 | gc.TUINT16, gc.OEQ<<16 | gc.TINT32, gc.OEQ<<16 | gc.TUINT32, gc.OEQ<<16 | gc.TINT64, gc.OEQ<<16 | gc.TUINT64, gc.OEQ<<16 | gc.TPTR32, gc.OEQ<<16 | gc.TPTR64, gc.OEQ<<16 | gc.TFLOAT32, gc.OEQ<<16 | gc.TFLOAT64: a = arm.ABEQ case gc.ONE<<16 | gc.TBOOL, gc.ONE<<16 | gc.TINT8, gc.ONE<<16 | gc.TUINT8, gc.ONE<<16 | gc.TINT16, gc.ONE<<16 | gc.TUINT16, gc.ONE<<16 | gc.TINT32, gc.ONE<<16 | gc.TUINT32, gc.ONE<<16 | gc.TINT64, gc.ONE<<16 | gc.TUINT64, gc.ONE<<16 | gc.TPTR32, gc.ONE<<16 | gc.TPTR64, gc.ONE<<16 | gc.TFLOAT32, gc.ONE<<16 | gc.TFLOAT64: a = arm.ABNE case gc.OLT<<16 | gc.TINT8, gc.OLT<<16 | gc.TINT16, gc.OLT<<16 | gc.TINT32, gc.OLT<<16 | gc.TINT64, gc.OLT<<16 | gc.TFLOAT32, gc.OLT<<16 | gc.TFLOAT64: a = arm.ABLT case gc.OLT<<16 | gc.TUINT8, gc.OLT<<16 | gc.TUINT16, gc.OLT<<16 | gc.TUINT32, gc.OLT<<16 | gc.TUINT64: a = arm.ABLO case gc.OLE<<16 | gc.TINT8, gc.OLE<<16 | gc.TINT16, gc.OLE<<16 | gc.TINT32, gc.OLE<<16 | gc.TINT64, gc.OLE<<16 | gc.TFLOAT32, gc.OLE<<16 | gc.TFLOAT64: a = arm.ABLE case gc.OLE<<16 | gc.TUINT8, gc.OLE<<16 | gc.TUINT16, gc.OLE<<16 | gc.TUINT32, gc.OLE<<16 | gc.TUINT64: a = arm.ABLS case gc.OGT<<16 | gc.TINT8, gc.OGT<<16 | gc.TINT16, gc.OGT<<16 | gc.TINT32, gc.OGT<<16 | gc.TINT64, gc.OGT<<16 | gc.TFLOAT32, gc.OGT<<16 | gc.TFLOAT64: a = arm.ABGT case gc.OGT<<16 | gc.TUINT8, gc.OGT<<16 | gc.TUINT16, gc.OGT<<16 | gc.TUINT32, gc.OGT<<16 | gc.TUINT64: a = arm.ABHI case gc.OGE<<16 | gc.TINT8, gc.OGE<<16 | gc.TINT16, gc.OGE<<16 | gc.TINT32, gc.OGE<<16 | gc.TINT64, gc.OGE<<16 | gc.TFLOAT32, gc.OGE<<16 | gc.TFLOAT64: a = arm.ABGE case gc.OGE<<16 | gc.TUINT8, gc.OGE<<16 | gc.TUINT16, gc.OGE<<16 | gc.TUINT32, gc.OGE<<16 | gc.TUINT64: a = arm.ABHS case gc.OCMP<<16 | gc.TBOOL, gc.OCMP<<16 | gc.TINT8, gc.OCMP<<16 | gc.TUINT8, gc.OCMP<<16 | gc.TINT16, gc.OCMP<<16 | gc.TUINT16, gc.OCMP<<16 | gc.TINT32, gc.OCMP<<16 | gc.TUINT32, gc.OCMP<<16 | gc.TPTR32: a = arm.ACMP case gc.OCMP<<16 | gc.TFLOAT32: a = arm.ACMPF case gc.OCMP<<16 | gc.TFLOAT64: a = arm.ACMPD case gc.OAS<<16 | gc.TBOOL: a = arm.AMOVB case gc.OAS<<16 | gc.TINT8: a = arm.AMOVBS case gc.OAS<<16 | gc.TUINT8: a = arm.AMOVBU case gc.OAS<<16 | gc.TINT16: a = arm.AMOVHS case gc.OAS<<16 | gc.TUINT16: a = arm.AMOVHU case gc.OAS<<16 | gc.TINT32, gc.OAS<<16 | gc.TUINT32, gc.OAS<<16 | gc.TPTR32: a = arm.AMOVW case gc.OAS<<16 | gc.TFLOAT32: a = arm.AMOVF case gc.OAS<<16 | gc.TFLOAT64: a = arm.AMOVD case gc.OADD<<16 | gc.TINT8, gc.OADD<<16 | gc.TUINT8, gc.OADD<<16 | gc.TINT16, gc.OADD<<16 | gc.TUINT16, gc.OADD<<16 | gc.TINT32, gc.OADD<<16 | gc.TUINT32, gc.OADD<<16 | gc.TPTR32: a = arm.AADD case gc.OADD<<16 | gc.TFLOAT32: a = arm.AADDF case gc.OADD<<16 | gc.TFLOAT64: a = arm.AADDD case gc.OSUB<<16 | gc.TINT8, gc.OSUB<<16 | gc.TUINT8, gc.OSUB<<16 | gc.TINT16, gc.OSUB<<16 | gc.TUINT16, gc.OSUB<<16 | gc.TINT32, gc.OSUB<<16 | gc.TUINT32, gc.OSUB<<16 | gc.TPTR32: a = arm.ASUB case gc.OSUB<<16 | gc.TFLOAT32: a = arm.ASUBF case gc.OSUB<<16 | 
gc.TFLOAT64: a = arm.ASUBD case gc.OMINUS<<16 | gc.TINT8, gc.OMINUS<<16 | gc.TUINT8, gc.OMINUS<<16 | gc.TINT16, gc.OMINUS<<16 | gc.TUINT16, gc.OMINUS<<16 | gc.TINT32, gc.OMINUS<<16 | gc.TUINT32, gc.OMINUS<<16 | gc.TPTR32: a = arm.ARSB case gc.OAND<<16 | gc.TINT8, gc.OAND<<16 | gc.TUINT8, gc.OAND<<16 | gc.TINT16, gc.OAND<<16 | gc.TUINT16, gc.OAND<<16 | gc.TINT32, gc.OAND<<16 | gc.TUINT32, gc.OAND<<16 | gc.TPTR32: a = arm.AAND case gc.OOR<<16 | gc.TINT8, gc.OOR<<16 | gc.TUINT8, gc.OOR<<16 | gc.TINT16, gc.OOR<<16 | gc.TUINT16, gc.OOR<<16 | gc.TINT32, gc.OOR<<16 | gc.TUINT32, gc.OOR<<16 | gc.TPTR32: a = arm.AORR case gc.OXOR<<16 | gc.TINT8, gc.OXOR<<16 | gc.TUINT8, gc.OXOR<<16 | gc.TINT16, gc.OXOR<<16 | gc.TUINT16, gc.OXOR<<16 | gc.TINT32, gc.OXOR<<16 | gc.TUINT32, gc.OXOR<<16 | gc.TPTR32: a = arm.AEOR case gc.OLSH<<16 | gc.TINT8, gc.OLSH<<16 | gc.TUINT8, gc.OLSH<<16 | gc.TINT16, gc.OLSH<<16 | gc.TUINT16, gc.OLSH<<16 | gc.TINT32, gc.OLSH<<16 | gc.TUINT32, gc.OLSH<<16 | gc.TPTR32: a = arm.ASLL case gc.ORSH<<16 | gc.TUINT8, gc.ORSH<<16 | gc.TUINT16, gc.ORSH<<16 | gc.TUINT32, gc.ORSH<<16 | gc.TPTR32: a = arm.ASRL case gc.ORSH<<16 | gc.TINT8, gc.ORSH<<16 | gc.TINT16, gc.ORSH<<16 | gc.TINT32: a = arm.ASRA case gc.OMUL<<16 | gc.TUINT8, gc.OMUL<<16 | gc.TUINT16, gc.OMUL<<16 | gc.TUINT32, gc.OMUL<<16 | gc.TPTR32: a = arm.AMULU case gc.OMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TINT32: a = arm.AMUL case gc.OMUL<<16 | gc.TFLOAT32: a = arm.AMULF case gc.OMUL<<16 | gc.TFLOAT64: a = arm.AMULD case gc.ODIV<<16 | gc.TUINT8, gc.ODIV<<16 | gc.TUINT16, gc.ODIV<<16 | gc.TUINT32, gc.ODIV<<16 | gc.TPTR32: a = arm.ADIVU case gc.ODIV<<16 | gc.TINT8, gc.ODIV<<16 | gc.TINT16, gc.ODIV<<16 | gc.TINT32: a = arm.ADIV case gc.OMOD<<16 | gc.TUINT8, gc.OMOD<<16 | gc.TUINT16, gc.OMOD<<16 | gc.TUINT32, gc.OMOD<<16 | gc.TPTR32: a = arm.AMODU case gc.OMOD<<16 | gc.TINT8, gc.OMOD<<16 | gc.TINT16, gc.OMOD<<16 | gc.TINT32: a = arm.AMOD // case CASE(OEXTEND, TINT16): // a = ACWD; // break; // case CASE(OEXTEND, TINT32): // a = ACDQ; // break; // case CASE(OEXTEND, TINT64): // a = ACQO; // break; case gc.ODIV<<16 | gc.TFLOAT32: a = arm.ADIVF case gc.ODIV<<16 | gc.TFLOAT64: a = arm.ADIVD } return a } const ( ODynam = 1 << 0 OPtrto = 1 << 1 ) var clean [20]gc.Node var cleani int = 0 func sudoclean() { if clean[cleani-1].Op != gc.OEMPTY { regfree(&clean[cleani-1]) } if clean[cleani-2].Op != gc.OEMPTY { regfree(&clean[cleani-2]) } cleani -= 2 } func dotaddable(n *gc.Node, n1 *gc.Node) bool { if n.Op != gc.ODOT { return false } var oary [10]int64 var nn *gc.Node o := gc.Dotoffset(n, oary[:], &nn) if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 { *n1 = *nn n1.Type = n.Type n1.Xoffset += oary[0] return true } return false } /* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. 
*/ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool { if n.Type == nil { return false } *a = obj.Addr{} switch n.Op { case gc.OLITERAL: if !gc.Isconst(n, gc.CTINT) { break } v := gc.Mpgetfix(n.Val.U.Xval) if v >= 32000 || v <= -32000 { break } switch as { default: return false case arm.AADD, arm.ASUB, arm.AAND, arm.AORR, arm.AEOR, arm.AMOVB, arm.AMOVBS, arm.AMOVBU, arm.AMOVH, arm.AMOVHS, arm.AMOVHU, arm.AMOVW: break } cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY *a = gc.Naddr(n) return true case gc.ODOT, gc.ODOTPTR: cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY var nn *gc.Node var oary [10]int64 o := gc.Dotoffset(n, oary[:], &nn) if nn == nil { sudoclean() return false } if nn.Addable != 0 && o == 1 && oary[0] >= 0 { // directly addressable set of DOTs n1 := *nn n1.Type = n.Type n1.Xoffset += oary[0] *a = gc.Naddr(&n1) return true } regalloc(reg, gc.Types[gc.Tptr], nil) n1 := *reg n1.Op = gc.OINDREG if oary[0] >= 0 { agen(nn, reg) n1.Xoffset = oary[0] } else { cgen(nn, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[0] + 1) } for i := 1; i < o; i++ { if oary[i] >= 0 { gc.Fatal("can't happen") } gins(arm.AMOVW, &n1, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[i] + 1) } a.Type = obj.TYPE_NONE a.Name = obj.NAME_NONE n1.Type = n.Type *a = gc.Naddr(&n1) return true case gc.OINDEX: return false } return false }
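// The shift-operand words built by gshift and gregshift above follow the ARM
// data-processing "operand 2" layout: the shifted register in bits 0-3, bit 4
// selecting an immediate (0) or register (1) shift count, the shift kind in
// bits 5-6 (assumed to be folded into the constant passed as stype), and either
// the 5-bit count in bits 7-11 or the count register in bits 8-11. A minimal
// standalone sketch of the two encodings (illustration only, not compiler code):
func immShiftWord(stype, count, rm int64) int64 {
	return stype | count<<7 | rm&15 // Rm shifted by a constant count
}

func regShiftWord(stype, rs, rm int64) int64 {
	return stype | (rs&15)<<8 | 1<<4 | rm&15 // count taken from register Rs
}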
func anyregalloc() bool { var j int for i := x86.REG_AX; i <= x86.REG_DI; i++ { if reg[i] == 0 { goto ok } for j = 0; j < len(resvd); j++ { if resvd[j] == i { goto ok } } return true ok: } for i := x86.REG_X0; i <= x86.REG_X7; i++ { if reg[i] != 0 { return true } } return false } /* * allocate register of type t, leave in n. * if o != N, o is desired fixed register. * caller must regfree(n). */ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) { if t == nil { gc.Fatal("regalloc: t nil") } et := int(gc.Simtype[t.Etype]) var i int switch et { case gc.TINT64, gc.TUINT64: gc.Fatal("regalloc64") case gc.TINT8, gc.TUINT8, gc.TINT16, gc.TUINT16, gc.TINT32, gc.TUINT32, gc.TPTR32, gc.TPTR64, gc.TBOOL: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= x86.REG_AX && i <= x86.REG_DI { goto out } } for i = x86.REG_AX; i <= x86.REG_DI; i++ { if reg[i] == 0 { goto out } } fmt.Printf("registers allocated at\n") for i := x86.REG_AX; i <= x86.REG_DI; i++ { fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i]) } gc.Fatal("out of fixed registers") goto err case gc.TFLOAT32, gc.TFLOAT64: if gc.Use_sse == 0 { i = x86.REG_F0 goto out } if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= x86.REG_X0 && i <= x86.REG_X7 { goto out } } for i = x86.REG_X0; i <= x86.REG_X7; i++ { if reg[i] == 0 { goto out } } fmt.Printf("registers allocated at\n") for i := x86.REG_X0; i <= x86.REG_X7; i++ { fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i]) } gc.Fatal("out of floating registers") } gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0)) err: gc.Nodreg(n, t, 0) return out: if i == x86.REG_SP { fmt.Printf("alloc SP\n") } if reg[i] == 0 { regpc[i] = uint32(obj.Getcallerpc(&n)) if i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP { gc.Dump("regalloc-o", o) gc.Fatal("regalloc %v", obj.Rconv(i)) } } reg[i]++ gc.Nodreg(n, t, i) } func regfree(n *gc.Node) { if n.Op == gc.ONAME { return } if n.Op != gc.OREGISTER && n.Op != gc.OINDREG { gc.Fatal("regfree: not a register") } i := int(n.Val.U.Reg) if i == x86.REG_SP { return } if i < 0 || i >= len(reg) { gc.Fatal("regfree: reg out of range") } if reg[i] <= 0 { gc.Fatal("regfree: reg not allocated") } reg[i]-- if reg[i] == 0 && (i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP) { gc.Fatal("regfree %v", obj.Rconv(i)) } } /* * generate * as $c, reg */ func gconreg(as int, c int64, reg int) { var n1 gc.Node var n2 gc.Node gc.Nodconst(&n1, gc.Types[gc.TINT64], c) gc.Nodreg(&n2, gc.Types[gc.TINT64], reg) gins(as, &n1, &n2) } /* * swap node contents */ func nswap(a *gc.Node, b *gc.Node) { t := *a *a = *b *b = t } /* * return constant i node. * overwritten by next call, but useful in calls to gins. */ var ncon_n gc.Node func ncon(i uint32) *gc.Node { if ncon_n.Type == nil { gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0) } gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i)) return &ncon_n } var sclean [10]gc.Node var nsclean int /* * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves. 
*/ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) { if !gc.Is64(n.Type) { gc.Fatal("split64 %v", gc.Tconv(n.Type, 0)) } if nsclean >= len(sclean) { gc.Fatal("split64 clean") } sclean[nsclean].Op = gc.OEMPTY nsclean++ switch n.Op { default: switch n.Op { default: var n1 gc.Node if !dotaddable(n, &n1) { igen(n, &n1, nil) sclean[nsclean-1] = n1 } n = &n1 case gc.ONAME: if n.Class == gc.PPARAMREF { var n1 gc.Node cgen(n.Heapaddr, &n1) sclean[nsclean-1] = n1 n = &n1 } // nothing case gc.OINDREG: break } *lo = *n *hi = *n lo.Type = gc.Types[gc.TUINT32] if n.Type.Etype == gc.TINT64 { hi.Type = gc.Types[gc.TINT32] } else { hi.Type = gc.Types[gc.TUINT32] } hi.Xoffset += 4 case gc.OLITERAL: var n1 gc.Node gc.Convconst(&n1, n.Type, &n.Val) i := gc.Mpgetfix(n1.Val.U.Xval) gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i))) i >>= 32 if n.Type.Etype == gc.TINT64 { gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i))) } else { gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i))) } } } func splitclean() { if nsclean <= 0 { gc.Fatal("splitclean") } nsclean-- if sclean[nsclean].Op != gc.OEMPTY { regfree(&sclean[nsclean]) } } /* * set up nodes representing fp constants */ var zerof gc.Node var two64f gc.Node var two63f gc.Node var bignodes_did int func bignodes() { if bignodes_did != 0 { return } bignodes_did = 1 two64f = *ncon(0) two64f.Type = gc.Types[gc.TFLOAT64] two64f.Val.Ctype = gc.CTFLT two64f.Val.U.Fval = new(gc.Mpflt) gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.) two63f = two64f two63f.Val.U.Fval = new(gc.Mpflt) gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.) zerof = two64f zerof.Val.U.Fval = new(gc.Mpflt) gc.Mpmovecflt(zerof.Val.U.Fval, 0) } func memname(n *gc.Node, t *gc.Type) { gc.Tempname(n, t) n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing n.Orig.Sym = n.Sym } func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } if gc.Isfloat[ft] || gc.Isfloat[tt] { floatmove(f, t) return } // cannot have two integer memory operands; // except 64-bit, which always copies via registers anyway. var r1 gc.Node var a int if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = gc.Simsimtype(con.Type) } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. 
switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TUINT8: a = x86.AMOVB case gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = x86.AMOVB goto rsrc case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVB, &r1, t) splitclean() return case gc.TINT16<<16 | gc.TINT16, // same size gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TUINT16: a = x86.AMOVW case gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = x86.AMOVW goto rsrc case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVW, &r1, t) splitclean() return case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = x86.AMOVL case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVL, &r1, t) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) if f.Op == gc.OLITERAL { gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) } else { var r1 gc.Node gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX) var r2 gc.Node gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX) gins(x86.AMOVL, &flo, &r1) gins(x86.AMOVL, &fhi, &r2) gins(x86.AMOVL, &r1, &tlo) gins(x86.AMOVL, &r2, &thi) } splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16: a = x86.AMOVBWSX goto rdst case gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = x86.AMOVBLSX goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16: a = x86.AMOVBWZX goto rdst case gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = x86.AMOVBLZX goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = x86.AMOVWLSX goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = x86.AMOVWLZX goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 
gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) var flo gc.Node gc.Nodreg(&flo, tlo.Type, x86.REG_AX) var fhi gc.Node gc.Nodreg(&fhi, thi.Type, x86.REG_DX) gmove(f, &flo) gins(x86.ACDQ, nil, nil) gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) gins(x86.AMOVL, ncon(0), &thi) splitclean() return } gins(a, f, t) return // requires register source rsrc: regalloc(&r1, f.Type, t) gmove(f, &r1) gins(a, &r1, t) regfree(&r1) return // requires register destination rdst: { regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return } func floatmove(f *gc.Node, t *gc.Node) { var r1 gc.Node ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type // cannot have two floating point memory operands. if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = gc.Simsimtype(con.Type) // some constants can't move directly to memory. if gc.Ismem(t) { // float constants come from memory. if gc.Isfloat[tt] { goto hard } } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: if gc.Use_sse != 0 { floatmove_sse(f, t) } else { floatmove_387(f, t) } return // float to very long integer. 
case gc.TFLOAT32<<16 | gc.TINT64, gc.TFLOAT64<<16 | gc.TINT64: if f.Op == gc.OREGISTER { cvt = f.Type goto hardmem } var r1 gc.Node gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0) if ft == gc.TFLOAT32 { gins(x86.AFMOVF, f, &r1) } else { gins(x86.AFMOVD, f, &r1) } // set round to zero mode during conversion var t1 gc.Node memname(&t1, gc.Types[gc.TUINT16]) var t2 gc.Node memname(&t2, gc.Types[gc.TUINT16]) gins(x86.AFSTCW, nil, &t1) gins(x86.AMOVW, ncon(0xf7f), &t2) gins(x86.AFLDCW, &t2, nil) if tt == gc.TINT16 { gins(x86.AFMOVWP, &r1, t) } else if tt == gc.TINT32 { gins(x86.AFMOVLP, &r1, t) } else { gins(x86.AFMOVVP, &r1, t) } gins(x86.AFLDCW, &t1, nil) return case gc.TFLOAT32<<16 | gc.TUINT64, gc.TFLOAT64<<16 | gc.TUINT64: if !gc.Ismem(f) { cvt = f.Type goto hardmem } bignodes() var f0 gc.Node gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0) var f1 gc.Node gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1) var ax gc.Node gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX) if ft == gc.TFLOAT32 { gins(x86.AFMOVF, f, &f0) } else { gins(x86.AFMOVD, f, &f0) } // if 0 > v { answer = 0 } gins(x86.AFMOVD, &zerof, &f0) gins(x86.AFUCOMIP, &f0, &f1) p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0) // if 1<<64 <= v { answer = 0 too } gins(x86.AFMOVD, &two64f, &f0) gins(x86.AFUCOMIP, &f0, &f1) p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0) gc.Patch(p1, gc.Pc) gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) gins(x86.AMOVL, ncon(0), &tlo) gins(x86.AMOVL, ncon(0), &thi) splitclean() p1 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p2, gc.Pc) // in range; algorithm is: // if small enough, use native float64 -> int64 conversion. // otherwise, subtract 2^63, convert, and add it back. // set round to zero mode during conversion var t1 gc.Node memname(&t1, gc.Types[gc.TUINT16]) var t2 gc.Node memname(&t2, gc.Types[gc.TUINT16]) gins(x86.AFSTCW, nil, &t1) gins(x86.AMOVW, ncon(0xf7f), &t2) gins(x86.AFLDCW, &t2, nil) // actual work gins(x86.AFMOVD, &two63f, &f0) gins(x86.AFUCOMIP, &f0, &f1) p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0) gins(x86.AFMOVVP, &f0, t) p3 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p2, gc.Pc) gins(x86.AFMOVD, &two63f, &f0) gins(x86.AFSUBDP, &f0, &f1) gins(x86.AFMOVVP, &f0, t) split64(t, &tlo, &thi) gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63 gc.Patch(p3, gc.Pc) splitclean() // restore rounding mode gins(x86.AFLDCW, &t1, nil) gc.Patch(p1, gc.Pc) return /* * integer to float */ case gc.TINT64<<16 | gc.TFLOAT32, gc.TINT64<<16 | gc.TFLOAT64: if t.Op == gc.OREGISTER { goto hardmem } var f0 gc.Node gc.Nodreg(&f0, t.Type, x86.REG_F0) gins(x86.AFMOVV, f, &f0) if tt == gc.TFLOAT32 { gins(x86.AFMOVFP, &f0, t) } else { gins(x86.AFMOVDP, &f0, t) } return // algorithm is: // if small enough, use native int64 -> float64 conversion. // otherwise, halve (rounding to odd?), convert, and double. 
case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: var ax gc.Node gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX) var dx gc.Node gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX) var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX) var t1 gc.Node gc.Tempname(&t1, f.Type) var tlo gc.Node var thi gc.Node split64(&t1, &tlo, &thi) gmove(f, &t1) gins(x86.ACMPL, &thi, ncon(0)) p1 := gc.Gbranch(x86.AJLT, nil, 0) // native var r1 gc.Node gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0) gins(x86.AFMOVV, &t1, &r1) if tt == gc.TFLOAT32 { gins(x86.AFMOVFP, &r1, t) } else { gins(x86.AFMOVDP, &r1, t) } p2 := gc.Gbranch(obj.AJMP, nil, 0) // simulated gc.Patch(p1, gc.Pc) gmove(&tlo, &ax) gmove(&thi, &dx) p1 = gins(x86.ASHRL, ncon(1), &ax) p1.From.Index = x86.REG_DX // double-width shift DX -> AX p1.From.Scale = 0 gins(x86.AMOVL, ncon(0), &cx) gins(x86.ASETCC, nil, &cx) gins(x86.AORL, &cx, &ax) gins(x86.ASHRL, ncon(1), &dx) gmove(&dx, &thi) gmove(&ax, &tlo) gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0) var r2 gc.Node gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1) gins(x86.AFMOVV, &t1, &r1) gins(x86.AFMOVD, &r1, &r1) gins(x86.AFADDDP, &r1, &r2) if tt == gc.TFLOAT32 { gins(x86.AFMOVFP, &r1, t) } else { gins(x86.AFMOVDP, &r1, t) } gc.Patch(p2, gc.Pc) splitclean() return } // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // requires memory intermediate hardmem: gc.Tempname(&r1, cvt) gmove(f, &r1) gmove(&r1, t) return } func floatmove_387(f *gc.Node, t *gc.Node) { var r1 gc.Node var a int ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type switch uint32(ft)<<16 | uint32(tt) { default: goto fatal /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TINT64, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TINT64: if t.Op == gc.OREGISTER { goto hardmem } var r1 gc.Node gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0) if f.Op != gc.OREGISTER { if ft == gc.TFLOAT32 { gins(x86.AFMOVF, f, &r1) } else { gins(x86.AFMOVD, f, &r1) } } // set round to zero mode during conversion var t1 gc.Node memname(&t1, gc.Types[gc.TUINT16]) var t2 gc.Node memname(&t2, gc.Types[gc.TUINT16]) gins(x86.AFSTCW, nil, &t1) gins(x86.AMOVW, ncon(0xf7f), &t2) gins(x86.AFLDCW, &t2, nil) if tt == gc.TINT16 { gins(x86.AFMOVWP, &r1, t) } else if tt == gc.TINT32 { gins(x86.AFMOVLP, &r1, t) } else { gins(x86.AFMOVVP, &r1, t) } gins(x86.AFLDCW, &t1, nil) return // convert via int32. case gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: var t1 gc.Node gc.Tempname(&t1, gc.Types[gc.TINT32]) gmove(f, &t1) switch tt { default: gc.Fatal("gmove %v", gc.Nconv(t, 0)) case gc.TINT8: gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1))) p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1) gins(x86.ACMPL, &t1, ncon(0x7f)) p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1) p3 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) gc.Patch(p2, gc.Pc) gmove(ncon(-0x80&(1<<32-1)), &t1) gc.Patch(p3, gc.Pc) gmove(&t1, t) case gc.TUINT8: gins(x86.ATESTL, ncon(0xffffff00), &t1) p1 := gc.Gbranch(x86.AJEQ, nil, +1) gins(x86.AMOVL, ncon(0), &t1) gc.Patch(p1, gc.Pc) gmove(&t1, t) case gc.TUINT16: gins(x86.ATESTL, ncon(0xffff0000), &t1) p1 := gc.Gbranch(x86.AJEQ, nil, +1) gins(x86.AMOVL, ncon(0), &t1) gc.Patch(p1, gc.Pc) gmove(&t1, t) } return // convert via int64. 
case gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32: cvt = gc.Types[gc.TINT64] goto hardmem /* * integer to float */ case gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT32<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT64, gc.TINT64<<16 | gc.TFLOAT32, gc.TINT64<<16 | gc.TFLOAT64: if t.Op != gc.OREGISTER { goto hard } if f.Op == gc.OREGISTER { cvt = f.Type goto hardmem } switch ft { case gc.TINT16: a = x86.AFMOVW case gc.TINT32: a = x86.AFMOVL default: a = x86.AFMOVV } // convert via int32 memory case gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hardmem // convert via int64 memory case gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT64] goto hardmem // The way the code generator uses floating-point // registers, a move from F0 to F0 is intended as a no-op. // On the x86, it's not: it pushes a second copy of F0 // on the floating point stack. So toss it away here. // Also, F0 is the *only* register we ever evaluate // into, so we should only see register/register as F0/F0. /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32, gc.TFLOAT64<<16 | gc.TFLOAT64: if gc.Ismem(f) && gc.Ismem(t) { goto hard } if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER { if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 { goto fatal } return } a = x86.AFMOVF if ft == gc.TFLOAT64 { a = x86.AFMOVD } if gc.Ismem(t) { if f.Op != gc.OREGISTER || f.Val.U.Reg != x86.REG_F0 { gc.Fatal("gmove %v", gc.Nconv(f, 0)) } a = x86.AFMOVFP if ft == gc.TFLOAT64 { a = x86.AFMOVDP } } case gc.TFLOAT32<<16 | gc.TFLOAT64: if gc.Ismem(f) && gc.Ismem(t) { goto hard } if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER { if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 { goto fatal } return } if f.Op == gc.OREGISTER { gins(x86.AFMOVDP, f, t) } else { gins(x86.AFMOVF, f, t) } return case gc.TFLOAT64<<16 | gc.TFLOAT32: if gc.Ismem(f) && gc.Ismem(t) { goto hard } if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER { var r1 gc.Node gc.Tempname(&r1, gc.Types[gc.TFLOAT32]) gins(x86.AFMOVFP, f, &r1) gins(x86.AFMOVF, &r1, t) return } if f.Op == gc.OREGISTER { gins(x86.AFMOVFP, f, t) } else { gins(x86.AFMOVD, f, t) } return } gins(a, f, t) return // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // requires memory intermediate hardmem: gc.Tempname(&r1, cvt) gmove(f, &r1) gmove(&r1, t) return // should not happen fatal: gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong)) return } func floatmove_sse(f *gc.Node, t *gc.Node) { var r1 gc.Node var cvt *gc.Type var a int ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) switch uint32(ft)<<16 | uint32(tt) { // should not happen default: gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return // convert via int32. /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: cvt = gc.Types[gc.TINT32] goto hard // convert via int64. 
case gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32: cvt = gc.Types[gc.TINT64] goto hardmem case gc.TFLOAT32<<16 | gc.TINT32: a = x86.ACVTTSS2SL goto rdst case gc.TFLOAT64<<16 | gc.TINT32: a = x86.ACVTTSD2SL goto rdst // convert via int32 memory /* * integer to float */ case gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hard // convert via int64 memory case gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT64] goto hardmem case gc.TINT32<<16 | gc.TFLOAT32: a = x86.ACVTSL2SS goto rdst case gc.TINT32<<16 | gc.TFLOAT64: a = x86.ACVTSL2SD goto rdst /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = x86.AMOVSS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = x86.AMOVSD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = x86.ACVTSS2SD goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = x86.ACVTSD2SS goto rdst } gins(a, f, t) return // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // requires memory intermediate hardmem: gc.Tempname(&r1, cvt) gmove(f, &r1) gmove(&r1, t) return // requires register destination rdst: regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } func samaddr(f *gc.Node, t *gc.Node) bool { if f.Op != t.Op { return false } switch f.Op { case gc.OREGISTER: if f.Val.U.Reg != t.Val.U.Reg { break } return true } return false } /* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER { gc.Fatal("gins MOVF reg, reg") } if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL { gc.Fatal("gins CVTSD2SS const") } if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Val.U.Reg == x86.REG_F0 { gc.Fatal("gins MOVSD into F0") } switch as { case x86.AMOVB, x86.AMOVW, x86.AMOVL: if f != nil && t != nil && samaddr(f, t) { return nil } case x86.ALEAL: if f != nil && gc.Isconst(f, gc.CTNIL) { gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0)) } } var af obj.Addr var at obj.Addr if f != nil { af = gc.Naddr(f) } if t != nil { at = gc.Naddr(t) } p := gc.Prog(as) if f != nil { p.From = af } if t != nil { p.To = at } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } w := 0 switch as { case x86.AMOVB: w = 1 case x86.AMOVW: w = 2 case x86.AMOVL: w = 4 } if true && w != 0 && f != nil && (af.Width > int64(w) || at.Width > int64(w)) { gc.Dump("bad width from:", f) gc.Dump("bad width to:", t) gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width) } if p.To.Type == obj.TYPE_ADDR && w > 0 { gc.Fatal("bad use of addr: %v", p) } return p } func dotaddable(n *gc.Node, n1 *gc.Node) bool { if n.Op != gc.ODOT { return false } var oary [10]int64 var nn *gc.Node o := gc.Dotoffset(n, oary[:], &nn) if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 { *n1 = *nn n1.Type = n.Type n1.Xoffset += oary[0] return true } return false } func sudoclean() { } func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { *a = obj.Addr{} return false }
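// Every gmove/floatmove/optoas switch in this file keys on a single integer
// that packs two small codes into one word, the Go rendering of the old C
// CASE(a, b) macro. A minimal sketch of the packing (illustration only):
func typePair(ft, tt int) uint32 {
	// high halfword: source (or operator) code; low halfword: destination type code
	return uint32(ft)<<16 | uint32(tt)
}

// So the "case gc.TINT64<<16 | gc.TINT32" arm, for example, is selected when
// moving a 64-bit integer into a 32-bit destination.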
/* * allocate a register (reusing res if possible) and generate * a = &n * The caller must call regfree(a). * The generated code checks that the result is not nil. */ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) { if gc.Debug['g'] != 0 { gc.Dump("\nagenr-n", n) } nl := n.Left nr := n.Right switch n.Op { case gc.ODOT, gc.ODOTPTR, gc.OCALLFUNC, gc.OCALLMETH, gc.OCALLINTER: var n1 gc.Node igen(n, &n1, res) regalloc(a, gc.Types[gc.Tptr], &n1) agen(&n1, a) regfree(&n1) case gc.OIND: cgenr(n.Left, a, res) gc.Cgen_checknil(a) case gc.OINDEX: freelen := 0 w := uint64(n.Type.Width) // Generate the non-addressable child first. var n3 gc.Node var nlen gc.Node var tmp gc.Node var n1 gc.Node if nr.Addable != 0 { goto irad } if nl.Addable != 0 { cgenr(nr, &n1, nil) if !gc.Isconst(nl, gc.CTSTR) { if gc.Isfixedarray(nl.Type) { agenr(nl, &n3, res) } else { igen(nl, &nlen, res) freelen = 1 nlen.Type = gc.Types[gc.Tptr] nlen.Xoffset += int64(gc.Array_array) regalloc(&n3, gc.Types[gc.Tptr], res) gmove(&nlen, &n3) nlen.Type = gc.Types[gc.Simtype[gc.TUINT]] nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) } } goto index } gc.Tempname(&tmp, nr.Type) cgen(nr, &tmp) nr = &tmp irad: if !gc.Isconst(nl, gc.CTSTR) { if gc.Isfixedarray(nl.Type) { agenr(nl, &n3, res) } else { if nl.Addable == 0 { // igen will need an addressable node. var tmp2 gc.Node gc.Tempname(&tmp2, nl.Type) cgen(nl, &tmp2) nl = &tmp2 } igen(nl, &nlen, res) freelen = 1 nlen.Type = gc.Types[gc.Tptr] nlen.Xoffset += int64(gc.Array_array) regalloc(&n3, gc.Types[gc.Tptr], res) gmove(&nlen, &n3) nlen.Type = gc.Types[gc.Simtype[gc.TUINT]] nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array) } } if !gc.Isconst(nr, gc.CTINT) { cgenr(nr, &n1, nil) } goto index // &a is in &n3 (allocated in res) // i is in &n1 (if not constant) // len(a) is in nlen (if needed) // w is width // constant index index: if gc.Isconst(nr, gc.CTINT) { if gc.Isconst(nl, gc.CTSTR) { gc.Fatal("constant string constant index") // front end should handle } v := uint64(gc.Mpgetfix(nr.Val.U.Xval)) if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING { if gc.Debug['B'] == 0 && !n.Bounded { var n2 gc.Node gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v)) if gc.Smallintconst(nr) { gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2) } else { regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil) gmove(&n2, &tmp) gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &tmp) regfree(&tmp) } p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1) ginscall(gc.Panicindex, -1) gc.Patch(p1, gc.Pc) } regfree(&nlen) } if v*w != 0 { ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*w), &n3) } *a = n3 break } // type of the index t := gc.Types[gc.TUINT64] if gc.Issigned[n1.Type.Etype] { t = gc.Types[gc.TINT64] } var n2 gc.Node regalloc(&n2, t, &n1) // i gmove(&n1, &n2) regfree(&n1) if gc.Debug['B'] == 0 && !n.Bounded { // check bounds t = gc.Types[gc.Simtype[gc.TUINT]] if gc.Is64(nr.Type) { t = gc.Types[gc.TUINT64] } if gc.Isconst(nl, gc.CTSTR) { gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval))) } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING { if gc.Is64(nr.Type) { var n5 gc.Node regalloc(&n5, t, nil) gmove(&nlen, &n5) regfree(&nlen) nlen = n5 } } else { gc.Nodconst(&nlen, t, nl.Type.Bound) if !gc.Smallintconst(&nlen) { var n5 gc.Node regalloc(&n5, t, nil) gmove(&nlen, &n5) nlen = n5 freelen = 1 } } gins(optoas(gc.OCMP, t), &n2, &nlen) p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1) ginscall(gc.Panicindex, -1) gc.Patch(p1, 
gc.Pc) } if gc.Isconst(nl, gc.CTSTR) { regalloc(&n3, gc.Types[gc.Tptr], res) p1 := gins(x86.ALEAQ, nil, &n3) gc.Datastring(nl.Val.U.Sval, &p1.From) gins(x86.AADDQ, &n2, &n3) goto indexdone } if w == 0 { // nothing to do } else if w == 1 || w == 2 || w == 4 || w == 8 { p1 := gins(x86.ALEAQ, &n2, &n3) p1.From.Type = obj.TYPE_MEM p1.From.Scale = int16(w) p1.From.Index = p1.From.Reg p1.From.Reg = p1.To.Reg } else { ginscon(optoas(gc.OMUL, t), int64(w), &n2) gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3) } indexdone: *a = n3 regfree(&n2) if freelen != 0 { regfree(&nlen) } default: regalloc(a, gc.Types[gc.Tptr], res) agen(n, a) } }
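// agenr above materializes &nl[nr] for the OINDEX case: it loads the base
// pointer, range-checks the index against the length unless the expression is
// marked bounded, then adds index*width, using a scaled LEA when the width is
// 1, 2, 4 or 8 and an explicit multiply otherwise. Stripped of register
// bookkeeping, the emitted code computes (a sketch, illustration only):
func indexAddr(base uintptr, i, length, width uint64) uintptr {
	if i >= length { // the CMP/branch pair; the failing path reaches panicindex
		panic("index out of range")
	}
	return base + uintptr(i*width)
}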
/* * generate: * res = n; * simplifies and calls gmove. */ func cgen(n *gc.Node, res *gc.Node) { if gc.Debug['g'] != 0 { gc.Dump("\ncgen-n", n) gc.Dump("cgen-res", res) } if n == nil || n.Type == nil { return } if res == nil || res.Type == nil { gc.Fatal("cgen: res nil") } switch n.Op { case gc.OSLICE, gc.OSLICEARR, gc.OSLICESTR, gc.OSLICE3, gc.OSLICE3ARR: if res.Op != gc.ONAME || res.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_slice(n, &n1) cgen(&n1, res) } else { gc.Cgen_slice(n, res) } return case gc.OEFACE: if res.Op != gc.ONAME || res.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_eface(n, &n1) cgen(&n1, res) } else { gc.Cgen_eface(n, res) } return } for n.Op == gc.OCONVNOP { n = n.Left } if n.Ullman >= gc.UINF { if n.Op == gc.OINDREG { gc.Fatal("cgen: this is going to misscompile") } if res.Ullman >= gc.UINF { var n1 gc.Node gc.Tempname(&n1, n.Type) cgen(n, &n1) cgen(&n1, res) return } } if gc.Isfat(n.Type) { if n.Type.Width < 0 { gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0)) } sgen(n, res, n.Type.Width) return } // update addressability for string, slice // can't do in walk because n->left->addable // changes if n->left is an escaping local variable. switch n.Op { case gc.OSPTR, gc.OLEN: if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) { n.Addable = n.Left.Addable } case gc.OCAP: if gc.Isslice(n.Left.Type) { n.Addable = n.Left.Addable } case gc.OITAB: n.Addable = n.Left.Addable } // if both are addressable, move if n.Addable != 0 && res.Addable != 0 { if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] || gc.Iscomplex[res.Type.Etype] { gmove(n, res) } else { var n1 gc.Node regalloc(&n1, n.Type, nil) gmove(n, &n1) cgen(&n1, res) regfree(&n1) } return } // if both are not addressable, use a temporary. if n.Addable == 0 && res.Addable == 0 { // could use regalloc here sometimes, // but have to check for ullman >= UINF. var n1 gc.Node gc.Tempname(&n1, n.Type) cgen(n, &n1) cgen(&n1, res) return } // if result is not addressable directly but n is, // compute its address and then store via the address. if res.Addable == 0 { var n1 gc.Node igen(res, &n1, nil) cgen(n, &n1) regfree(&n1) return } if gc.Complexop(n, res) { gc.Complexgen(n, res) return } // if n is sudoaddable generate addr and move if !gc.Is64(n.Type) && !gc.Is64(res.Type) && !gc.Iscomplex[n.Type.Etype] && !gc.Iscomplex[res.Type.Etype] { a := optoas(gc.OAS, n.Type) var w int var addr obj.Addr if sudoaddable(a, n, &addr, &w) { if res.Op != gc.OREGISTER { var n2 gc.Node regalloc(&n2, res.Type, nil) p1 := gins(a, nil, &n2) p1.From = addr if gc.Debug['g'] != 0 { fmt.Printf("%v [ignore previous line]\n", p1) } gmove(&n2, res) regfree(&n2) } else { p1 := gins(a, nil, res) p1.From = addr if gc.Debug['g'] != 0 { fmt.Printf("%v [ignore previous line]\n", p1) } } sudoclean() return } } // otherwise, the result is addressable but n is not. // let's do some computation. nl := n.Left nr := n.Right if nl != nil && nl.Ullman >= gc.UINF { if nr != nil && nr.Ullman >= gc.UINF { var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) n2 := *n n2.Left = &n1 cgen(&n2, res) return } } // 64-bit ops are hard on 32-bit machine. if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) { switch n.Op { // math goes to cgen64. 
case gc.OMINUS, gc.OCOM, gc.OADD, gc.OSUB, gc.OMUL, gc.OLROT, gc.OLSH, gc.ORSH, gc.OAND, gc.OOR, gc.OXOR: cgen64(n, res) return } } var a int var f0 gc.Node var n1 gc.Node var n2 gc.Node if nl != nil && gc.Isfloat[n.Type.Etype] && gc.Isfloat[nl.Type.Etype] { // floating-point. regalloc(&f0, nl.Type, res) if nr != nil { goto flt2 } if n.Op == gc.OMINUS { nr = gc.Nodintconst(-1) gc.Convlit(&nr, n.Type) n.Op = gc.OMUL goto flt2 } // unary cgen(nl, &f0) if n.Op != gc.OCONV && n.Op != gc.OPLUS { gins(optoas(int(n.Op), n.Type), &f0, &f0) } gmove(&f0, res) regfree(&f0) return } switch n.Op { default: gc.Dump("cgen", n) gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign)) case gc.OREAL, gc.OIMAG, gc.OCOMPLEX: gc.Fatal("unexpected complex") // these call bgen to get a bool value case gc.OOROR, gc.OANDAND, gc.OEQ, gc.ONE, gc.OLT, gc.OLE, gc.OGE, gc.OGT, gc.ONOT: p1 := gc.Gbranch(arm.AB, nil, 0) p2 := gc.Pc gmove(gc.Nodbool(true), res) p3 := gc.Gbranch(arm.AB, nil, 0) gc.Patch(p1, gc.Pc) bgen(n, true, 0, p2) gmove(gc.Nodbool(false), res) gc.Patch(p3, gc.Pc) return case gc.OPLUS: cgen(nl, res) return // unary case gc.OCOM: a := optoas(gc.OXOR, nl.Type) regalloc(&n1, nl.Type, nil) cgen(nl, &n1) gc.Nodconst(&n2, nl.Type, -1) gins(a, &n2, &n1) goto norm case gc.OMINUS: regalloc(&n1, nl.Type, nil) cgen(nl, &n1) gc.Nodconst(&n2, nl.Type, 0) gins(optoas(gc.OMINUS, nl.Type), &n2, &n1) goto norm // symmetric binary case gc.OAND, gc.OOR, gc.OXOR, gc.OADD, gc.OMUL: a = optoas(int(n.Op), nl.Type) // symmetric binary if nl.Ullman < nr.Ullman { r := nl nl = nr nr = r } goto abop // asymmetric binary case gc.OSUB: a = optoas(int(n.Op), nl.Type) goto abop case gc.OHMUL: cgen_hmul(nl, nr, res) case gc.OLROT, gc.OLSH, gc.ORSH: cgen_shift(int(n.Op), n.Bounded, nl, nr, res) case gc.OCONV: if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) { cgen(nl, res) break } var n1 gc.Node if nl.Addable != 0 && !gc.Is64(nl.Type) { regalloc(&n1, nl.Type, res) gmove(nl, &n1) } else { if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] { gc.Tempname(&n1, nl.Type) } else { regalloc(&n1, nl.Type, res) } cgen(nl, &n1) } var n2 gc.Node if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] { gc.Tempname(&n2, n.Type) } else { regalloc(&n2, n.Type, nil) } gmove(&n1, &n2) gmove(&n2, res) if n1.Op == gc.OREGISTER { regfree(&n1) } if n2.Op == gc.OREGISTER { regfree(&n2) } case gc.ODOT, gc.ODOTPTR, gc.OINDEX, gc.OIND, gc.ONAME: // PHEAP or PPARAMREF var var n1 gc.Node igen(n, &n1, res) gmove(&n1, res) regfree(&n1) // interface table is first word of interface value case gc.OITAB: var n1 gc.Node igen(nl, &n1, res) n1.Type = n.Type gmove(&n1, res) regfree(&n1) // pointer is the first word of string or slice. case gc.OSPTR: if gc.Isconst(nl, gc.CTSTR) { var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) p1 := gins(arm.AMOVW, nil, &n1) gc.Datastring(nl.Val.U.Sval, &p1.From) gmove(&n1, res) regfree(&n1) break } var n1 gc.Node igen(nl, &n1, res) n1.Type = n.Type gmove(&n1, res) regfree(&n1) case gc.OLEN: if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) { // map has len in the first 32-bit word. 
// a zero pointer means zero length var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) cgen(nl, &n1) var n2 gc.Node gc.Nodconst(&n2, gc.Types[gc.Tptr], 0) gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2) p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1) n2 = n1 n2.Op = gc.OINDREG n2.Type = gc.Types[gc.TINT32] gmove(&n2, &n1) gc.Patch(p1, gc.Pc) gmove(&n1, res) regfree(&n1) break } if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) { // both slice and string have len one pointer into the struct. var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Types[gc.TUINT32] n1.Xoffset += int64(gc.Array_nel) gmove(&n1, res) regfree(&n1) break } gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong)) case gc.OCAP: if gc.Istype(nl.Type, gc.TCHAN) { // chan has cap in the second 32-bit word. // a zero pointer means zero length var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) cgen(nl, &n1) var n2 gc.Node gc.Nodconst(&n2, gc.Types[gc.Tptr], 0) gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2) p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1) n2 = n1 n2.Op = gc.OINDREG n2.Xoffset = 4 n2.Type = gc.Types[gc.TINT32] gmove(&n2, &n1) gc.Patch(p1, gc.Pc) gmove(&n1, res) regfree(&n1) break } if gc.Isslice(nl.Type) { var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Types[gc.TUINT32] n1.Xoffset += int64(gc.Array_cap) gmove(&n1, res) regfree(&n1) break } gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong)) case gc.OADDR: agen(nl, res) // Release res so that it is available for cgen_call. // Pick it up again after the call. case gc.OCALLMETH, gc.OCALLFUNC: rg := -1 if n.Ullman >= gc.UINF { if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) { rg = int(res.Val.U.Reg) reg[rg]-- } } if n.Op == gc.OCALLMETH { gc.Cgen_callmeth(n, 0) } else { cgen_call(n, 0) } if rg >= 0 { reg[rg]++ } cgen_callret(n, res) case gc.OCALLINTER: cgen_callinter(n, res, 0) cgen_callret(n, res) case gc.OMOD, gc.ODIV: a = optoas(int(n.Op), nl.Type) goto abop } return // TODO(kaib): use fewer registers here. abop: // asymmetric binary if nl.Ullman >= nr.Ullman { regalloc(&n1, nl.Type, res) cgen(nl, &n1) switch n.Op { case gc.OADD, gc.OSUB, gc.OAND, gc.OOR, gc.OXOR: if gc.Smallintconst(nr) { n2 = *nr break } fallthrough default: regalloc(&n2, nr.Type, nil) cgen(nr, &n2) } } else { switch n.Op { case gc.OADD, gc.OSUB, gc.OAND, gc.OOR, gc.OXOR: if gc.Smallintconst(nr) { n2 = *nr break } fallthrough default: regalloc(&n2, nr.Type, res) cgen(nr, &n2) } regalloc(&n1, nl.Type, nil) cgen(nl, &n1) } gins(a, &n2, &n1) // Normalize result for types smaller than word. norm: if n.Type.Width < int64(gc.Widthptr) { switch n.Op { case gc.OADD, gc.OSUB, gc.OMUL, gc.OCOM, gc.OMINUS: gins(optoas(gc.OAS, n.Type), &n1, &n1) } } gmove(&n1, res) regfree(&n1) if n2.Op != gc.OLITERAL { regfree(&n2) } return flt2: // binary var f1 gc.Node if nl.Ullman >= nr.Ullman { cgen(nl, &f0) regalloc(&f1, n.Type, nil) gmove(&f0, &f1) cgen(nr, &f0) gins(optoas(int(n.Op), n.Type), &f0, &f1) } else { cgen(nr, &f0) regalloc(&f1, n.Type, nil) cgen(nl, &f1) gins(optoas(int(n.Op), n.Type), &f0, &f1) } gmove(&f1, res) regfree(&f0) regfree(&f1) return }
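// The boolean-operator case in cgen above (OOROR, OANDAND, comparisons, ONOT)
// materializes the value with branches rather than a conditional-set
// instruction: it jumps over a constant-true store to the code bgen emits for
// the condition, which branches back to that store when the condition holds
// and otherwise falls through to a constant-false store. The result is
// equivalent to this sketch (illustration only):
func condValue(cond bool) bool {
	if cond {
		return true // bgen's branch target, the constant-true store
	}
	return false // fall-through path, the constant-false store
}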
/* * generate: * if(n == true) goto to; */ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) { if gc.Debug['g'] != 0 { gc.Dump("\nbgen", n) } if n == nil { n = gc.Nodbool(true) } if n.Ninit != nil { gc.Genlist(n.Ninit) } if n.Type == nil { gc.Convlit(&n, gc.Types[gc.TBOOL]) if n.Type == nil { return } } et := int(n.Type.Etype) if et != gc.TBOOL { gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0)) gc.Patch(gins(obj.AEND, nil, nil), to) return } var nr *gc.Node var nl *gc.Node switch n.Op { default: a := gc.ONE if !true_ { a = gc.OEQ } gencmp0(n, n.Type, a, likely, to) return // need to ask if it is bool? case gc.OLITERAL: if !true_ == (n.Val.U.Bval == 0) { gc.Patch(gc.Gbranch(arm.AB, nil, 0), to) } return case gc.OANDAND, gc.OOROR: if (n.Op == gc.OANDAND) == true_ { p1 := gc.Gbranch(obj.AJMP, nil, 0) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) bgen(n.Left, !true_, -likely, p2) bgen(n.Right, !true_, -likely, p2) p1 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, to) gc.Patch(p2, gc.Pc) } else { bgen(n.Left, true_, likely, to) bgen(n.Right, true_, likely, to) } return case gc.OEQ, gc.ONE, gc.OLT, gc.OGT, gc.OLE, gc.OGE: nr = n.Right if nr == nil || nr.Type == nil { return } fallthrough case gc.ONOT: // unary nl = n.Left if nl == nil || nl.Type == nil { return } } switch n.Op { case gc.ONOT: bgen(nl, !true_, likely, to) return case gc.OEQ, gc.ONE, gc.OLT, gc.OGT, gc.OLE, gc.OGE: a := int(n.Op) if !true_ { if gc.Isfloat[nl.Type.Etype] { // brcom is not valid on floats when NaN is involved. p1 := gc.Gbranch(arm.AB, nil, 0) p2 := gc.Gbranch(arm.AB, nil, 0) gc.Patch(p1, gc.Pc) ll := n.Ninit n.Ninit = nil bgen(n, true, -likely, p2) n.Ninit = ll gc.Patch(gc.Gbranch(arm.AB, nil, 0), to) gc.Patch(p2, gc.Pc) return } a = gc.Brcom(a) true_ = !true_ } // make simplest on right if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) { a = gc.Brrev(a) r := nl nl = nr nr = r } if gc.Isslice(nl.Type) { // only valid to cmp darray to literal nil if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL { gc.Yyerror("illegal array comparison") break } var n1 gc.Node igen(nl, &n1, nil) n1.Xoffset += int64(gc.Array_array) n1.Type = gc.Types[gc.Tptr] gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to) regfree(&n1) break } if gc.Isinter(nl.Type) { // front end should only leave cmp to literal nil if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL { gc.Yyerror("illegal interface comparison") break } var n1 gc.Node igen(nl, &n1, nil) n1.Type = gc.Types[gc.Tptr] n1.Xoffset += 0 gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to) regfree(&n1) break } if gc.Iscomplex[nl.Type.Etype] { gc.Complexbool(a, nl, nr, true_, likely, to) break } if gc.Is64(nr.Type) { if nl.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) nl = &n1 } if nr.Addable == 0 { var n2 gc.Node gc.Tempname(&n2, nr.Type) cgen(nr, &n2) nr = &n2 } cmp64(nl, nr, a, likely, to) break } if nr.Op == gc.OLITERAL { if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 { gencmp0(nl, nl.Type, a, likely, to) break } if nr.Val.Ctype == gc.CTNIL { gencmp0(nl, nl.Type, a, likely, to) break } } a = optoas(a, nr.Type) if nr.Ullman >= gc.UINF { var n1 gc.Node regalloc(&n1, nl.Type, nil) cgen(nl, &n1) var tmp gc.Node gc.Tempname(&tmp, nl.Type) gmove(&n1, &tmp) regfree(&n1) var n2 gc.Node regalloc(&n2, nr.Type, nil) cgen(nr, &n2) regalloc(&n1, nl.Type, nil) cgen(&tmp, &n1) gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2) gc.Patch(gc.Gbranch(a, nr.Type, likely), to) regfree(&n1) regfree(&n2) 
break } var n3 gc.Node gc.Tempname(&n3, nl.Type) cgen(nl, &n3) var tmp gc.Node gc.Tempname(&tmp, nr.Type) cgen(nr, &tmp) var n1 gc.Node regalloc(&n1, nl.Type, nil) gmove(&n3, &n1) var n2 gc.Node regalloc(&n2, nr.Type, nil) gmove(&tmp, &n2) gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2) if gc.Isfloat[nl.Type.Etype] { if n.Op == gc.ONE { p1 := gc.Gbranch(arm.ABVS, nr.Type, likely) gc.Patch(gc.Gbranch(a, nr.Type, likely), to) gc.Patch(p1, to) } else { p1 := gc.Gbranch(arm.ABVS, nr.Type, -likely) gc.Patch(gc.Gbranch(a, nr.Type, likely), to) gc.Patch(p1, gc.Pc) } } else { gc.Patch(gc.Gbranch(a, nr.Type, likely), to) } regfree(&n1) regfree(&n2) } return }
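// bgen above normalizes comparisons in two distinct ways that are easy to
// conflate. gc.Brrev reverses operand order (a < b is the same test as b > a)
// and is always safe; gc.Brcom negates the condition (!(a < b) becomes a >= b),
// which is wrong for floating point once a NaN is involved, since both a < b
// and a >= b are then false. That is why the float path above re-branches
// around an unnegated test instead of complementing it. For OLT, for example:
//
//	rev(OLT) = OGT   // operands exchanged, same outcome
//	com(OLT) = OGE   // condition negated; not NaN-safe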
/* * branch gen * if(n == true) goto to; */ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) { if gc.Debug['g'] != 0 { gc.Dump("\nbgen", n) } if n == nil { n = gc.Nodbool(true) } if n.Ninit != nil { gc.Genlist(n.Ninit) } if n.Type == nil { gc.Convlit(&n, gc.Types[gc.TBOOL]) if n.Type == nil { return } } et := int(n.Type.Etype) if et != gc.TBOOL { gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0)) gc.Patch(gins(obj.AEND, nil, nil), to) return } for n.Op == gc.OCONVNOP { n = n.Left if n.Ninit != nil { gc.Genlist(n.Ninit) } } nl := n.Left var nr *gc.Node if nl != nil && gc.Isfloat[nl.Type.Etype] { bgen_float(n, bool2int(true_), likely, to) return } switch n.Op { default: goto def // need to ask if it is bool? case gc.OLITERAL: if !true_ == (n.Val.U.Bval == 0) { gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to) } return case gc.ONAME: if n.Addable == 0 { goto def } var n1 gc.Node gc.Nodconst(&n1, n.Type, 0) gins(optoas(gc.OCMP, n.Type), n, &n1) a := x86.AJNE if !true_ { a = x86.AJEQ } gc.Patch(gc.Gbranch(a, n.Type, likely), to) return case gc.OANDAND, gc.OOROR: if (n.Op == gc.OANDAND) == true_ { p1 := gc.Gbranch(obj.AJMP, nil, 0) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) bgen(n.Left, !true_, -likely, p2) bgen(n.Right, !true_, -likely, p2) p1 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, to) gc.Patch(p2, gc.Pc) } else { bgen(n.Left, true_, likely, to) bgen(n.Right, true_, likely, to) } return case gc.OEQ, gc.ONE, gc.OLT, gc.OGT, gc.OLE, gc.OGE: nr = n.Right if nr == nil || nr.Type == nil { return } fallthrough case gc.ONOT: // unary nl = n.Left if nl == nil || nl.Type == nil { return } } switch n.Op { case gc.ONOT: bgen(nl, !true_, likely, to) case gc.OEQ, gc.ONE, gc.OLT, gc.OGT, gc.OLE, gc.OGE: a := int(n.Op) if !true_ { a = gc.Brcom(a) true_ = !true_ } // make simplest on right if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) { a = gc.Brrev(a) r := nl nl = nr nr = r } if gc.Isslice(nl.Type) { // front end should only leave cmp to literal nil if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL { gc.Yyerror("illegal slice comparison") break } a = optoas(a, gc.Types[gc.Tptr]) var n1 gc.Node igen(nl, &n1, nil) n1.Xoffset += int64(gc.Array_array) n1.Type = gc.Types[gc.Tptr] var tmp gc.Node gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp) gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to) regfree(&n1) break } if gc.Isinter(nl.Type) { // front end should only leave cmp to literal nil if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL { gc.Yyerror("illegal interface comparison") break } a = optoas(a, gc.Types[gc.Tptr]) var n1 gc.Node igen(nl, &n1, nil) n1.Type = gc.Types[gc.Tptr] var tmp gc.Node gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp) gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to) regfree(&n1) break } if gc.Iscomplex[nl.Type.Etype] { gc.Complexbool(a, nl, nr, true_, likely, to) break } if gc.Is64(nr.Type) { if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) { var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) nl = &n1 } if nr.Addable == 0 { var n2 gc.Node gc.Tempname(&n2, nr.Type) cgen(nr, &n2) nr = &n2 } cmp64(nl, nr, a, likely, to) break } var n2 gc.Node if nr.Ullman >= gc.UINF { if nl.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) nl = &n1 } if nr.Addable == 0 { var tmp gc.Node gc.Tempname(&tmp, nr.Type) cgen(nr, &tmp) nr = &tmp } var n2 gc.Node regalloc(&n2, nr.Type, nil) cgen(nr, &n2) nr 
= &n2 goto cmp } if nl.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) nl = &n1 } if gc.Smallintconst(nr) { gins(optoas(gc.OCMP, nr.Type), nl, nr) gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to) break } if nr.Addable == 0 { var tmp gc.Node gc.Tempname(&tmp, nr.Type) cgen(nr, &tmp) nr = &tmp } regalloc(&n2, nr.Type, nil) gmove(nr, &n2) nr = &n2 cmp: gins(optoas(gc.OCMP, nr.Type), nl, nr) gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to) if nl.Op == gc.OREGISTER { regfree(nl) } regfree(nr) } return def: var n1 gc.Node regalloc(&n1, n.Type, nil) cgen(n, &n1) var n2 gc.Node gc.Nodconst(&n2, n.Type, 0) gins(optoas(gc.OCMP, n.Type), &n1, &n2) a := x86.AJNE if !true_ { a = x86.AJEQ } gc.Patch(gc.Gbranch(a, n.Type, likely), to) regfree(&n1) return }
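// The OANDAND/OOROR case in both bgen variants never produces a value; it
// turns short-circuit evaluation directly into control flow. For
// "if a && b goto L" the branch structure built above is equivalent to
// (illustration only):
//
//	if !a { goto skip }
//	if !b { goto skip }
//	goto L
//	skip:
//
// and the || form simply branches to L as soon as either operand is true.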
/* * generate: * res = n; * simplifies and calls gmove. * * TODO: * sudoaddable */ func cgen(n *gc.Node, res *gc.Node) { if gc.Debug['g'] != 0 { gc.Dump("\ncgen-n", n) gc.Dump("cgen-res", res) } if n == nil || n.Type == nil { gc.Fatal("cgen: n nil") } if res == nil || res.Type == nil { gc.Fatal("cgen: res nil") } switch n.Op { case gc.OSLICE, gc.OSLICEARR, gc.OSLICESTR, gc.OSLICE3, gc.OSLICE3ARR: if res.Op != gc.ONAME || res.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_slice(n, &n1) cgen(&n1, res) } else { gc.Cgen_slice(n, res) } return case gc.OEFACE: if res.Op != gc.ONAME || res.Addable == 0 { var n1 gc.Node gc.Tempname(&n1, n.Type) gc.Cgen_eface(n, &n1) cgen(&n1, res) } else { gc.Cgen_eface(n, res) } return } for n.Op == gc.OCONVNOP { n = n.Left } // function calls on both sides? introduce temporary if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF { var n1 gc.Node gc.Tempname(&n1, n.Type) cgen(n, &n1) cgen(&n1, res) return } // structs etc get handled specially if gc.Isfat(n.Type) { if n.Type.Width < 0 { gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0)) } sgen(n, res, n.Type.Width) return } // update addressability for string, slice // can't do in walk because n->left->addable // changes if n->left is an escaping local variable. switch n.Op { case gc.OSPTR, gc.OLEN: if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) { n.Addable = n.Left.Addable } case gc.OCAP: if gc.Isslice(n.Left.Type) { n.Addable = n.Left.Addable } case gc.OITAB: n.Addable = n.Left.Addable } // if both are addressable, move if n.Addable != 0 && res.Addable != 0 { gmove(n, res) return } // if both are not addressable, use a temporary. if n.Addable == 0 && res.Addable == 0 { // could use regalloc here sometimes, // but have to check for ullman >= UINF. var n1 gc.Node gc.Tempname(&n1, n.Type) cgen(n, &n1) cgen(&n1, res) return } // if result is not addressable directly but n is, // compute its address and then store via the address. if res.Addable == 0 { var n1 gc.Node igen(res, &n1, nil) cgen(n, &n1) regfree(&n1) return } // complex types if gc.Complexop(n, res) { gc.Complexgen(n, res) return } // otherwise, the result is addressable but n is not. // let's do some computation. // use ullman to pick operand to eval first. nl := n.Left nr := n.Right if nl != nil && nl.Ullman >= gc.UINF { if nr != nil && nr.Ullman >= gc.UINF { // both are hard var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) n2 := *n n2.Left = &n1 cgen(&n2, res) return } } // 64-bit ops are hard on 32-bit machine. if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) { switch n.Op { // math goes to cgen64. 
case gc.OMINUS, gc.OCOM, gc.OADD, gc.OSUB, gc.OMUL, gc.OLROT, gc.OLSH, gc.ORSH, gc.OAND, gc.OOR, gc.OXOR: cgen64(n, res) return } } if nl != nil && gc.Isfloat[n.Type.Etype] && gc.Isfloat[nl.Type.Etype] { cgen_float(n, res) return } var a int switch n.Op { default: gc.Dump("cgen", n) gc.Fatal("cgen %v", gc.Oconv(int(n.Op), 0)) case gc.OREAL, gc.OIMAG, gc.OCOMPLEX: gc.Fatal("unexpected complex") return // these call bgen to get a bool value case gc.OOROR, gc.OANDAND, gc.OEQ, gc.ONE, gc.OLT, gc.OLE, gc.OGE, gc.OGT, gc.ONOT: p1 := gc.Gbranch(obj.AJMP, nil, 0) p2 := gc.Pc gmove(gc.Nodbool(true), res) p3 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) bgen(n, true, 0, p2) gmove(gc.Nodbool(false), res) gc.Patch(p3, gc.Pc) return case gc.OPLUS: cgen(nl, res) return case gc.OMINUS, gc.OCOM: a := optoas(int(n.Op), nl.Type) // unary var n1 gc.Node gc.Tempname(&n1, nl.Type) cgen(nl, &n1) gins(a, nil, &n1) gmove(&n1, res) return // symmetric binary case gc.OAND, gc.OOR, gc.OXOR, gc.OADD, gc.OMUL: a = optoas(int(n.Op), nl.Type) if a == x86.AIMULB { cgen_bmul(int(n.Op), nl, nr, res) break } // symmetric binary if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL { r := nl nl = nr nr = r } goto abop // asymmetric binary case gc.OSUB: a = optoas(int(n.Op), nl.Type) goto abop case gc.OHMUL: cgen_hmul(nl, nr, res) case gc.OCONV: if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) { cgen(nl, res) break } var n2 gc.Node gc.Tempname(&n2, n.Type) var n1 gc.Node mgen(nl, &n1, res) gmove(&n1, &n2) gmove(&n2, res) mfree(&n1) case gc.ODOT, gc.ODOTPTR, gc.OINDEX, gc.OIND, gc.ONAME: // PHEAP or PPARAMREF var var n1 gc.Node igen(n, &n1, res) gmove(&n1, res) regfree(&n1) case gc.OITAB: var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Ptrto(gc.Types[gc.TUINTPTR]) gmove(&n1, res) regfree(&n1) // pointer is the first word of string or slice. case gc.OSPTR: if gc.Isconst(nl, gc.CTSTR) { var n1 gc.Node regalloc(&n1, gc.Types[gc.Tptr], res) p1 := gins(x86.ALEAL, nil, &n1) gc.Datastring(nl.Val.U.Sval, &p1.From) gmove(&n1, res) regfree(&n1) break } var n1 gc.Node igen(nl, &n1, res) n1.Type = n.Type gmove(&n1, res) regfree(&n1) case gc.OLEN: if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) { // map has len in the first 32-bit word. // a zero pointer means zero length var n1 gc.Node gc.Tempname(&n1, gc.Types[gc.Tptr]) cgen(nl, &n1) var n2 gc.Node regalloc(&n2, gc.Types[gc.Tptr], nil) gmove(&n1, &n2) n1 = n2 gc.Nodconst(&n2, gc.Types[gc.Tptr], 0) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2) p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1) n2 = n1 n2.Op = gc.OINDREG n2.Type = gc.Types[gc.TINT32] gmove(&n2, &n1) gc.Patch(p1, gc.Pc) gmove(&n1, res) regfree(&n1) break } if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) { // both slice and string have len one pointer into the struct. var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Types[gc.TUINT32] n1.Xoffset += int64(gc.Array_nel) gmove(&n1, res) regfree(&n1) break } gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong)) case gc.OCAP: if gc.Istype(nl.Type, gc.TCHAN) { // chan has cap in the second 32-bit word. 
// a zero pointer means zero length var n1 gc.Node gc.Tempname(&n1, gc.Types[gc.Tptr]) cgen(nl, &n1) var n2 gc.Node regalloc(&n2, gc.Types[gc.Tptr], nil) gmove(&n1, &n2) n1 = n2 gc.Nodconst(&n2, gc.Types[gc.Tptr], 0) gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2) p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1) n2 = n1 n2.Op = gc.OINDREG n2.Xoffset = 4 n2.Type = gc.Types[gc.TINT32] gmove(&n2, &n1) gc.Patch(p1, gc.Pc) gmove(&n1, res) regfree(&n1) break } if gc.Isslice(nl.Type) { var n1 gc.Node igen(nl, &n1, res) n1.Type = gc.Types[gc.TUINT32] n1.Xoffset += int64(gc.Array_cap) gmove(&n1, res) regfree(&n1) break } gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong)) case gc.OADDR: agen(nl, res) case gc.OCALLMETH: gc.Cgen_callmeth(n, 0) cgen_callret(n, res) case gc.OCALLINTER: cgen_callinter(n, res, 0) cgen_callret(n, res) case gc.OCALLFUNC: cgen_call(n, 0) cgen_callret(n, res) case gc.OMOD, gc.ODIV: cgen_div(int(n.Op), nl, nr, res) case gc.OLSH, gc.ORSH, gc.OLROT: cgen_shift(int(n.Op), n.Bounded, nl, nr, res) } return abop: // asymmetric binary if gc.Smallintconst(nr) { var n1 gc.Node mgen(nl, &n1, res) var n2 gc.Node regalloc(&n2, nl.Type, &n1) gmove(&n1, &n2) gins(a, nr, &n2) gmove(&n2, res) regfree(&n2) mfree(&n1) } else if nl.Ullman >= nr.Ullman { var nt gc.Node gc.Tempname(&nt, nl.Type) cgen(nl, &nt) var n2 gc.Node mgen(nr, &n2, nil) var n1 gc.Node regalloc(&n1, nl.Type, res) gmove(&nt, &n1) gins(a, &n2, &n1) gmove(&n1, res) regfree(&n1) mfree(&n2) } else { var n2 gc.Node regalloc(&n2, nr.Type, res) cgen(nr, &n2) var n1 gc.Node regalloc(&n1, nl.Type, nil) cgen(nl, &n1) gins(a, &n2, &n1) regfree(&n2) gmove(&n1, res) regfree(&n1) } return }
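// Both cgen variants finish binary operators at an abop: tail that orders
// operand evaluation by Ullman number, the compiler's Sethi-Ullman-style
// estimate of how many registers a subtree needs: the hungrier operand is
// generated first, while fewer values are live, and a small integer constant
// is used as an immediate operand directly. The ordering decision reduces to
// this sketch (illustration only):
func evalLeftFirst(ullLeft, ullRight int) bool {
	return ullLeft >= ullRight // cgen the left operand before the right one
}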