func anyregalloc() bool { var j int for i := int(0); i < len(reg); i++ { if reg[i] == 0 { goto ok } for j = 0; j < len(resvd); j++ { if resvd[j] == i { goto ok } } return true ok: } return false } /* * allocate register of type t, leave in n. * if o != N, o is desired fixed register. * caller must regfree(n). */ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) { if t == nil { gc.Fatal("regalloc: t nil") } et := int(int(gc.Simtype[t.Etype])) if gc.Debug['r'] != 0 { fixfree := int(0) fltfree := int(0) for i := int(ppc64.REG_R0); i < ppc64.REG_F31; i++ { if reg[i-ppc64.REG_R0] == 0 { if i < ppc64.REG_F0 { fixfree++ } else { fltfree++ } } } fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree) } var i int switch et { case gc.TINT8, gc.TUINT8, gc.TINT16, gc.TUINT16, gc.TINT32, gc.TUINT32, gc.TINT64, gc.TUINT64, gc.TPTR32, gc.TPTR64, gc.TBOOL: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= ppc64.REGMIN && i <= ppc64.REGMAX { goto out } } for i = ppc64.REGMIN; i <= ppc64.REGMAX; i++ { if reg[i-ppc64.REG_R0] == 0 { regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n)) goto out } } gc.Flusherrors() for i := int(ppc64.REG_R0); i < ppc64.REG_R0+ppc64.NREG; i++ { fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0]) } gc.Fatal("out of fixed registers") case gc.TFLOAT32, gc.TFLOAT64: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= ppc64.FREGMIN && i <= ppc64.FREGMAX { goto out } } for i = ppc64.FREGMIN; i <= ppc64.FREGMAX; i++ { if reg[i-ppc64.REG_R0] == 0 { regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n)) goto out } } gc.Flusherrors() for i := int(ppc64.REG_F0); i < ppc64.REG_F0+ppc64.NREG; i++ { fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0]) } gc.Fatal("out of floating registers") case gc.TCOMPLEX64, gc.TCOMPLEX128: gc.Tempname(n, t) return } gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0)) return out: reg[i-ppc64.REG_R0]++ gc.Nodreg(n, t, i) } func regfree(n *gc.Node) { if n.Op == gc.ONAME { return } if n.Op != gc.OREGISTER && n.Op != gc.OINDREG { gc.Fatal("regfree: not a register") } i := int(int(n.Val.U.Reg) - ppc64.REG_R0) if i == ppc64.REGSP-ppc64.REG_R0 { return } if i < 0 || i >= len(reg) { gc.Fatal("regfree: reg out of range") } if reg[i] <= 0 { gc.Fatal("regfree: reg not allocated") } reg[i]-- if reg[i] == 0 { regpc[i] = 0 } } /* * generate * as $c, n */ func ginscon(as int, c int64, n2 *gc.Node) { var n1 gc.Node gc.Nodconst(&n1, gc.Types[gc.TINT64], c) if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) { // cannot have more than 16-bit of immediate in ADD, etc. // instead, MOV into register first. var ntmp gc.Node regalloc(&ntmp, gc.Types[gc.TINT64], nil) gins(ppc64.AMOVD, &n1, &ntmp) gins(as, &ntmp, n2) regfree(&ntmp) return } gins(as, &n1, n2) } /* * generate * as n, $c (CMP/CMPU) */ func ginscon2(as int, n2 *gc.Node, c int64) { var n1 gc.Node gc.Nodconst(&n1, gc.Types[gc.TINT64], c) switch as { default: gc.Fatal("ginscon2") case ppc64.ACMP: if -ppc64.BIG <= c && c <= ppc64.BIG { gins(as, n2, &n1) return } case ppc64.ACMPU: if 0 <= c && c <= 2*ppc64.BIG { gins(as, n2, &n1) return } } // MOV n1 into register first var ntmp gc.Node regalloc(&ntmp, gc.Types[gc.TINT64], nil) gins(ppc64.AMOVD, &n1, &ntmp) gins(as, n2, &ntmp) regfree(&ntmp) } /* * set up nodes representing 2^63 */ var bigi gc.Node var bigf gc.Node var bignodes_did int func bignodes() { if bignodes_did != 0 { return } bignodes_did = 1 gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1) gc.Mpshiftfix(bigi.Val.U.Xval, 63) bigf = bigi bigf.Type = gc.Types[gc.TFLOAT64] bigf.Val.Ctype = gc.CTFLT bigf.Val.U.Fval = new(gc.Mpflt) gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval) } /* * generate move: * t = f * hard part is conversions. */ func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong)) } ft := int(gc.Simsimtype(f.Type)) tt := int(gc.Simsimtype(t.Type)) cvt := (*gc.Type)(t.Type) if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands var r2 gc.Node var r1 gc.Node var a int if gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node switch tt { default: gc.Convconst(&con, t.Type, &f.Val) case gc.TINT32, gc.TINT16, gc.TINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val) var r1 gc.Node regalloc(&r1, con.Type, t) gins(ppc64.AMOVD, &con, &r1) gmove(&r1, t) regfree(&r1) return case gc.TUINT32, gc.TUINT16, gc.TUINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val) var r1 gc.Node regalloc(&r1, con.Type, t) gins(ppc64.AMOVD, &con, &r1) gmove(&r1, t) regfree(&r1) return } f = &con ft = tt // so big switch will choose a simple mov // constants can't move directly to memory. if gc.Ismem(t) { goto hard } } // float constants come from memory. //if(isfloat[tt]) // goto hard; // 64-bit immediates are also from memory. //if(isint[tt]) // goto hard; //// 64-bit immediates are really 32-bit sign-extended //// unless moving into a register. //if(isint[tt]) { // if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0) // goto hard; // if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0) // goto hard; //} // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong)) /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT64<<16 | gc.TINT8, gc.TUINT64<<16 | gc.TINT8: a = ppc64.AMOVB case gc.TINT8<<16 | gc.TUINT8, // same size gc.TUINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, // truncate gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = ppc64.AMOVBZ case gc.TINT16<<16 | gc.TINT16, // same size gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT64<<16 | gc.TINT16, gc.TUINT64<<16 | gc.TINT16: a = ppc64.AMOVH case gc.TINT16<<16 | gc.TUINT16, // same size gc.TUINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, // truncate gc.TUINT32<<16 | gc.TUINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = ppc64.AMOVHZ case gc.TINT32<<16 | gc.TINT32, // same size gc.TUINT32<<16 | gc.TINT32, gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32: a = ppc64.AMOVW case gc.TINT32<<16 | gc.TUINT32, // same size gc.TUINT32<<16 | gc.TUINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: a = ppc64.AMOVWZ case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: a = ppc64.AMOVD /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32, gc.TINT8<<16 | gc.TINT64, gc.TINT8<<16 | gc.TUINT64: a = ppc64.AMOVB goto rdst case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32, gc.TUINT8<<16 | gc.TINT64, gc.TUINT8<<16 | gc.TUINT64: a = ppc64.AMOVBZ goto rdst case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32, gc.TINT16<<16 | gc.TINT64, gc.TINT16<<16 | gc.TUINT64: a = ppc64.AMOVH goto rdst case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32, gc.TUINT16<<16 | gc.TINT64, gc.TUINT16<<16 | gc.TUINT64: a = ppc64.AMOVHZ goto rdst case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: a = ppc64.AMOVW goto rdst case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: a = ppc64.AMOVWZ goto rdst //warn("gmove: convert float to int not implemented: %N -> %N\n", f, t); //return; // algorithm is: // if small enough, use native float64 -> int64 conversion. // otherwise, subtract 2^63, convert, and add it back. /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TINT64, gc.TFLOAT64<<16 | gc.TINT64, gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8, gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32, gc.TFLOAT32<<16 | gc.TUINT64, gc.TFLOAT64<<16 | gc.TUINT64: bignodes() var r1 gc.Node regalloc(&r1, gc.Types[ft], f) gmove(f, &r1) if tt == gc.TUINT64 { regalloc(&r2, gc.Types[gc.TFLOAT64], nil) gmove(&bigf, &r2) gins(ppc64.AFCMPU, &r1, &r2) p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) gins(ppc64.AFSUB, &r2, &r1) gc.Patch(p1, gc.Pc) regfree(&r2) } regalloc(&r2, gc.Types[gc.TFLOAT64], nil) var r3 gc.Node regalloc(&r3, gc.Types[gc.TINT64], t) gins(ppc64.AFCTIDZ, &r1, &r2) p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil)) p1.To.Type = obj.TYPE_MEM p1.To.Reg = ppc64.REGSP p1.To.Offset = -8 p1 = gins(ppc64.AMOVD, nil, &r3) p1.From.Type = obj.TYPE_MEM p1.From.Reg = ppc64.REGSP p1.From.Offset = -8 regfree(&r2) regfree(&r1) if tt == gc.TUINT64 { p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP) gins(ppc64.AMOVD, &bigi, &r1) gins(ppc64.AADD, &r1, &r3) gc.Patch(p1, gc.Pc) } gmove(&r3, t) regfree(&r3) return //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t); //return; // algorithm is: // if small enough, use native int64 -> uint64 conversion. // otherwise, halve (rounding to odd?), convert, and double. /* * integer to float */ case gc.TINT32<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT64, gc.TINT64<<16 | gc.TFLOAT32, gc.TINT64<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64, gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64, gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: bignodes() var r1 gc.Node regalloc(&r1, gc.Types[gc.TINT64], nil) gmove(f, &r1) if ft == gc.TUINT64 { gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP) gmove(&bigi, &r2) gins(ppc64.ACMPU, &r1, &r2) p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1)) p2.From.Type = obj.TYPE_CONST p2.From.Offset = 1 gc.Patch(p1, gc.Pc) } regalloc(&r2, gc.Types[gc.TFLOAT64], t) p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil)) p1.To.Type = obj.TYPE_MEM p1.To.Reg = ppc64.REGSP p1.To.Offset = -8 p1 = gins(ppc64.AFMOVD, nil, &r2) p1.From.Type = obj.TYPE_MEM p1.From.Reg = ppc64.REGSP p1.From.Offset = -8 gins(ppc64.AFCFID, &r2, &r2) regfree(&r1) if ft == gc.TUINT64 { p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO) gins(ppc64.AFMUL, &r1, &r2) gc.Patch(p1, gc.Pc) } gmove(&r2, t) regfree(&r2) return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = ppc64.AFMOVS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = ppc64.AFMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = ppc64.AFMOVS goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = ppc64.AFRSP goto rdst } gins(a, f, t) return // requires register destination rdst: { regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return } /* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { // TODO(austin): Add self-move test like in 6g (but be careful // of truncation moves) af := obj.Addr(obj.Addr{}) at := obj.Addr(obj.Addr{}) if f != nil { af = gc.Naddr(f) } if t != nil { at = gc.Naddr(t) } p := (*obj.Prog)(gc.Prog(as)) if f != nil { p.From = af } if t != nil { p.To = at } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } w := int32(0) switch as { case ppc64.AMOVB, ppc64.AMOVBU, ppc64.AMOVBZ, ppc64.AMOVBZU: w = 1 case ppc64.AMOVH, ppc64.AMOVHU, ppc64.AMOVHZ, ppc64.AMOVHZU: w = 2 case ppc64.AMOVW, ppc64.AMOVWU, ppc64.AMOVWZ, ppc64.AMOVWZU: w = 4 case ppc64.AMOVD, ppc64.AMOVDU: if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR { break } w = 8 } if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) { gc.Dump("f", f) gc.Dump("t", t) gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width) } return p } func fixlargeoffset(n *gc.Node) { if n == nil { return } if n.Op != gc.OINDREG { return } if n.Val.U.Reg == ppc64.REGSP { // stack offset cannot be large return } if n.Xoffset != int64(int32(n.Xoffset)) { // TODO(minux): offset too large, move into R31 and add to R31 instead. // this is used only in test/fixedbugs/issue6036.go. gc.Fatal("offset too large: %v", gc.Nconv(n, 0)) a := gc.Node(*n) a.Op = gc.OREGISTER a.Type = gc.Types[gc.Tptr] a.Xoffset = 0 gc.Cgen_checknil(&a) ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a) n.Xoffset = 0 } } /* * return Axxx for Oxxx on type t. */ func optoas(op int, t *gc.Type) int { if t == nil { gc.Fatal("optoas: t is nil") } a := int(obj.AXXX) switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0)) case gc.OEQ<<16 | gc.TBOOL, gc.OEQ<<16 | gc.TINT8, gc.OEQ<<16 | gc.TUINT8, gc.OEQ<<16 | gc.TINT16, gc.OEQ<<16 | gc.TUINT16, gc.OEQ<<16 | gc.TINT32, gc.OEQ<<16 | gc.TUINT32, gc.OEQ<<16 | gc.TINT64, gc.OEQ<<16 | gc.TUINT64, gc.OEQ<<16 | gc.TPTR32, gc.OEQ<<16 | gc.TPTR64, gc.OEQ<<16 | gc.TFLOAT32, gc.OEQ<<16 | gc.TFLOAT64: a = ppc64.ABEQ case gc.ONE<<16 | gc.TBOOL, gc.ONE<<16 | gc.TINT8, gc.ONE<<16 | gc.TUINT8, gc.ONE<<16 | gc.TINT16, gc.ONE<<16 | gc.TUINT16, gc.ONE<<16 | gc.TINT32, gc.ONE<<16 | gc.TUINT32, gc.ONE<<16 | gc.TINT64, gc.ONE<<16 | gc.TUINT64, gc.ONE<<16 | gc.TPTR32, gc.ONE<<16 | gc.TPTR64, gc.ONE<<16 | gc.TFLOAT32, gc.ONE<<16 | gc.TFLOAT64: a = ppc64.ABNE case gc.OLT<<16 | gc.TINT8, // ACMP gc.OLT<<16 | gc.TINT16, gc.OLT<<16 | gc.TINT32, gc.OLT<<16 | gc.TINT64, gc.OLT<<16 | gc.TUINT8, // ACMPU gc.OLT<<16 | gc.TUINT16, gc.OLT<<16 | gc.TUINT32, gc.OLT<<16 | gc.TUINT64, gc.OLT<<16 | gc.TFLOAT32, // AFCMPU gc.OLT<<16 | gc.TFLOAT64: a = ppc64.ABLT case gc.OLE<<16 | gc.TINT8, // ACMP gc.OLE<<16 | gc.TINT16, gc.OLE<<16 | gc.TINT32, gc.OLE<<16 | gc.TINT64, gc.OLE<<16 | gc.TUINT8, // ACMPU gc.OLE<<16 | gc.TUINT16, gc.OLE<<16 | gc.TUINT32, gc.OLE<<16 | gc.TUINT64, gc.OLE<<16 | gc.TFLOAT32, // AFCMPU gc.OLE<<16 | gc.TFLOAT64: a = ppc64.ABLE case gc.OGT<<16 | gc.TINT8, gc.OGT<<16 | gc.TINT16, gc.OGT<<16 | gc.TINT32, gc.OGT<<16 | gc.TINT64, gc.OGT<<16 | gc.TUINT8, gc.OGT<<16 | gc.TUINT16, gc.OGT<<16 | gc.TUINT32, gc.OGT<<16 | gc.TUINT64, gc.OGT<<16 | gc.TFLOAT32, gc.OGT<<16 | gc.TFLOAT64: a = ppc64.ABGT case gc.OGE<<16 | gc.TINT8, gc.OGE<<16 | gc.TINT16, gc.OGE<<16 | gc.TINT32, gc.OGE<<16 | gc.TINT64, gc.OGE<<16 | gc.TUINT8, gc.OGE<<16 | gc.TUINT16, gc.OGE<<16 | gc.TUINT32, gc.OGE<<16 | gc.TUINT64, gc.OGE<<16 | gc.TFLOAT32, gc.OGE<<16 | gc.TFLOAT64: a = ppc64.ABGE case gc.OCMP<<16 | gc.TBOOL, gc.OCMP<<16 | gc.TINT8, gc.OCMP<<16 | gc.TINT16, gc.OCMP<<16 | gc.TINT32, gc.OCMP<<16 | gc.TPTR32, gc.OCMP<<16 | gc.TINT64: a = ppc64.ACMP case gc.OCMP<<16 | gc.TUINT8, gc.OCMP<<16 | gc.TUINT16, gc.OCMP<<16 | gc.TUINT32, gc.OCMP<<16 | gc.TUINT64, gc.OCMP<<16 | gc.TPTR64: a = ppc64.ACMPU case gc.OCMP<<16 | gc.TFLOAT32, gc.OCMP<<16 | gc.TFLOAT64: a = ppc64.AFCMPU case gc.OAS<<16 | gc.TBOOL, gc.OAS<<16 | gc.TINT8: a = ppc64.AMOVB case gc.OAS<<16 | gc.TUINT8: a = ppc64.AMOVBZ case gc.OAS<<16 | gc.TINT16: a = ppc64.AMOVH case gc.OAS<<16 | gc.TUINT16: a = ppc64.AMOVHZ case gc.OAS<<16 | gc.TINT32: a = ppc64.AMOVW case gc.OAS<<16 | gc.TUINT32, gc.OAS<<16 | gc.TPTR32: a = ppc64.AMOVWZ case gc.OAS<<16 | gc.TINT64, gc.OAS<<16 | gc.TUINT64, gc.OAS<<16 | gc.TPTR64: a = ppc64.AMOVD case gc.OAS<<16 | gc.TFLOAT32: a = ppc64.AFMOVS case gc.OAS<<16 | gc.TFLOAT64: a = ppc64.AFMOVD case gc.OADD<<16 | gc.TINT8, gc.OADD<<16 | gc.TUINT8, gc.OADD<<16 | gc.TINT16, gc.OADD<<16 | gc.TUINT16, gc.OADD<<16 | gc.TINT32, gc.OADD<<16 | gc.TUINT32, gc.OADD<<16 | gc.TPTR32, gc.OADD<<16 | gc.TINT64, gc.OADD<<16 | gc.TUINT64, gc.OADD<<16 | gc.TPTR64: a = ppc64.AADD case gc.OADD<<16 | gc.TFLOAT32: a = ppc64.AFADDS case gc.OADD<<16 | gc.TFLOAT64: a = ppc64.AFADD case gc.OSUB<<16 | gc.TINT8, gc.OSUB<<16 | gc.TUINT8, gc.OSUB<<16 | gc.TINT16, gc.OSUB<<16 | gc.TUINT16, gc.OSUB<<16 | gc.TINT32, gc.OSUB<<16 | gc.TUINT32, gc.OSUB<<16 | gc.TPTR32, gc.OSUB<<16 | gc.TINT64, gc.OSUB<<16 | gc.TUINT64, gc.OSUB<<16 | gc.TPTR64: a = ppc64.ASUB case gc.OSUB<<16 | gc.TFLOAT32: a = ppc64.AFSUBS case gc.OSUB<<16 | gc.TFLOAT64: a = ppc64.AFSUB case gc.OMINUS<<16 | gc.TINT8, gc.OMINUS<<16 | gc.TUINT8, gc.OMINUS<<16 | gc.TINT16, gc.OMINUS<<16 | gc.TUINT16, gc.OMINUS<<16 | gc.TINT32, gc.OMINUS<<16 | gc.TUINT32, gc.OMINUS<<16 | gc.TPTR32, gc.OMINUS<<16 | gc.TINT64, gc.OMINUS<<16 | gc.TUINT64, gc.OMINUS<<16 | gc.TPTR64: a = ppc64.ANEG case gc.OAND<<16 | gc.TINT8, gc.OAND<<16 | gc.TUINT8, gc.OAND<<16 | gc.TINT16, gc.OAND<<16 | gc.TUINT16, gc.OAND<<16 | gc.TINT32, gc.OAND<<16 | gc.TUINT32, gc.OAND<<16 | gc.TPTR32, gc.OAND<<16 | gc.TINT64, gc.OAND<<16 | gc.TUINT64, gc.OAND<<16 | gc.TPTR64: a = ppc64.AAND case gc.OOR<<16 | gc.TINT8, gc.OOR<<16 | gc.TUINT8, gc.OOR<<16 | gc.TINT16, gc.OOR<<16 | gc.TUINT16, gc.OOR<<16 | gc.TINT32, gc.OOR<<16 | gc.TUINT32, gc.OOR<<16 | gc.TPTR32, gc.OOR<<16 | gc.TINT64, gc.OOR<<16 | gc.TUINT64, gc.OOR<<16 | gc.TPTR64: a = ppc64.AOR case gc.OXOR<<16 | gc.TINT8, gc.OXOR<<16 | gc.TUINT8, gc.OXOR<<16 | gc.TINT16, gc.OXOR<<16 | gc.TUINT16, gc.OXOR<<16 | gc.TINT32, gc.OXOR<<16 | gc.TUINT32, gc.OXOR<<16 | gc.TPTR32, gc.OXOR<<16 | gc.TINT64, gc.OXOR<<16 | gc.TUINT64, gc.OXOR<<16 | gc.TPTR64: a = ppc64.AXOR // TODO(minux): handle rotates //case CASE(OLROT, TINT8): //case CASE(OLROT, TUINT8): //case CASE(OLROT, TINT16): //case CASE(OLROT, TUINT16): //case CASE(OLROT, TINT32): //case CASE(OLROT, TUINT32): //case CASE(OLROT, TPTR32): //case CASE(OLROT, TINT64): //case CASE(OLROT, TUINT64): //case CASE(OLROT, TPTR64): // a = 0//???; RLDC? // break; case gc.OLSH<<16 | gc.TINT8, gc.OLSH<<16 | gc.TUINT8, gc.OLSH<<16 | gc.TINT16, gc.OLSH<<16 | gc.TUINT16, gc.OLSH<<16 | gc.TINT32, gc.OLSH<<16 | gc.TUINT32, gc.OLSH<<16 | gc.TPTR32, gc.OLSH<<16 | gc.TINT64, gc.OLSH<<16 | gc.TUINT64, gc.OLSH<<16 | gc.TPTR64: a = ppc64.ASLD case gc.ORSH<<16 | gc.TUINT8, gc.ORSH<<16 | gc.TUINT16, gc.ORSH<<16 | gc.TUINT32, gc.ORSH<<16 | gc.TPTR32, gc.ORSH<<16 | gc.TUINT64, gc.ORSH<<16 | gc.TPTR64: a = ppc64.ASRD case gc.ORSH<<16 | gc.TINT8, gc.ORSH<<16 | gc.TINT16, gc.ORSH<<16 | gc.TINT32, gc.ORSH<<16 | gc.TINT64: a = ppc64.ASRAD // TODO(minux): handle rotates //case CASE(ORROTC, TINT8): //case CASE(ORROTC, TUINT8): //case CASE(ORROTC, TINT16): //case CASE(ORROTC, TUINT16): //case CASE(ORROTC, TINT32): //case CASE(ORROTC, TUINT32): //case CASE(ORROTC, TINT64): //case CASE(ORROTC, TUINT64): // a = 0//??? RLDC?? // break; case gc.OHMUL<<16 | gc.TINT64: a = ppc64.AMULHD case gc.OHMUL<<16 | gc.TUINT64, gc.OHMUL<<16 | gc.TPTR64: a = ppc64.AMULHDU case gc.OMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TINT32, gc.OMUL<<16 | gc.TINT64: a = ppc64.AMULLD case gc.OMUL<<16 | gc.TUINT8, gc.OMUL<<16 | gc.TUINT16, gc.OMUL<<16 | gc.TUINT32, gc.OMUL<<16 | gc.TPTR32, // don't use word multiply, the high 32-bit are undefined. // fallthrough gc.OMUL<<16 | gc.TUINT64, gc.OMUL<<16 | gc.TPTR64: a = ppc64.AMULLD // for 64-bit multiplies, signedness doesn't matter. case gc.OMUL<<16 | gc.TFLOAT32: a = ppc64.AFMULS case gc.OMUL<<16 | gc.TFLOAT64: a = ppc64.AFMUL case gc.ODIV<<16 | gc.TINT8, gc.ODIV<<16 | gc.TINT16, gc.ODIV<<16 | gc.TINT32, gc.ODIV<<16 | gc.TINT64: a = ppc64.ADIVD case gc.ODIV<<16 | gc.TUINT8, gc.ODIV<<16 | gc.TUINT16, gc.ODIV<<16 | gc.TUINT32, gc.ODIV<<16 | gc.TPTR32, gc.ODIV<<16 | gc.TUINT64, gc.ODIV<<16 | gc.TPTR64: a = ppc64.ADIVDU case gc.ODIV<<16 | gc.TFLOAT32: a = ppc64.AFDIVS case gc.ODIV<<16 | gc.TFLOAT64: a = ppc64.AFDIV } return a } const ( ODynam = 1 << 0 OAddable = 1 << 1 ) func xgen(n *gc.Node, a *gc.Node, o int) bool { // TODO(minux) return -1 != 0 /*TypeKind(100016)*/ } func sudoclean() { return } /* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { // TODO(minux) *a = obj.Addr{} return false }
func anyregalloc() bool { var j int for i := int(0); i < len(reg); i++ { if reg[i] == 0 { goto ok } for j = 0; j < len(resvd); j++ { if resvd[j] == i { goto ok } } return true ok: } return false } /* * allocate register of type t, leave in n. * if o != N, o is desired fixed register. * caller must regfree(n). */ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) { if t == nil { gc.Fatal("regalloc: t nil") } et := int(int(gc.Simtype[t.Etype])) if gc.Debug['r'] != 0 { fixfree := int(0) fltfree := int(0) for i := int(arm64.REG_R0); i < arm64.REG_F31; i++ { if reg[i-arm64.REG_R0] == 0 { if i < arm64.REG_F0 { fixfree++ } else { fltfree++ } } } fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree) } var i int switch et { case gc.TINT8, gc.TUINT8, gc.TINT16, gc.TUINT16, gc.TINT32, gc.TUINT32, gc.TINT64, gc.TUINT64, gc.TPTR32, gc.TPTR64, gc.TBOOL: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= arm64.REGMIN && i <= arm64.REGMAX { goto out } } for i = arm64.REGMIN; i <= arm64.REGMAX; i++ { if reg[i-arm64.REG_R0] == 0 { regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n)) goto out } } gc.Flusherrors() for i := int(arm64.REG_R0); i < arm64.REG_R0+arm64.NREG; i++ { fmt.Printf("R%d %p\n", i, regpc[i-arm64.REG_R0]) } gc.Fatal("out of fixed registers") case gc.TFLOAT32, gc.TFLOAT64: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= arm64.FREGMIN && i <= arm64.FREGMAX { goto out } } for i = arm64.FREGMIN; i <= arm64.FREGMAX; i++ { if reg[i-arm64.REG_R0] == 0 { regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n)) goto out } } gc.Flusherrors() for i := int(arm64.REG_F0); i < arm64.REG_F0+arm64.NREG; i++ { fmt.Printf("F%d %p\n", i, regpc[i-arm64.REG_R0]) } gc.Fatal("out of floating registers") case gc.TCOMPLEX64, gc.TCOMPLEX128: gc.Tempname(n, t) return } gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0)) return out: reg[i-arm64.REG_R0]++ gc.Nodreg(n, t, i) } func regfree(n *gc.Node) { if n.Op == gc.ONAME { return } if n.Op != gc.OREGISTER && n.Op != gc.OINDREG { gc.Fatal("regfree: not a register") } i := int(int(n.Val.U.Reg) - arm64.REG_R0) if i == arm64.REGSP-arm64.REG_R0 { return } if i < 0 || i >= len(reg) { gc.Fatal("regfree: reg out of range") } if reg[i] <= 0 { gc.Fatal("regfree: reg not allocated") } reg[i]-- if reg[i] == 0 { regpc[i] = 0 } } /* * generate * as $c, n */ func ginscon(as int, c int64, n2 *gc.Node) { var n1 gc.Node gc.Nodconst(&n1, gc.Types[gc.TINT64], c) if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) { // cannot have more than 16-bit of immediate in ADD, etc. // instead, MOV into register first. var ntmp gc.Node regalloc(&ntmp, gc.Types[gc.TINT64], nil) gins(arm64.AMOVD, &n1, &ntmp) gins(as, &ntmp, n2) regfree(&ntmp) return } gins(as, &n1, n2) } /* * generate * as n, $c (CMP) */ func ginscon2(as int, n2 *gc.Node, c int64) { var n1 gc.Node gc.Nodconst(&n1, gc.Types[gc.TINT64], c) switch as { default: gc.Fatal("ginscon2") case arm64.ACMP: if -arm64.BIG <= c && c <= arm64.BIG { gcmp(as, n2, &n1) return } } // MOV n1 into register first var ntmp gc.Node regalloc(&ntmp, gc.Types[gc.TINT64], nil) gins(arm64.AMOVD, &n1, &ntmp) gcmp(as, n2, &ntmp) regfree(&ntmp) } /* * generate move: * t = f * hard part is conversions. */ func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong)) } ft := int(gc.Simsimtype(f.Type)) tt := int(gc.Simsimtype(t.Type)) cvt := (*gc.Type)(t.Type) if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands var r1 gc.Node var a int if gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node switch tt { default: gc.Convconst(&con, t.Type, &f.Val) case gc.TINT32, gc.TINT16, gc.TINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val) var r1 gc.Node regalloc(&r1, con.Type, t) gins(arm64.AMOVD, &con, &r1) gmove(&r1, t) regfree(&r1) return case gc.TUINT32, gc.TUINT16, gc.TUINT8: var con gc.Node gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val) var r1 gc.Node regalloc(&r1, con.Type, t) gins(arm64.AMOVD, &con, &r1) gmove(&r1, t) regfree(&r1) return } f = &con ft = tt // so big switch will choose a simple mov // constants can't move directly to memory. if gc.Ismem(t) { goto hard } } // value -> value copy, first operand in memory. // any floating point operand requires register // src, so goto hard to copy to register first. if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) { cvt = gc.Types[ft] goto hard } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong)) /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TUINT8<<16 | gc.TINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT64<<16 | gc.TINT8, gc.TUINT64<<16 | gc.TINT8: a = arm64.AMOVB case gc.TINT8<<16 | gc.TUINT8, // same size gc.TUINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TUINT8, // truncate gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = arm64.AMOVBU case gc.TINT16<<16 | gc.TINT16, // same size gc.TUINT16<<16 | gc.TINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT64<<16 | gc.TINT16, gc.TUINT64<<16 | gc.TINT16: a = arm64.AMOVH case gc.TINT16<<16 | gc.TUINT16, // same size gc.TUINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TUINT16, // truncate gc.TUINT32<<16 | gc.TUINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = arm64.AMOVHU case gc.TINT32<<16 | gc.TINT32, // same size gc.TUINT32<<16 | gc.TINT32, gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32: a = arm64.AMOVW case gc.TINT32<<16 | gc.TUINT32, // same size gc.TUINT32<<16 | gc.TUINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: a = arm64.AMOVWU case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: a = arm64.AMOVD /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16, gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32, gc.TINT8<<16 | gc.TINT64, gc.TINT8<<16 | gc.TUINT64: a = arm64.AMOVB goto rdst case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16, gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32, gc.TUINT8<<16 | gc.TINT64, gc.TUINT8<<16 | gc.TUINT64: a = arm64.AMOVBU goto rdst case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32, gc.TINT16<<16 | gc.TINT64, gc.TINT16<<16 | gc.TUINT64: a = arm64.AMOVH goto rdst case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32, gc.TUINT16<<16 | gc.TINT64, gc.TUINT16<<16 | gc.TUINT64: a = arm64.AMOVHU goto rdst case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: a = arm64.AMOVW goto rdst case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: a = arm64.AMOVWU goto rdst /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT32: a = arm64.AFCVTZSSW goto rdst case gc.TFLOAT64<<16 | gc.TINT32: a = arm64.AFCVTZSDW goto rdst case gc.TFLOAT32<<16 | gc.TINT64: a = arm64.AFCVTZSS goto rdst case gc.TFLOAT64<<16 | gc.TINT64: a = arm64.AFCVTZSD goto rdst case gc.TFLOAT32<<16 | gc.TUINT32: a = arm64.AFCVTZUSW goto rdst case gc.TFLOAT64<<16 | gc.TUINT32: a = arm64.AFCVTZUDW goto rdst case gc.TFLOAT32<<16 | gc.TUINT64: a = arm64.AFCVTZUS goto rdst case gc.TFLOAT64<<16 | gc.TUINT64: a = arm64.AFCVTZUD goto rdst case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8: cvt = gc.Types[gc.TINT32] goto hard case gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: cvt = gc.Types[gc.TUINT32] goto hard /* * integer to float */ case gc.TINT8<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT32: a = arm64.ASCVTFWS goto rdst case gc.TINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT32<<16 | gc.TFLOAT64: a = arm64.ASCVTFWD goto rdst case gc.TINT64<<16 | gc.TFLOAT32: a = arm64.ASCVTFS goto rdst case gc.TINT64<<16 | gc.TFLOAT64: a = arm64.ASCVTFD goto rdst case gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT32: a = arm64.AUCVTFWS goto rdst case gc.TUINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT32<<16 | gc.TFLOAT64: a = arm64.AUCVTFWD goto rdst case gc.TUINT64<<16 | gc.TFLOAT32: a = arm64.AUCVTFS goto rdst case gc.TUINT64<<16 | gc.TFLOAT64: a = arm64.AUCVTFD goto rdst /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = arm64.AFMOVS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = arm64.AFMOVD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = arm64.AFCVTSD goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = arm64.AFCVTDS goto rdst } gins(a, f, t) return // requires register destination rdst: regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return } /* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { // TODO(austin): Add self-move test like in 6g (but be careful // of truncation moves) af := obj.Addr(obj.Addr{}) at := obj.Addr(obj.Addr{}) if f != nil { af = gc.Naddr(f) } if t != nil { at = gc.Naddr(t) } p := (*obj.Prog)(gc.Prog(as)) if f != nil { p.From = af } if t != nil { p.To = at } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } w := int32(0) switch as { case arm64.AMOVB, arm64.AMOVBU: w = 1 case arm64.AMOVH, arm64.AMOVHU: w = 2 case arm64.AMOVW, arm64.AMOVWU: w = 4 case arm64.AMOVD: if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR { break } w = 8 } if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) { gc.Dump("f", f) gc.Dump("t", t) gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width) } return p } func fixlargeoffset(n *gc.Node) { if n == nil { return } if n.Op != gc.OINDREG { return } if -4096 <= n.Xoffset && n.Xoffset < 4096 { return } a := gc.Node(*n) a.Op = gc.OREGISTER a.Type = gc.Types[gc.Tptr] a.Xoffset = 0 gc.Cgen_checknil(&a) ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a) n.Xoffset = 0 } /* * insert n into reg slot of p */ func raddr(n *gc.Node, p *obj.Prog) { var a obj.Addr a = gc.Naddr(n) if a.Type != obj.TYPE_REG { if n != nil { gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0)) } else { gc.Fatal("bad in raddr: <null>") } p.Reg = 0 } else { p.Reg = a.Reg } } func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog { if lhs.Op != gc.OREGISTER { gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0)) } p := gins(as, rhs, nil) raddr(lhs, p) return p } /* * return Axxx for Oxxx on type t. */ func optoas(op int, t *gc.Type) int { if t == nil { gc.Fatal("optoas: t is nil") } a := int(obj.AXXX) switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0)) case gc.OEQ<<16 | gc.TBOOL, gc.OEQ<<16 | gc.TINT8, gc.OEQ<<16 | gc.TUINT8, gc.OEQ<<16 | gc.TINT16, gc.OEQ<<16 | gc.TUINT16, gc.OEQ<<16 | gc.TINT32, gc.OEQ<<16 | gc.TUINT32, gc.OEQ<<16 | gc.TINT64, gc.OEQ<<16 | gc.TUINT64, gc.OEQ<<16 | gc.TPTR32, gc.OEQ<<16 | gc.TPTR64, gc.OEQ<<16 | gc.TFLOAT32, gc.OEQ<<16 | gc.TFLOAT64: a = arm64.ABEQ case gc.ONE<<16 | gc.TBOOL, gc.ONE<<16 | gc.TINT8, gc.ONE<<16 | gc.TUINT8, gc.ONE<<16 | gc.TINT16, gc.ONE<<16 | gc.TUINT16, gc.ONE<<16 | gc.TINT32, gc.ONE<<16 | gc.TUINT32, gc.ONE<<16 | gc.TINT64, gc.ONE<<16 | gc.TUINT64, gc.ONE<<16 | gc.TPTR32, gc.ONE<<16 | gc.TPTR64, gc.ONE<<16 | gc.TFLOAT32, gc.ONE<<16 | gc.TFLOAT64: a = arm64.ABNE case gc.OLT<<16 | gc.TINT8, gc.OLT<<16 | gc.TINT16, gc.OLT<<16 | gc.TINT32, gc.OLT<<16 | gc.TINT64: a = arm64.ABLT case gc.OLT<<16 | gc.TUINT8, gc.OLT<<16 | gc.TUINT16, gc.OLT<<16 | gc.TUINT32, gc.OLT<<16 | gc.TUINT64, gc.OLT<<16 | gc.TFLOAT32, gc.OLT<<16 | gc.TFLOAT64: a = arm64.ABLO case gc.OLE<<16 | gc.TINT8, gc.OLE<<16 | gc.TINT16, gc.OLE<<16 | gc.TINT32, gc.OLE<<16 | gc.TINT64: a = arm64.ABLE case gc.OLE<<16 | gc.TUINT8, gc.OLE<<16 | gc.TUINT16, gc.OLE<<16 | gc.TUINT32, gc.OLE<<16 | gc.TUINT64, gc.OLE<<16 | gc.TFLOAT32, gc.OLE<<16 | gc.TFLOAT64: a = arm64.ABLS case gc.OGT<<16 | gc.TINT8, gc.OGT<<16 | gc.TINT16, gc.OGT<<16 | gc.TINT32, gc.OGT<<16 | gc.TINT64, gc.OGT<<16 | gc.TFLOAT32, gc.OGT<<16 | gc.TFLOAT64: a = arm64.ABGT case gc.OGT<<16 | gc.TUINT8, gc.OGT<<16 | gc.TUINT16, gc.OGT<<16 | gc.TUINT32, gc.OGT<<16 | gc.TUINT64: a = arm64.ABHI case gc.OGE<<16 | gc.TINT8, gc.OGE<<16 | gc.TINT16, gc.OGE<<16 | gc.TINT32, gc.OGE<<16 | gc.TINT64, gc.OGE<<16 | gc.TFLOAT32, gc.OGE<<16 | gc.TFLOAT64: a = arm64.ABGE case gc.OGE<<16 | gc.TUINT8, gc.OGE<<16 | gc.TUINT16, gc.OGE<<16 | gc.TUINT32, gc.OGE<<16 | gc.TUINT64: a = arm64.ABHS case gc.OCMP<<16 | gc.TBOOL, gc.OCMP<<16 | gc.TINT8, gc.OCMP<<16 | gc.TINT16, gc.OCMP<<16 | gc.TINT32, gc.OCMP<<16 | gc.TPTR32, gc.OCMP<<16 | gc.TINT64, gc.OCMP<<16 | gc.TUINT8, gc.OCMP<<16 | gc.TUINT16, gc.OCMP<<16 | gc.TUINT32, gc.OCMP<<16 | gc.TUINT64, gc.OCMP<<16 | gc.TPTR64: a = arm64.ACMP case gc.OCMP<<16 | gc.TFLOAT32: a = arm64.AFCMPS case gc.OCMP<<16 | gc.TFLOAT64: a = arm64.AFCMPD case gc.OAS<<16 | gc.TBOOL, gc.OAS<<16 | gc.TINT8: a = arm64.AMOVB case gc.OAS<<16 | gc.TUINT8: a = arm64.AMOVBU case gc.OAS<<16 | gc.TINT16: a = arm64.AMOVH case gc.OAS<<16 | gc.TUINT16: a = arm64.AMOVHU case gc.OAS<<16 | gc.TINT32: a = arm64.AMOVW case gc.OAS<<16 | gc.TUINT32, gc.OAS<<16 | gc.TPTR32: a = arm64.AMOVWU case gc.OAS<<16 | gc.TINT64, gc.OAS<<16 | gc.TUINT64, gc.OAS<<16 | gc.TPTR64: a = arm64.AMOVD case gc.OAS<<16 | gc.TFLOAT32: a = arm64.AFMOVS case gc.OAS<<16 | gc.TFLOAT64: a = arm64.AFMOVD case gc.OADD<<16 | gc.TINT8, gc.OADD<<16 | gc.TUINT8, gc.OADD<<16 | gc.TINT16, gc.OADD<<16 | gc.TUINT16, gc.OADD<<16 | gc.TINT32, gc.OADD<<16 | gc.TUINT32, gc.OADD<<16 | gc.TPTR32, gc.OADD<<16 | gc.TINT64, gc.OADD<<16 | gc.TUINT64, gc.OADD<<16 | gc.TPTR64: a = arm64.AADD case gc.OADD<<16 | gc.TFLOAT32: a = arm64.AFADDS case gc.OADD<<16 | gc.TFLOAT64: a = arm64.AFADDD case gc.OSUB<<16 | gc.TINT8, gc.OSUB<<16 | gc.TUINT8, gc.OSUB<<16 | gc.TINT16, gc.OSUB<<16 | gc.TUINT16, gc.OSUB<<16 | gc.TINT32, gc.OSUB<<16 | gc.TUINT32, gc.OSUB<<16 | gc.TPTR32, gc.OSUB<<16 | gc.TINT64, gc.OSUB<<16 | gc.TUINT64, gc.OSUB<<16 | gc.TPTR64: a = arm64.ASUB case gc.OSUB<<16 | gc.TFLOAT32: a = arm64.AFSUBS case gc.OSUB<<16 | gc.TFLOAT64: a = arm64.AFSUBD case gc.OMINUS<<16 | gc.TINT8, gc.OMINUS<<16 | gc.TUINT8, gc.OMINUS<<16 | gc.TINT16, gc.OMINUS<<16 | gc.TUINT16, gc.OMINUS<<16 | gc.TINT32, gc.OMINUS<<16 | gc.TUINT32, gc.OMINUS<<16 | gc.TPTR32, gc.OMINUS<<16 | gc.TINT64, gc.OMINUS<<16 | gc.TUINT64, gc.OMINUS<<16 | gc.TPTR64: a = arm64.ANEG case gc.OMINUS<<16 | gc.TFLOAT32: a = arm64.AFNEGS case gc.OMINUS<<16 | gc.TFLOAT64: a = arm64.AFNEGD case gc.OAND<<16 | gc.TINT8, gc.OAND<<16 | gc.TUINT8, gc.OAND<<16 | gc.TINT16, gc.OAND<<16 | gc.TUINT16, gc.OAND<<16 | gc.TINT32, gc.OAND<<16 | gc.TUINT32, gc.OAND<<16 | gc.TPTR32, gc.OAND<<16 | gc.TINT64, gc.OAND<<16 | gc.TUINT64, gc.OAND<<16 | gc.TPTR64: a = arm64.AAND case gc.OOR<<16 | gc.TINT8, gc.OOR<<16 | gc.TUINT8, gc.OOR<<16 | gc.TINT16, gc.OOR<<16 | gc.TUINT16, gc.OOR<<16 | gc.TINT32, gc.OOR<<16 | gc.TUINT32, gc.OOR<<16 | gc.TPTR32, gc.OOR<<16 | gc.TINT64, gc.OOR<<16 | gc.TUINT64, gc.OOR<<16 | gc.TPTR64: a = arm64.AORR case gc.OXOR<<16 | gc.TINT8, gc.OXOR<<16 | gc.TUINT8, gc.OXOR<<16 | gc.TINT16, gc.OXOR<<16 | gc.TUINT16, gc.OXOR<<16 | gc.TINT32, gc.OXOR<<16 | gc.TUINT32, gc.OXOR<<16 | gc.TPTR32, gc.OXOR<<16 | gc.TINT64, gc.OXOR<<16 | gc.TUINT64, gc.OXOR<<16 | gc.TPTR64: a = arm64.AEOR // TODO(minux): handle rotates //case CASE(OLROT, TINT8): //case CASE(OLROT, TUINT8): //case CASE(OLROT, TINT16): //case CASE(OLROT, TUINT16): //case CASE(OLROT, TINT32): //case CASE(OLROT, TUINT32): //case CASE(OLROT, TPTR32): //case CASE(OLROT, TINT64): //case CASE(OLROT, TUINT64): //case CASE(OLROT, TPTR64): // a = 0//???; RLDC? // break; case gc.OLSH<<16 | gc.TINT8, gc.OLSH<<16 | gc.TUINT8, gc.OLSH<<16 | gc.TINT16, gc.OLSH<<16 | gc.TUINT16, gc.OLSH<<16 | gc.TINT32, gc.OLSH<<16 | gc.TUINT32, gc.OLSH<<16 | gc.TPTR32, gc.OLSH<<16 | gc.TINT64, gc.OLSH<<16 | gc.TUINT64, gc.OLSH<<16 | gc.TPTR64: a = arm64.ALSL case gc.ORSH<<16 | gc.TUINT8, gc.ORSH<<16 | gc.TUINT16, gc.ORSH<<16 | gc.TUINT32, gc.ORSH<<16 | gc.TPTR32, gc.ORSH<<16 | gc.TUINT64, gc.ORSH<<16 | gc.TPTR64: a = arm64.ALSR case gc.ORSH<<16 | gc.TINT8, gc.ORSH<<16 | gc.TINT16, gc.ORSH<<16 | gc.TINT32, gc.ORSH<<16 | gc.TINT64: a = arm64.AASR // TODO(minux): handle rotates //case CASE(ORROTC, TINT8): //case CASE(ORROTC, TUINT8): //case CASE(ORROTC, TINT16): //case CASE(ORROTC, TUINT16): //case CASE(ORROTC, TINT32): //case CASE(ORROTC, TUINT32): //case CASE(ORROTC, TINT64): //case CASE(ORROTC, TUINT64): // a = 0//??? RLDC?? // break; case gc.OHMUL<<16 | gc.TINT64: a = arm64.ASMULH case gc.OHMUL<<16 | gc.TUINT64, gc.OHMUL<<16 | gc.TPTR64: a = arm64.AUMULH case gc.OMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TINT32: a = arm64.ASMULL case gc.OMUL<<16 | gc.TINT64: a = arm64.AMUL case gc.OMUL<<16 | gc.TUINT8, gc.OMUL<<16 | gc.TUINT16, gc.OMUL<<16 | gc.TUINT32, gc.OMUL<<16 | gc.TPTR32: // don't use word multiply, the high 32-bit are undefined. a = arm64.AUMULL case gc.OMUL<<16 | gc.TUINT64, gc.OMUL<<16 | gc.TPTR64: a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter. case gc.OMUL<<16 | gc.TFLOAT32: a = arm64.AFMULS case gc.OMUL<<16 | gc.TFLOAT64: a = arm64.AFMULD case gc.ODIV<<16 | gc.TINT8, gc.ODIV<<16 | gc.TINT16, gc.ODIV<<16 | gc.TINT32, gc.ODIV<<16 | gc.TINT64: a = arm64.ASDIV case gc.ODIV<<16 | gc.TUINT8, gc.ODIV<<16 | gc.TUINT16, gc.ODIV<<16 | gc.TUINT32, gc.ODIV<<16 | gc.TPTR32, gc.ODIV<<16 | gc.TUINT64, gc.ODIV<<16 | gc.TPTR64: a = arm64.AUDIV case gc.ODIV<<16 | gc.TFLOAT32: a = arm64.AFDIVS case gc.ODIV<<16 | gc.TFLOAT64: a = arm64.AFDIVD } return a } const ( ODynam = 1 << 0 OAddable = 1 << 1 ) func xgen(n *gc.Node, a *gc.Node, o int) bool { // TODO(minux) return -1 != 0 /*TypeKind(100016)*/ } func sudoclean() { return } /* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { // TODO(minux) *a = obj.Addr{} return false }
func anyregalloc() bool { var j int for i := x86.REG_AX; i <= x86.REG_R15; i++ { if reg[i] == 0 { goto ok } for j = 0; j < len(resvd); j++ { if resvd[j] == i { goto ok } } return true ok: } return false } var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32 /* * allocate register of type t, leave in n. * if o != N, o is desired fixed register. * caller must regfree(n). */ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) { if t == nil { gc.Fatal("regalloc: t nil") } et := int(gc.Simtype[t.Etype]) var i int switch et { case gc.TINT8, gc.TUINT8, gc.TINT16, gc.TUINT16, gc.TINT32, gc.TUINT32, gc.TINT64, gc.TUINT64, gc.TPTR32, gc.TPTR64, gc.TBOOL: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= x86.REG_AX && i <= x86.REG_R15 { goto out } } for i = x86.REG_AX; i <= x86.REG_R15; i++ { if reg[i] == 0 { regpc[i-x86.REG_AX] = uint32(obj.Getcallerpc(&n)) goto out } } gc.Flusherrors() for i := 0; i+x86.REG_AX <= x86.REG_R15; i++ { fmt.Printf("%d %p\n", i, regpc[i]) } gc.Fatal("out of fixed registers") case gc.TFLOAT32, gc.TFLOAT64: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= x86.REG_X0 && i <= x86.REG_X15 { goto out } } for i = x86.REG_X0; i <= x86.REG_X15; i++ { if reg[i] == 0 { goto out } } gc.Fatal("out of floating registers") case gc.TCOMPLEX64, gc.TCOMPLEX128: gc.Tempname(n, t) return } gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0)) return out: reg[i]++ gc.Nodreg(n, t, i) } func regfree(n *gc.Node) { if n.Op == gc.ONAME { return } if n.Op != gc.OREGISTER && n.Op != gc.OINDREG { gc.Fatal("regfree: not a register") } i := int(n.Val.U.Reg) if i == x86.REG_SP { return } if i < 0 || i >= len(reg) { gc.Fatal("regfree: reg out of range") } if reg[i] <= 0 { gc.Fatal("regfree: reg not allocated") } reg[i]-- if reg[i] == 0 && x86.REG_AX <= i && i <= x86.REG_R15 { regpc[i-x86.REG_AX] = 0 } } /* * generate * as $c, reg */ func gconreg(as int, c int64, reg int) { var nr gc.Node switch as { case x86.AADDL, x86.AMOVL, x86.ALEAL: gc.Nodreg(&nr, gc.Types[gc.TINT32], reg) default: gc.Nodreg(&nr, gc.Types[gc.TINT64], reg) } ginscon(as, c, &nr) } /* * generate * as $c, n */ func ginscon(as int, c int64, n2 *gc.Node) { var n1 gc.Node switch as { case x86.AADDL, x86.AMOVL, x86.ALEAL: gc.Nodconst(&n1, gc.Types[gc.TINT32], c) default: gc.Nodconst(&n1, gc.Types[gc.TINT64], c) } if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) { // cannot have 64-bit immediate in ADD, etc. // instead, MOV into register first. var ntmp gc.Node regalloc(&ntmp, gc.Types[gc.TINT64], nil) gins(x86.AMOVQ, &n1, &ntmp) gins(as, &ntmp, n2) regfree(&ntmp) return } gins(as, &n1, n2) } /* * set up nodes representing 2^63 */ var bigi gc.Node var bigf gc.Node var bignodes_did int func bignodes() { if bignodes_did != 0 { return } bignodes_did = 1 gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1) gc.Mpshiftfix(bigi.Val.U.Xval, 63) bigf = bigi bigf.Type = gc.Types[gc.TFLOAT64] bigf.Val.Ctype = gc.CTFLT bigf.Val.U.Fval = new(gc.Mpflt) gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval) } /* * generate move: * t = f * hard part is conversions. */ func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } // cannot have two memory operands var a int if gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = tt // so big switch will choose a simple mov // some constants can't move directly to memory. if gc.Ismem(t) { // float constants come from memory. if gc.Isfloat[tt] { goto hard } // 64-bit immediates are really 32-bit sign-extended // unless moving into a register. if gc.Isint[tt] { if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Minintval[gc.TINT32]) < 0 { goto hard } if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Maxintval[gc.TINT32]) > 0 { goto hard } } } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong)) /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TUINT8, gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT64<<16 | gc.TINT8, gc.TUINT64<<16 | gc.TINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: a = x86.AMOVB case gc.TINT16<<16 | gc.TINT16, // same size gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TUINT16, gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT64<<16 | gc.TINT16, gc.TUINT64<<16 | gc.TINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: a = x86.AMOVW case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = x86.AMOVL case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: a = x86.AMOVQL case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: a = x86.AMOVQ /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16: a = x86.AMOVBWSX goto rdst case gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = x86.AMOVBLSX goto rdst case gc.TINT8<<16 | gc.TINT64, gc.TINT8<<16 | gc.TUINT64: a = x86.AMOVBQSX goto rdst case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16: a = x86.AMOVBWZX goto rdst case gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = x86.AMOVBLZX goto rdst case gc.TUINT8<<16 | gc.TINT64, gc.TUINT8<<16 | gc.TUINT64: a = x86.AMOVBQZX goto rdst case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = x86.AMOVWLSX goto rdst case gc.TINT16<<16 | gc.TINT64, gc.TINT16<<16 | gc.TUINT64: a = x86.AMOVWQSX goto rdst case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = x86.AMOVWLZX goto rdst case gc.TUINT16<<16 | gc.TINT64, gc.TUINT16<<16 | gc.TUINT64: a = x86.AMOVWQZX goto rdst case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: a = x86.AMOVLQSX goto rdst // AMOVL into a register zeros the top of the register, // so this is not always necessary, but if we rely on AMOVL // the optimizer is almost certain to screw with us. case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: a = x86.AMOVLQZX goto rdst /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT32: a = x86.ACVTTSS2SL goto rdst case gc.TFLOAT64<<16 | gc.TINT32: a = x86.ACVTTSD2SL goto rdst case gc.TFLOAT32<<16 | gc.TINT64: a = x86.ACVTTSS2SQ goto rdst case gc.TFLOAT64<<16 | gc.TINT64: a = x86.ACVTTSD2SQ goto rdst // convert via int32. case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: cvt = gc.Types[gc.TINT32] goto hard // convert via int64. case gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32: cvt = gc.Types[gc.TINT64] goto hard // algorithm is: // if small enough, use native float64 -> int64 conversion. // otherwise, subtract 2^63, convert, and add it back. case gc.TFLOAT32<<16 | gc.TUINT64, gc.TFLOAT64<<16 | gc.TUINT64: a := x86.ACVTTSS2SQ if ft == gc.TFLOAT64 { a = x86.ACVTTSD2SQ } bignodes() var r1 gc.Node regalloc(&r1, gc.Types[ft], nil) var r2 gc.Node regalloc(&r2, gc.Types[tt], t) var r3 gc.Node regalloc(&r3, gc.Types[ft], nil) var r4 gc.Node regalloc(&r4, gc.Types[tt], nil) gins(optoas(gc.OAS, f.Type), f, &r1) gins(optoas(gc.OCMP, f.Type), &bigf, &r1) p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1) gins(a, &r1, &r2) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) gins(optoas(gc.OAS, f.Type), &bigf, &r3) gins(optoas(gc.OSUB, f.Type), &r3, &r1) gins(a, &r1, &r2) gins(x86.AMOVQ, &bigi, &r4) gins(x86.AXORQ, &r4, &r2) gc.Patch(p2, gc.Pc) gmove(&r2, t) regfree(&r4) regfree(&r3) regfree(&r2) regfree(&r1) return /* * integer to float */ case gc.TINT32<<16 | gc.TFLOAT32: a = x86.ACVTSL2SS goto rdst case gc.TINT32<<16 | gc.TFLOAT64: a = x86.ACVTSL2SD goto rdst case gc.TINT64<<16 | gc.TFLOAT32: a = x86.ACVTSQ2SS goto rdst case gc.TINT64<<16 | gc.TFLOAT64: a = x86.ACVTSQ2SD goto rdst // convert via int32 case gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hard // convert via int64. case gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT64] goto hard // algorithm is: // if small enough, use native int64 -> uint64 conversion. // otherwise, halve (rounding to odd?), convert, and double. case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: a := x86.ACVTSQ2SS if tt == gc.TFLOAT64 { a = x86.ACVTSQ2SD } var zero gc.Node gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0) var one gc.Node gc.Nodconst(&one, gc.Types[gc.TUINT64], 1) var r1 gc.Node regalloc(&r1, f.Type, f) var r2 gc.Node regalloc(&r2, t.Type, t) var r3 gc.Node regalloc(&r3, f.Type, nil) var r4 gc.Node regalloc(&r4, f.Type, nil) gmove(f, &r1) gins(x86.ACMPQ, &r1, &zero) p1 := gc.Gbranch(x86.AJLT, nil, +1) gins(a, &r1, &r2) p2 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) gmove(&r1, &r3) gins(x86.ASHRQ, &one, &r3) gmove(&r1, &r4) gins(x86.AANDL, &one, &r4) gins(x86.AORQ, &r4, &r3) gins(a, &r3, &r2) gins(optoas(gc.OADD, t.Type), &r2, &r2) gc.Patch(p2, gc.Pc) gmove(&r2, t) regfree(&r4) regfree(&r3) regfree(&r2) regfree(&r1) return /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = x86.AMOVSS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = x86.AMOVSD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = x86.ACVTSS2SD goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = x86.ACVTSD2SS goto rdst } gins(a, f, t) return // requires register destination rdst: { var r1 gc.Node regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } // requires register intermediate hard: var r1 gc.Node regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return } func samaddr(f *gc.Node, t *gc.Node) bool { if f.Op != t.Op { return false } switch f.Op { case gc.OREGISTER: if f.Val.U.Reg != t.Val.U.Reg { break } return true } return false } /* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { // Node nod; // if(f != N && f->op == OINDEX) { // regalloc(&nod, ®node, Z); // v = constnode.vconst; // cgen(f->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // regfree(&nod); // } // if(t != N && t->op == OINDEX) { // regalloc(&nod, ®node, Z); // v = constnode.vconst; // cgen(t->right, &nod); // constnode.vconst = v; // idx.reg = nod.reg; // regfree(&nod); // } switch as { case x86.AMOVB, x86.AMOVW, x86.AMOVL, x86.AMOVQ, x86.AMOVSS, x86.AMOVSD: if f != nil && t != nil && samaddr(f, t) { return nil } case x86.ALEAQ: if f != nil && gc.Isconst(f, gc.CTNIL) { gc.Fatal("gins LEAQ nil %v", gc.Tconv(f.Type, 0)) } } var af obj.Addr if f != nil { af = gc.Naddr(f) } var at obj.Addr if t != nil { at = gc.Naddr(t) } p := gc.Prog(as) if f != nil { p.From = af } if t != nil { p.To = at } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } w := int32(0) switch as { case x86.AMOVB: w = 1 case x86.AMOVW: w = 2 case x86.AMOVL: w = 4 case x86.AMOVQ: w = 8 } if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Width > int64(w))) { gc.Dump("f", f) gc.Dump("t", t) gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width) } if p.To.Type == obj.TYPE_ADDR && w > 0 { gc.Fatal("bad use of addr: %v", p) } return p } func fixlargeoffset(n *gc.Node) { if n == nil { return } if n.Op != gc.OINDREG { return } if n.Val.U.Reg == x86.REG_SP { // stack offset cannot be large return } if n.Xoffset != int64(int32(n.Xoffset)) { // offset too large, add to register instead. a := *n a.Op = gc.OREGISTER a.Type = gc.Types[gc.Tptr] a.Xoffset = 0 gc.Cgen_checknil(&a) ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a) n.Xoffset = 0 } } /* * return Axxx for Oxxx on type t. */ func optoas(op int, t *gc.Type) int { if t == nil { gc.Fatal("optoas: t is nil") } a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0)) case gc.OADDR<<16 | gc.TPTR32: a = x86.ALEAL case gc.OADDR<<16 | gc.TPTR64: a = x86.ALEAQ case gc.OEQ<<16 | gc.TBOOL, gc.OEQ<<16 | gc.TINT8, gc.OEQ<<16 | gc.TUINT8, gc.OEQ<<16 | gc.TINT16, gc.OEQ<<16 | gc.TUINT16, gc.OEQ<<16 | gc.TINT32, gc.OEQ<<16 | gc.TUINT32, gc.OEQ<<16 | gc.TINT64, gc.OEQ<<16 | gc.TUINT64, gc.OEQ<<16 | gc.TPTR32, gc.OEQ<<16 | gc.TPTR64, gc.OEQ<<16 | gc.TFLOAT32, gc.OEQ<<16 | gc.TFLOAT64: a = x86.AJEQ case gc.ONE<<16 | gc.TBOOL, gc.ONE<<16 | gc.TINT8, gc.ONE<<16 | gc.TUINT8, gc.ONE<<16 | gc.TINT16, gc.ONE<<16 | gc.TUINT16, gc.ONE<<16 | gc.TINT32, gc.ONE<<16 | gc.TUINT32, gc.ONE<<16 | gc.TINT64, gc.ONE<<16 | gc.TUINT64, gc.ONE<<16 | gc.TPTR32, gc.ONE<<16 | gc.TPTR64, gc.ONE<<16 | gc.TFLOAT32, gc.ONE<<16 | gc.TFLOAT64: a = x86.AJNE case gc.OLT<<16 | gc.TINT8, gc.OLT<<16 | gc.TINT16, gc.OLT<<16 | gc.TINT32, gc.OLT<<16 | gc.TINT64: a = x86.AJLT case gc.OLT<<16 | gc.TUINT8, gc.OLT<<16 | gc.TUINT16, gc.OLT<<16 | gc.TUINT32, gc.OLT<<16 | gc.TUINT64: a = x86.AJCS case gc.OLE<<16 | gc.TINT8, gc.OLE<<16 | gc.TINT16, gc.OLE<<16 | gc.TINT32, gc.OLE<<16 | gc.TINT64: a = x86.AJLE case gc.OLE<<16 | gc.TUINT8, gc.OLE<<16 | gc.TUINT16, gc.OLE<<16 | gc.TUINT32, gc.OLE<<16 | gc.TUINT64: a = x86.AJLS case gc.OGT<<16 | gc.TINT8, gc.OGT<<16 | gc.TINT16, gc.OGT<<16 | gc.TINT32, gc.OGT<<16 | gc.TINT64: a = x86.AJGT case gc.OGT<<16 | gc.TUINT8, gc.OGT<<16 | gc.TUINT16, gc.OGT<<16 | gc.TUINT32, gc.OGT<<16 | gc.TUINT64, gc.OLT<<16 | gc.TFLOAT32, gc.OLT<<16 | gc.TFLOAT64: a = x86.AJHI case gc.OGE<<16 | gc.TINT8, gc.OGE<<16 | gc.TINT16, gc.OGE<<16 | gc.TINT32, gc.OGE<<16 | gc.TINT64: a = x86.AJGE case gc.OGE<<16 | gc.TUINT8, gc.OGE<<16 | gc.TUINT16, gc.OGE<<16 | gc.TUINT32, gc.OGE<<16 | gc.TUINT64, gc.OLE<<16 | gc.TFLOAT32, gc.OLE<<16 | gc.TFLOAT64: a = x86.AJCC case gc.OCMP<<16 | gc.TBOOL, gc.OCMP<<16 | gc.TINT8, gc.OCMP<<16 | gc.TUINT8: a = x86.ACMPB case gc.OCMP<<16 | gc.TINT16, gc.OCMP<<16 | gc.TUINT16: a = x86.ACMPW case gc.OCMP<<16 | gc.TINT32, gc.OCMP<<16 | gc.TUINT32, gc.OCMP<<16 | gc.TPTR32: a = x86.ACMPL case gc.OCMP<<16 | gc.TINT64, gc.OCMP<<16 | gc.TUINT64, gc.OCMP<<16 | gc.TPTR64: a = x86.ACMPQ case gc.OCMP<<16 | gc.TFLOAT32: a = x86.AUCOMISS case gc.OCMP<<16 | gc.TFLOAT64: a = x86.AUCOMISD case gc.OAS<<16 | gc.TBOOL, gc.OAS<<16 | gc.TINT8, gc.OAS<<16 | gc.TUINT8: a = x86.AMOVB case gc.OAS<<16 | gc.TINT16, gc.OAS<<16 | gc.TUINT16: a = x86.AMOVW case gc.OAS<<16 | gc.TINT32, gc.OAS<<16 | gc.TUINT32, gc.OAS<<16 | gc.TPTR32: a = x86.AMOVL case gc.OAS<<16 | gc.TINT64, gc.OAS<<16 | gc.TUINT64, gc.OAS<<16 | gc.TPTR64: a = x86.AMOVQ case gc.OAS<<16 | gc.TFLOAT32: a = x86.AMOVSS case gc.OAS<<16 | gc.TFLOAT64: a = x86.AMOVSD case gc.OADD<<16 | gc.TINT8, gc.OADD<<16 | gc.TUINT8: a = x86.AADDB case gc.OADD<<16 | gc.TINT16, gc.OADD<<16 | gc.TUINT16: a = x86.AADDW case gc.OADD<<16 | gc.TINT32, gc.OADD<<16 | gc.TUINT32, gc.OADD<<16 | gc.TPTR32: a = x86.AADDL case gc.OADD<<16 | gc.TINT64, gc.OADD<<16 | gc.TUINT64, gc.OADD<<16 | gc.TPTR64: a = x86.AADDQ case gc.OADD<<16 | gc.TFLOAT32: a = x86.AADDSS case gc.OADD<<16 | gc.TFLOAT64: a = x86.AADDSD case gc.OSUB<<16 | gc.TINT8, gc.OSUB<<16 | gc.TUINT8: a = x86.ASUBB case gc.OSUB<<16 | gc.TINT16, gc.OSUB<<16 | gc.TUINT16: a = x86.ASUBW case gc.OSUB<<16 | gc.TINT32, gc.OSUB<<16 | gc.TUINT32, gc.OSUB<<16 | gc.TPTR32: a = x86.ASUBL case gc.OSUB<<16 | gc.TINT64, gc.OSUB<<16 | gc.TUINT64, gc.OSUB<<16 | gc.TPTR64: a = x86.ASUBQ case gc.OSUB<<16 | gc.TFLOAT32: a = x86.ASUBSS case gc.OSUB<<16 | gc.TFLOAT64: a = x86.ASUBSD case gc.OINC<<16 | gc.TINT8, gc.OINC<<16 | gc.TUINT8: a = x86.AINCB case gc.OINC<<16 | gc.TINT16, gc.OINC<<16 | gc.TUINT16: a = x86.AINCW case gc.OINC<<16 | gc.TINT32, gc.OINC<<16 | gc.TUINT32, gc.OINC<<16 | gc.TPTR32: a = x86.AINCL case gc.OINC<<16 | gc.TINT64, gc.OINC<<16 | gc.TUINT64, gc.OINC<<16 | gc.TPTR64: a = x86.AINCQ case gc.ODEC<<16 | gc.TINT8, gc.ODEC<<16 | gc.TUINT8: a = x86.ADECB case gc.ODEC<<16 | gc.TINT16, gc.ODEC<<16 | gc.TUINT16: a = x86.ADECW case gc.ODEC<<16 | gc.TINT32, gc.ODEC<<16 | gc.TUINT32, gc.ODEC<<16 | gc.TPTR32: a = x86.ADECL case gc.ODEC<<16 | gc.TINT64, gc.ODEC<<16 | gc.TUINT64, gc.ODEC<<16 | gc.TPTR64: a = x86.ADECQ case gc.OMINUS<<16 | gc.TINT8, gc.OMINUS<<16 | gc.TUINT8: a = x86.ANEGB case gc.OMINUS<<16 | gc.TINT16, gc.OMINUS<<16 | gc.TUINT16: a = x86.ANEGW case gc.OMINUS<<16 | gc.TINT32, gc.OMINUS<<16 | gc.TUINT32, gc.OMINUS<<16 | gc.TPTR32: a = x86.ANEGL case gc.OMINUS<<16 | gc.TINT64, gc.OMINUS<<16 | gc.TUINT64, gc.OMINUS<<16 | gc.TPTR64: a = x86.ANEGQ case gc.OAND<<16 | gc.TINT8, gc.OAND<<16 | gc.TUINT8: a = x86.AANDB case gc.OAND<<16 | gc.TINT16, gc.OAND<<16 | gc.TUINT16: a = x86.AANDW case gc.OAND<<16 | gc.TINT32, gc.OAND<<16 | gc.TUINT32, gc.OAND<<16 | gc.TPTR32: a = x86.AANDL case gc.OAND<<16 | gc.TINT64, gc.OAND<<16 | gc.TUINT64, gc.OAND<<16 | gc.TPTR64: a = x86.AANDQ case gc.OOR<<16 | gc.TINT8, gc.OOR<<16 | gc.TUINT8: a = x86.AORB case gc.OOR<<16 | gc.TINT16, gc.OOR<<16 | gc.TUINT16: a = x86.AORW case gc.OOR<<16 | gc.TINT32, gc.OOR<<16 | gc.TUINT32, gc.OOR<<16 | gc.TPTR32: a = x86.AORL case gc.OOR<<16 | gc.TINT64, gc.OOR<<16 | gc.TUINT64, gc.OOR<<16 | gc.TPTR64: a = x86.AORQ case gc.OXOR<<16 | gc.TINT8, gc.OXOR<<16 | gc.TUINT8: a = x86.AXORB case gc.OXOR<<16 | gc.TINT16, gc.OXOR<<16 | gc.TUINT16: a = x86.AXORW case gc.OXOR<<16 | gc.TINT32, gc.OXOR<<16 | gc.TUINT32, gc.OXOR<<16 | gc.TPTR32: a = x86.AXORL case gc.OXOR<<16 | gc.TINT64, gc.OXOR<<16 | gc.TUINT64, gc.OXOR<<16 | gc.TPTR64: a = x86.AXORQ case gc.OLROT<<16 | gc.TINT8, gc.OLROT<<16 | gc.TUINT8: a = x86.AROLB case gc.OLROT<<16 | gc.TINT16, gc.OLROT<<16 | gc.TUINT16: a = x86.AROLW case gc.OLROT<<16 | gc.TINT32, gc.OLROT<<16 | gc.TUINT32, gc.OLROT<<16 | gc.TPTR32: a = x86.AROLL case gc.OLROT<<16 | gc.TINT64, gc.OLROT<<16 | gc.TUINT64, gc.OLROT<<16 | gc.TPTR64: a = x86.AROLQ case gc.OLSH<<16 | gc.TINT8, gc.OLSH<<16 | gc.TUINT8: a = x86.ASHLB case gc.OLSH<<16 | gc.TINT16, gc.OLSH<<16 | gc.TUINT16: a = x86.ASHLW case gc.OLSH<<16 | gc.TINT32, gc.OLSH<<16 | gc.TUINT32, gc.OLSH<<16 | gc.TPTR32: a = x86.ASHLL case gc.OLSH<<16 | gc.TINT64, gc.OLSH<<16 | gc.TUINT64, gc.OLSH<<16 | gc.TPTR64: a = x86.ASHLQ case gc.ORSH<<16 | gc.TUINT8: a = x86.ASHRB case gc.ORSH<<16 | gc.TUINT16: a = x86.ASHRW case gc.ORSH<<16 | gc.TUINT32, gc.ORSH<<16 | gc.TPTR32: a = x86.ASHRL case gc.ORSH<<16 | gc.TUINT64, gc.ORSH<<16 | gc.TPTR64: a = x86.ASHRQ case gc.ORSH<<16 | gc.TINT8: a = x86.ASARB case gc.ORSH<<16 | gc.TINT16: a = x86.ASARW case gc.ORSH<<16 | gc.TINT32: a = x86.ASARL case gc.ORSH<<16 | gc.TINT64: a = x86.ASARQ case gc.ORROTC<<16 | gc.TINT8, gc.ORROTC<<16 | gc.TUINT8: a = x86.ARCRB case gc.ORROTC<<16 | gc.TINT16, gc.ORROTC<<16 | gc.TUINT16: a = x86.ARCRW case gc.ORROTC<<16 | gc.TINT32, gc.ORROTC<<16 | gc.TUINT32: a = x86.ARCRL case gc.ORROTC<<16 | gc.TINT64, gc.ORROTC<<16 | gc.TUINT64: a = x86.ARCRQ case gc.OHMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TINT8, gc.OMUL<<16 | gc.TUINT8: a = x86.AIMULB case gc.OHMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TINT16, gc.OMUL<<16 | gc.TUINT16: a = x86.AIMULW case gc.OHMUL<<16 | gc.TINT32, gc.OMUL<<16 | gc.TINT32, gc.OMUL<<16 | gc.TUINT32, gc.OMUL<<16 | gc.TPTR32: a = x86.AIMULL case gc.OHMUL<<16 | gc.TINT64, gc.OMUL<<16 | gc.TINT64, gc.OMUL<<16 | gc.TUINT64, gc.OMUL<<16 | gc.TPTR64: a = x86.AIMULQ case gc.OHMUL<<16 | gc.TUINT8: a = x86.AMULB case gc.OHMUL<<16 | gc.TUINT16: a = x86.AMULW case gc.OHMUL<<16 | gc.TUINT32, gc.OHMUL<<16 | gc.TPTR32: a = x86.AMULL case gc.OHMUL<<16 | gc.TUINT64, gc.OHMUL<<16 | gc.TPTR64: a = x86.AMULQ case gc.OMUL<<16 | gc.TFLOAT32: a = x86.AMULSS case gc.OMUL<<16 | gc.TFLOAT64: a = x86.AMULSD case gc.ODIV<<16 | gc.TINT8, gc.OMOD<<16 | gc.TINT8: a = x86.AIDIVB case gc.ODIV<<16 | gc.TUINT8, gc.OMOD<<16 | gc.TUINT8: a = x86.ADIVB case gc.ODIV<<16 | gc.TINT16, gc.OMOD<<16 | gc.TINT16: a = x86.AIDIVW case gc.ODIV<<16 | gc.TUINT16, gc.OMOD<<16 | gc.TUINT16: a = x86.ADIVW case gc.ODIV<<16 | gc.TINT32, gc.OMOD<<16 | gc.TINT32: a = x86.AIDIVL case gc.ODIV<<16 | gc.TUINT32, gc.ODIV<<16 | gc.TPTR32, gc.OMOD<<16 | gc.TUINT32, gc.OMOD<<16 | gc.TPTR32: a = x86.ADIVL case gc.ODIV<<16 | gc.TINT64, gc.OMOD<<16 | gc.TINT64: a = x86.AIDIVQ case gc.ODIV<<16 | gc.TUINT64, gc.ODIV<<16 | gc.TPTR64, gc.OMOD<<16 | gc.TUINT64, gc.OMOD<<16 | gc.TPTR64: a = x86.ADIVQ case gc.OEXTEND<<16 | gc.TINT16: a = x86.ACWD case gc.OEXTEND<<16 | gc.TINT32: a = x86.ACDQ case gc.OEXTEND<<16 | gc.TINT64: a = x86.ACQO case gc.ODIV<<16 | gc.TFLOAT32: a = x86.ADIVSS case gc.ODIV<<16 | gc.TFLOAT64: a = x86.ADIVSD } return a } const ( ODynam = 1 << 0 OAddable = 1 << 1 ) var clean [20]gc.Node var cleani int = 0 func xgen(n *gc.Node, a *gc.Node, o int) bool { regalloc(a, gc.Types[gc.Tptr], nil) if o&ODynam != 0 { if n.Addable != 0 { if n.Op != gc.OINDREG { if n.Op != gc.OREGISTER { return true } } } } agen(n, a) return false } func sudoclean() { if clean[cleani-1].Op != gc.OEMPTY { regfree(&clean[cleani-1]) } if clean[cleani-2].Op != gc.OEMPTY { regfree(&clean[cleani-2]) } cleani -= 2 } /* * generate code to compute address of n, * a reference to a (perhaps nested) field inside * an array or struct. * return 0 on failure, 1 on success. * on success, leaves usable address in a. * * caller is responsible for calling sudoclean * after successful sudoaddable, * to release the register used for a. */ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { if n.Type == nil { return false } *a = obj.Addr{} switch n.Op { case gc.OLITERAL: if !gc.Isconst(n, gc.CTINT) { break } v := gc.Mpgetfix(n.Val.U.Xval) if v >= 32000 || v <= -32000 { break } switch as { default: return false case x86.AADDB, x86.AADDW, x86.AADDL, x86.AADDQ, x86.ASUBB, x86.ASUBW, x86.ASUBL, x86.ASUBQ, x86.AANDB, x86.AANDW, x86.AANDL, x86.AANDQ, x86.AORB, x86.AORW, x86.AORL, x86.AORQ, x86.AXORB, x86.AXORW, x86.AXORL, x86.AXORQ, x86.AINCB, x86.AINCW, x86.AINCL, x86.AINCQ, x86.ADECB, x86.ADECW, x86.ADECL, x86.ADECQ, x86.AMOVB, x86.AMOVW, x86.AMOVL, x86.AMOVQ: break } cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY *a = gc.Naddr(n) return true case gc.ODOT, gc.ODOTPTR: cleani += 2 reg := &clean[cleani-1] reg1 := &clean[cleani-2] reg.Op = gc.OEMPTY reg1.Op = gc.OEMPTY var nn *gc.Node var oary [10]int64 o := gc.Dotoffset(n, oary[:], &nn) if nn == nil { sudoclean() return false } if nn.Addable != 0 && o == 1 && oary[0] >= 0 { // directly addressable set of DOTs n1 := *nn n1.Type = n.Type n1.Xoffset += oary[0] *a = gc.Naddr(&n1) return true } regalloc(reg, gc.Types[gc.Tptr], nil) n1 := *reg n1.Op = gc.OINDREG if oary[0] >= 0 { agen(nn, reg) n1.Xoffset = oary[0] } else { cgen(nn, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[0] + 1) } for i := 1; i < o; i++ { if oary[i] >= 0 { gc.Fatal("can't happen") } gins(movptr, &n1, reg) gc.Cgen_checknil(reg) n1.Xoffset = -(oary[i] + 1) } a.Type = obj.TYPE_NONE a.Index = obj.TYPE_NONE fixlargeoffset(&n1) *a = gc.Naddr(&n1) return true case gc.OINDEX: return false } return false }