/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case obj.ACALL:
		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
			if gc.Ctxt.Flag_shared != 0 {
				// Make sure function pointer is in R12 as well when
				// compiling Go into PIC.
				// TODO(mwhudson): it would obviously be better to
				// change the register allocation to put the value in
				// R12 already, but I don't know how to do that.
				q := gc.Prog(as)
				q.As = ppc64.AMOVD
				q.From = p.To
				q.To.Type = obj.TYPE_REG
				q.To.Reg = ppc64.REG_R12
			}

			pp := gc.Prog(as)
			pp.From = p.From
			pp.To.Type = obj.TYPE_REG
			pp.To.Reg = ppc64.REG_CTR

			p.As = ppc64.AMOVD
			p.From = p.To
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REG_CTR

			if gc.Ctxt.Flag_shared != 0 {
				// When compiling Go into PIC, the function we just
				// called via pointer might have been implemented in
				// a separate module and so overwritten the TOC
				// pointer in R2; reload it.
				q := gc.Prog(ppc64.AMOVD)
				q.From.Type = obj.TYPE_MEM
				q.From.Offset = 24
				q.From.Reg = ppc64.REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = ppc64.REG_R2
			}

			if gc.Debug['g'] != 0 {
				fmt.Printf("%v\n", p)
				fmt.Printf("%v\n", pp)
			}

			return pp
		}

	// Bad things the front end has done to us. Crash to find call stack.
	case ppc64.AAND, ppc64.AMULLD:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}

	case ppc64.ACMP, ppc64.ACMPU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case ppc64.AMOVB, ppc64.AMOVBU, ppc64.AMOVBZ, ppc64.AMOVBZU:
		w = 1

	case ppc64.AMOVH, ppc64.AMOVHU, ppc64.AMOVHZ, ppc64.AMOVHZU:
		w = 2

	case ppc64.AMOVW, ppc64.AMOVWU, ppc64.AMOVWZ, ppc64.AMOVWZU:
		w = 4

	case ppc64.AMOVD, ppc64.AMOVDU:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}
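
// The following is an illustrative sketch, not part of the compiler: the
// obj.ACALL case above rewrites "CALL Rn" into "MOVD Rn, CTR; CALL CTR",
// adds "MOVD Rn, R12" before the call when compiling PIC (the callee
// expects its own address in R12), and reloads the TOC pointer R2 from its
// stack save slot afterwards. The helper below mirrors that sequence as
// plain strings; the name ppc64CallSeq and the exact mnemonic spellings are
// assumptions made for illustration only.
func ppc64CallSeq(reg string, shared bool) []string {
	seq := []string{"MOVD " + reg + ", CTR"} // move the function pointer into the count register
	if shared {
		seq = append(seq, "MOVD "+reg+", R12") // PIC: callee address must also be in R12
	}
	seq = append(seq, "CALL CTR") // call through CTR
	if shared {
		// The callee may live in another module and clobber R2 (TOC);
		// reload it from offset 24 on the stack (REGSP is R1 on ppc64).
		seq = append(seq, "MOVD 24(R1), R2")
	}
	return seq
}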
/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;

	//	if(f != N && f->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}
	//	if(t != N && t->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}

	if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
		// Turn MOVL $xxx into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
		if as == x86.AMOVL {
			as = x86.ALEAL
		} else {
			as = x86.ALEAQ
		}
		f = f.Left
	}

	switch as {
	case x86.AMOVB, x86.AMOVW, x86.AMOVL, x86.AMOVQ, x86.AMOVSS, x86.AMOVSD:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAQ:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatalf("gins LEAQ nil %v", f.Type)
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4

	case x86.AMOVQ:
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatalf("bad use of addr: %v", p)
	}

	return p
}
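
// Illustrative sketch, not compiler code: the OADDR rewrite above replaces
// "MOVL/MOVQ $addr, dst" with the equivalent "LEAL/LEAQ addr, dst", because
// the rest of this backend historically only expects LEA for address
// materialization. The helper below shows the opcode substitution on plain
// strings; leaForMov is a hypothetical name used for illustration.
func leaForMov(op string, srcIsAddrOf bool) string {
	if !srcIsAddrOf {
		return op // only address-of sources are rewritten
	}
	switch op {
	case "MOVL":
		return "LEAL"
	case "MOVQ":
		return "LEAQ"
	}
	return op
}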
/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;
	//	int32 v;

	if f != nil && f.Op == gc.OINDEX {
		gc.Fatalf("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	if t != nil && t.Op == gc.OINDEX {
		gc.Fatalf("gins OINDEX not implemented")
	}

	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm.ABL:
		if p.To.Type == obj.TYPE_REG {
			p.To.Type = obj.TYPE_MEM
		}

	case arm.ACMP, arm.ACMPF, arm.ACMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				/* generate a comparison
				TODO(kaib): one of the args can actually be a small constant.
				relax the constraint and fix call sites.
				*/
				gc.Fatalf("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}

	case arm.AMULU:
		if f != nil && f.Op != gc.OREGISTER {
			gc.Fatalf("bad operands to mul")
		}

	case arm.AMOVW:
		if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
			gc.Fatalf("gins double memory")
		}

	case arm.AADD:
		if p.To.Type == obj.TYPE_MEM {
			gc.Fatalf("gins arith to mem")
		}

	case arm.ARSB:
		if p.From.Type == obj.TYPE_NONE {
			gc.Fatalf("rsb with no from")
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}
	return p
}
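
// Illustrative sketch, not compiler code: for ACMP/ACMPF/ACMPD the front
// end calls gins with two operands, "CMP f, t", but in the obj
// representation a comparison has no destination, so the case above moves
// t's address into From, clears To, and lets raddr record f's register in
// the Reg field. The struct and helper below mirror that operand shuffle on
// plain strings; their names are hypothetical.
type cmpOperands struct {
	From string // receives the operand passed as t
	Reg  string // receives the register passed as f
	To   string // left empty after the rewrite
}

func shuffleCmp(f, t string) cmpOperands {
	return cmpOperands{From: t, Reg: f}
}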
/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
		gc.Fatalf("gins MOVF reg, reg")
	}
	if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
		gc.Fatalf("gins CVTSD2SS const")
	}
	if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Reg == x86.REG_F0 {
		gc.Fatalf("gins MOVSD into F0")
	}

	if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
		// Turn MOVL $xxx(FP/SP) into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
		as = x86.ALEAL
		f = f.Left
	}

	switch as {
	case x86.AMOVB, x86.AMOVW, x86.AMOVL:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAL:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatalf("gins LEAL nil %v", f.Type)
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := 0
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4
	}

	if true && w != 0 && f != nil && (p.From.Width > int64(w) || p.To.Width > int64(w)) {
		gc.Dump("bad width from:", f)
		gc.Dump("bad width to:", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatalf("bad use of addr: %v", p)
	}

	return p
}
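
// Illustrative sketch, not compiler code: like the other backends, this
// gins cross-checks the operand widths recorded in the Prog against the
// width implied by the move opcode, so a move that would silently truncate
// or over-read is reported via gc.Fatalf. The standalone table below
// restates the opcode-to-width mapping; moveWidth is a hypothetical name
// used for illustration.
func moveWidth(op string) int {
	switch op {
	case "MOVB":
		return 1 // byte
	case "MOVW":
		return 2 // 16-bit word
	case "MOVL":
		return 4 // 32-bit long
	}
	return 0 // not a width-checked move on 386
}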
/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case obj.ACALL:
		if p.To.Type == obj.TYPE_REG {
			// Allow front end to emit CALL REG, and rewrite into CALL (REG).
			p.From = obj.Addr{}
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 0

			if gc.Debug['g'] != 0 {
				fmt.Printf("%v\n", p)
			}

			return p
		}

	// Bad things the front end has done to us. Crash to find call stack.
	case mips.AAND:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}

	case mips.ASGT, mips.ASGTU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}

	// Special cases
	case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}

		pp := gc.Prog(mips.AMOVV)
		pp.From.Type = obj.TYPE_REG
		pp.From.Reg = mips.REG_LO
		pp.To = p.To

		p.Reg = p.To.Reg
		p.To = obj.Addr{}

	case mips.ASUBVU:
		// unary
		if f == nil {
			p.From = p.To
			p.Reg = mips.REGZERO
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case mips.AMOVB, mips.AMOVBU:
		w = 1

	case mips.AMOVH, mips.AMOVHU:
		w = 2

	case mips.AMOVW, mips.AMOVWU:
		w = 4

	case mips.AMOVV:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}
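
// Illustrative sketch, not compiler code: on mips64 the MUL/MULV family
// leaves its result in the HI/LO register pair, so the "Special cases"
// branch above strips the destination from the multiply itself and appends
// a MOVV that copies LO into the destination register. The helper below
// expresses that rewrite as plain strings; mipsMulSeq is a hypothetical
// name used for illustration.
func mipsMulSeq(x, dst string) []string {
	return []string{
		"MULV " + x + ", " + dst, // multiply; result lands in HI/LO
		"MOVV LO, " + dst,        // copy the low doubleword into the destination
	}
}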
/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				gc.Fatalf("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}
	}

	// Bad things the front end has done to us. Crash to find call stack.
	switch as {
	case arm64.AAND, arm64.AMUL:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}

	case arm64.ACMP:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatalf("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case arm64.AMOVB, arm64.AMOVBU:
		w = 1

	case arm64.AMOVH, arm64.AMOVHU:
		w = 2

	case arm64.AMOVW, arm64.AMOVWU:
		w = 4

	case arm64.AMOVD:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}