func gclean() {
    for _, r := range Thearch.ReservedRegs {
        reg[r-Thearch.REGMIN]--
    }

    for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
        n := reg[r-Thearch.REGMIN]
        if n != 0 {
            if Debug['v'] != 0 {
                Regdump()
            }
            Yyerror("reg %v left allocated", obj.Rconv(r))
        }
    }

    for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
        n := reg[r-Thearch.REGMIN]
        if n != 0 {
            if Debug['v'] != 0 {
                Regdump()
            }
            Yyerror("reg %v left allocated", obj.Rconv(r))
        }
    }
}
func archPPC64() *Arch {
    register := make(map[string]int16)
    // Create maps for easy lookup of instruction names etc.
    // Note that there is no list of names as there is for x86.
    for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := ppc64.REG_CR0; i <= ppc64.REG_CR7; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    register["CR"] = ppc64.REG_CR
    register["XER"] = ppc64.REG_XER
    register["LR"] = ppc64.REG_LR
    register["CTR"] = ppc64.REG_CTR
    register["FPSCR"] = ppc64.REG_FPSCR
    register["MSR"] = ppc64.REG_MSR
    // Pseudo-registers.
    register["SB"] = RSB
    register["FP"] = RFP
    register["PC"] = RPC
    // Avoid unintentionally clobbering g using R30.
    delete(register, "R30")
    register["g"] = ppc64.REG_R30
    registerPrefix := map[string]bool{
        "CR":  true,
        "F":   true,
        "R":   true,
        "SPR": true,
    }

    instructions := make(map[string]int)
    for i, s := range obj.Anames {
        instructions[s] = i
    }
    for i, s := range ppc64.Anames {
        if i >= obj.A_ARCHSPECIFIC {
            instructions[s] = i + obj.ABasePPC64
        }
    }
    // Annoying aliases.
    instructions["BR"] = ppc64.ABR
    instructions["BL"] = ppc64.ABL
    instructions["RETURN"] = ppc64.ARETURN

    return &Arch{
        LinkArch:       &ppc64.Linkppc64,
        Instructions:   instructions,
        Register:       register,
        RegisterPrefix: registerPrefix,
        RegisterNumber: ppc64RegisterNumber,
        IsJump:         jumpPPC64,
    }
}
func archMips64() *Arch {
    register := make(map[string]int16)
    // Create maps for easy lookup of instruction names etc.
    // Note that there is no list of names as there is for x86.
    for i := mips.REG_R0; i <= mips.REG_R31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := mips.REG_F0; i <= mips.REG_F31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := mips.REG_M0; i <= mips.REG_M31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    register["HI"] = mips.REG_HI
    register["LO"] = mips.REG_LO
    // Pseudo-registers.
    register["SB"] = RSB
    register["FP"] = RFP
    register["PC"] = RPC
    // Avoid unintentionally clobbering g using R30.
    delete(register, "R30")
    register["g"] = mips.REG_R30
    // Avoid unintentionally clobbering RSB using R28.
    delete(register, "R28")
    register["RSB"] = mips.REG_R28
    registerPrefix := map[string]bool{
        "F":   true,
        "FCR": true,
        "M":   true,
        "R":   true,
    }

    instructions := make(map[string]obj.As)
    for i, s := range obj.Anames {
        instructions[s] = obj.As(i)
    }
    for i, s := range mips.Anames {
        if obj.As(i) >= obj.A_ARCHSPECIFIC {
            instructions[s] = obj.As(i) + obj.ABaseMIPS64
        }
    }
    // Annoying alias.
    instructions["JAL"] = mips.AJAL

    return &Arch{
        LinkArch:       &mips.Linkmips64,
        Instructions:   instructions,
        Register:       register,
        RegisterPrefix: registerPrefix,
        RegisterNumber: mipsRegisterNumber,
        IsJump:         jumpMIPS64,
    }
}
func archS390x() *Arch {
    register := make(map[string]int16)
    // Create maps for easy lookup of instruction names etc.
    // Note that there is no list of names as there is for x86.
    for i := s390x.REG_R0; i <= s390x.REG_R15; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := s390x.REG_F0; i <= s390x.REG_F15; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := s390x.REG_V0; i <= s390x.REG_V31; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    register["LR"] = s390x.REG_LR
    // Pseudo-registers.
    register["SB"] = RSB
    register["FP"] = RFP
    register["PC"] = RPC
    // Avoid unintentionally clobbering g using R13.
    delete(register, "R13")
    register["g"] = s390x.REG_R13
    registerPrefix := map[string]bool{
        "AR": true,
        "F":  true,
        "R":  true,
    }

    instructions := make(map[string]obj.As)
    for i, s := range obj.Anames {
        instructions[s] = obj.As(i)
    }
    for i, s := range s390x.Anames {
        if obj.As(i) >= obj.A_ARCHSPECIFIC {
            instructions[s] = obj.As(i) + obj.ABaseS390X
        }
    }
    // Annoying aliases.
    instructions["BR"] = s390x.ABR
    instructions["BL"] = s390x.ABL

    return &Arch{
        LinkArch:       &s390x.Links390x,
        Instructions:   instructions,
        Register:       register,
        RegisterPrefix: registerPrefix,
        RegisterNumber: s390xRegisterNumber,
        IsJump:         jumpS390x,
    }
}
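// All four constructors above follow one pattern: a Register map from
// canonical spellings ("R29", "LR", "g") to register numbers, a
// RegisterPrefix set naming the prefixes that may appear in the indexed
// form R(10), and a RegisterNumber callback that resolves that form.
// Deleting "R30" before installing "g" makes the bare spelling illegal
// while keeping the register reachable under its dedicated name. Below
// is a standalone, illustrative sketch of the two lookup paths; the
// types and the tiny table are hypothetical stand-ins, not the real
// cmd/asm/internal/arch plumbing.
package main

import "fmt"

type arch struct {
    register       map[string]int16
    registerPrefix map[string]bool
    registerNumber func(prefix string, n int16) (int16, bool)
}

const regR0 = 32 // hypothetical base: R0..R31 occupy 32..63

func newTestArch() *arch {
    register := make(map[string]int16)
    for i := int16(0); i < 32; i++ {
        register[fmt.Sprintf("R%d", i)] = regR0 + i
    }
    // Avoid unintentionally clobbering g using R30 (same trick as above).
    delete(register, "R30")
    register["g"] = regR0 + 30
    return &arch{
        register:       register,
        registerPrefix: map[string]bool{"R": true},
        registerNumber: func(prefix string, n int16) (int16, bool) {
            if prefix != "R" || n < 0 || n > 31 {
                return 0, false
            }
            return regR0 + n, true
        },
    }
}

func main() {
    a := newTestArch()

    // Path 1: a bare name, straight out of the Register map.
    r, ok := a.register["g"]
    fmt.Println("g ->", r, ok)
    _, ok = a.register["R30"] // deleted: must be spelled "g"
    fmt.Println("R30 ->", ok)

    // Path 2: the indexed form R(29), gated by RegisterPrefix.
    if a.registerPrefix["R"] {
        r, ok = a.registerNumber("R", 29)
        fmt.Println("R(29) ->", r, ok)
    }
}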
func gclean() {
    for i := 0; i < len(resvd); i++ {
        reg[resvd[i]]--
    }

    for i := x86.REG_AX; i <= x86.REG_DI; i++ {
        if reg[i] != 0 {
            gc.Yyerror("reg %v left allocated at %x", obj.Rconv(i), regpc[i])
        }
    }

    for i := x86.REG_X0; i <= x86.REG_X7; i++ {
        if reg[i] != 0 {
            gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
        }
    }
}
func Regdump() {
    if Debug['v'] == 0 {
        fmt.Printf("run compiler with -v for register allocation sites\n")
        return
    }

    dump := func(r int) {
        stk := regstk[r-Thearch.REGMIN]
        if len(stk) == 0 {
            return
        }
        fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
        fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
    }

    for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
        if reg[r-Thearch.REGMIN] != 0 {
            dump(r)
        }
    }
    // Like the integer loop above, report floating-point registers that
    // are still allocated (count != 0).
    for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
        if reg[r-Thearch.REGMIN] != 0 {
            dump(r)
        }
    }
}
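// Regdump pairs with a recording side: at allocation time the caller's
// stack is captured into regstk[r-REGMIN], and the TrimSpace/Replace
// dance above re-indents that capture under the "allocated at:" header.
// A standalone, illustrative sketch of the same record-and-dump idea,
// assuming debug.Stack() as the capture mechanism (the regstk layout
// here is simplified):
package main

import (
    "fmt"
    "runtime/debug"
    "strings"
)

var regstk [4][]byte // one saved stack per register, as in the compiler

func alloc(r int) {
    regstk[r] = debug.Stack() // remember who allocated r
}

func dump(r int) {
    stk := regstk[r]
    if len(stk) == 0 {
        return
    }
    fmt.Printf("reg R%d allocated at:\n", r)
    // Indent every line of the capture by one tab, as Regdump does.
    fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
}

func main() {
    alloc(2)
    dump(2)
}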
func gclean() {
    for i := 0; i < len(resvd); i++ {
        reg[resvd[i]]--
    }

    for i := 0; i < len(reg); i++ {
        if reg[i] != 0 {
            gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
        }
    }
}
func gclean() {
    for i := 0; i < len(resvd); i++ {
        reg[resvd[i]-arm64.REG_R0]--
    }

    for i := 0; i < len(reg); i++ {
        if reg[i] != 0 {
            // regpc holds a PC value, so print it as hex rather than
            // with %p (which would reject a non-pointer argument).
            gc.Yyerror("reg %v left allocated, %#x\n", obj.Rconv(i+arm64.REG_R0), regpc[i])
        }
    }
}
func gclean() {
    for i := 0; i < len(resvd); i++ {
        reg[resvd[i]]--
    }
    if gc.Nacl {
        reg[x86.REG_BP]--
        reg[x86.REG_R15]--
    } else if obj.Framepointer_enabled != 0 {
        reg[x86.REG_BP]--
    }

    for i := x86.REG_AX; i <= x86.REG_R15; i++ {
        if reg[i] != 0 {
            gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
        }
    }

    for i := x86.REG_X0; i <= x86.REG_X15; i++ {
        if reg[i] != 0 {
            gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
        }
    }
}
func archArm() *Arch {
    register := make(map[string]int16)
    // Create maps for easy lookup of instruction names etc.
    // Note that there is no list of names as there is for x86.
    for i := arm.REG_R0; i < arm.REG_SPSR; i++ {
        register[obj.Rconv(i)] = int16(i)
    }
    // Avoid unintentionally clobbering g using R10.
    delete(register, "R10")
    register["g"] = arm.REG_R10
    for i := 0; i < 16; i++ {
        register[fmt.Sprintf("C%d", i)] = int16(i)
    }

    // Pseudo-registers.
    register["SB"] = RSB
    register["FP"] = RFP
    register["PC"] = RPC
    register["SP"] = RSP
    registerPrefix := map[string]bool{
        "F": true,
        "R": true,
    }

    instructions := make(map[string]int)
    for i, s := range obj.Anames {
        instructions[s] = i
    }
    for i, s := range arm.Anames {
        if i >= obj.A_ARCHSPECIFIC {
            instructions[s] = i + obj.ABaseARM
        }
    }
    // Annoying aliases.
    instructions["B"] = obj.AJMP
    instructions["BL"] = obj.ACALL
    // MCR differs from MRC by the way fields of the word are encoded.
    // (Details in arm.go). Here we add the instruction so parse will find
    // it, but give it an opcode number known only to us.
    instructions["MCR"] = aMCR

    return &Arch{
        LinkArch:       &arm.Linkarm,
        Instructions:   instructions,
        Register:       register,
        RegisterPrefix: registerPrefix,
        RegisterNumber: armRegisterNumber,
        IsJump:         jumpArm,
    }
}
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copyu returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
    if p.From3Type() != obj.TYPE_NONE {
        // 7g never generates a from3
        fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
    }
    if p.RegTo2 != obj.REG_NONE {
        // 7g never generates a to2
        fmt.Printf("copyu: RegTo2 (%v) not implemented\n", obj.Rconv(int(p.RegTo2)))
    }

    switch p.As {
    default:
        fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
        return 2

    case obj.ANOP, /* read p->from, write p->to */
        arm64.ANEG,
        arm64.AFNEGD,
        arm64.AFNEGS,
        arm64.AFSQRTD,
        arm64.AFCVTZSD,
        arm64.AFCVTZSS,
        arm64.AFCVTZSDW,
        arm64.AFCVTZSSW,
        arm64.AFCVTZUD,
        arm64.AFCVTZUS,
        arm64.AFCVTZUDW,
        arm64.AFCVTZUSW,
        arm64.AFCVTSD,
        arm64.AFCVTDS,
        arm64.ASCVTFD,
        arm64.ASCVTFS,
        arm64.ASCVTFWD,
        arm64.ASCVTFWS,
        arm64.AUCVTFD,
        arm64.AUCVTFS,
        arm64.AUCVTFWD,
        arm64.AUCVTFWS,
        arm64.AMOVB,
        arm64.AMOVBU,
        arm64.AMOVH,
        arm64.AMOVHU,
        arm64.AMOVW,
        arm64.AMOVWU,
        arm64.AMOVD,
        arm64.AFMOVS,
        arm64.AFMOVD:
        if p.Scond == 0 {
            if s != nil {
                if copysub(&p.From, v, s, true) {
                    return 1
                }

                // Update only indirect uses of v in p->to
                if !copyas(&p.To, v) {
                    if copysub(&p.To, v, s, true) {
                        return 1
                    }
                }
                return 0
            }

            if copyas(&p.To, v) {
                // Fix up implicit from
                if p.From.Type == obj.TYPE_NONE {
                    p.From = p.To
                }
                if copyau(&p.From, v) {
                    return 4
                }
                return 3
            }

            if copyau(&p.From, v) {
                return 1
            }
            if copyau(&p.To, v) {
                // p->to only indirectly uses v
                return 1
            }
            return 0
        }

        /* rar p->from, write p->to or read p->from, rar p->to */
        if p.From.Type == obj.TYPE_MEM {
            if copyas(&p.From, v) {
                // No s!=nil check; need to fail
                // anyway in that case
                return 2
            }

            if s != nil {
                if copysub(&p.To, v, s, true) {
                    return 1
                }
                return 0
            }

            if copyas(&p.To, v) {
                return 3
            }
        } else if p.To.Type == obj.TYPE_MEM {
            if copyas(&p.To, v) {
                return 2
            }
            if s != nil {
                if copysub(&p.From, v, s, true) {
                    return 1
                }
                return 0
            }

            if copyau(&p.From, v) {
                return 1
            }
        } else {
            fmt.Printf("copyu: bad %v\n", p)
        }

        return 0

    case arm64.AADD, /* read p->from, read p->reg, write p->to */
        arm64.ASUB,
        arm64.AAND,
        arm64.AORR,
        arm64.AEOR,
        arm64.AMUL,
        arm64.ASMULL,
        arm64.AUMULL,
        arm64.ASMULH,
        arm64.AUMULH,
        arm64.ASDIV,
        arm64.AUDIV,
        arm64.ALSL,
        arm64.ALSR,
        arm64.AASR,
        arm64.AFADDD,
        arm64.AFADDS,
        arm64.AFSUBD,
        arm64.AFSUBS,
        arm64.AFMULD,
        arm64.AFMULS,
        arm64.AFDIVD,
        arm64.AFDIVS:
        if s != nil {
            if copysub(&p.From, v, s, true) {
                return 1
            }
            if copysub1(p, v, s, true) {
                return 1
            }

            // Update only indirect uses of v in p->to
            if !copyas(&p.To, v) {
                if copysub(&p.To, v, s, true) {
                    return 1
                }
            }
            return 0
        }

        if copyas(&p.To, v) {
            if p.Reg == 0 {
                // Fix up implicit reg (e.g., ADD
                // R3,R4 -> ADD R3,R4,R4) so we can
                // update reg and to separately.
                p.Reg = p.To.Reg
            }

            if copyau(&p.From, v) {
                return 4
            }
            if copyau1(p, v) {
                return 4
            }
            return 3
        }

        if copyau(&p.From, v) {
            return 1
        }
        if copyau1(p, v) {
            return 1
        }
        if copyau(&p.To, v) {
            return 1
        }
        return 0

    case arm64.ABEQ,
        arm64.ABNE,
        arm64.ABGE,
        arm64.ABLT,
        arm64.ABGT,
        arm64.ABLE,
        arm64.ABLO,
        arm64.ABLS,
        arm64.ABHI,
        arm64.ABHS:
        return 0

    case obj.ACHECKNIL, /* read p->from */
        arm64.ACMP, /* read p->from, read p->reg */
        arm64.AFCMPD,
        arm64.AFCMPS:
        if s != nil {
            if copysub(&p.From, v, s, true) {
                return 1
            }
            if copysub1(p, v, s, true) {
                return 1
            }
            return 0
        }

        if copyau(&p.From, v) {
            return 1
        }
        if copyau1(p, v) {
            return 1
        }
        return 0

    case arm64.AB: /* read p->to */
        if s != nil {
            if copysub(&p.To, v, s, true) {
                return 1
            }
            return 0
        }

        if copyau(&p.To, v) {
            return 1
        }
        return 0

    case obj.ARET: /* funny */
        if s != nil {
            return 0
        }

        // All registers die at this point, so claim
        // everything is set (and not used).
        return 3

    case arm64.ABL: /* funny */
        if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
            return 2
        }

        if s != nil {
            if copysub(&p.To, v, s, true) {
                return 1
            }
            return 0
        }

        if copyau(&p.To, v) {
            return 4
        }
        return 3

    // R31 is zero, used by DUFFZERO, cannot be substituted.
    // R16 is ptr to memory, used and set, cannot be substituted.
    case obj.ADUFFZERO:
        if v.Type == obj.TYPE_REG {
            if v.Reg == 31 {
                return 1
            }
            if v.Reg == 16 {
                return 2
            }
        }
        return 0

    // R16, R17 are ptr to src, dst, used and set, cannot be substituted.
    // R27 is scratch, set by DUFFCOPY, cannot be substituted.
    case obj.ADUFFCOPY:
        if v.Type == obj.TYPE_REG {
            if v.Reg == 16 || v.Reg == 17 {
                return 2
            }
            if v.Reg == 27 {
                return 3
            }
        }
        return 0

    case arm64.AHINT,
        obj.ATEXT,
        obj.APCDATA,
        obj.AFUNCDATA,
        obj.AVARDEF,
        obj.AVARKILL,
        obj.AVARLIVE,
        obj.AUSEFIELD:
        return 0
    }
}
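// The return-code contract documented above copyu is what drives the
// copy-propagation loop in the peephole optimizer: scanning forward
// from a copy "MOV v -> s", codes 0/1 let the scan continue, code 3
// proves v dead, and codes 2/4 force it to give up. The standalone
// sketch below is a hypothetical reading of that contract; scanForward
// and its canned inputs are invented for illustration (the real loop
// lives in subprop/copyprop in the per-arch peep.go).
package main

import "fmt"

// copyu result codes, as documented above.
const (
    notTouched = 0 // v not mentioned by the instruction
    onlyUsed   = 1 // v read; substitution may continue
    readAlter  = 2 // read-alter-rewrite; substitution impossible
    onlySet    = 3 // v overwritten; v is dead past this point
    setAndUsed = 4 // set in one address, used in another
)

// scanForward reports whether every use of v up to its next kill could
// be replaced, given the sequence of copyu codes for the instructions
// that follow the copy.
func scanForward(codes []int) bool {
    for _, c := range codes {
        switch c {
        case notTouched, onlyUsed:
            continue // nothing blocks substitution yet
        case onlySet:
            return true // v redefined: all earlier uses were rewritable
        case readAlter, setAndUsed:
            return false // v is tangled with this instruction: give up
        }
    }
    return false // fell off the block without a kill: be conservative
}

func main() {
    fmt.Println(scanForward([]int{notTouched, onlyUsed, onlyUsed, onlySet})) // true
    fmt.Println(scanForward([]int{onlyUsed, readAlter}))                     // false
}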
func anyregalloc() bool {
    var j int

    for i := 0; i < len(reg); i++ {
        if reg[i] == 0 {
            goto ok
        }
        for j = 0; j < len(resvd); j++ {
            if resvd[j] == i {
                goto ok
            }
        }
        return true
    ok:
    }

    return false
}

var regpc [REGALLOC_FMAX + 1]uint32

/*
 * allocate register of type t, leave in n.
 * if o != N, o is desired fixed register.
 * caller must regfree(n).
 */
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
    if false && gc.Debug['r'] != 0 {
        fixfree := 0
        for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
            if reg[i] == 0 {
                fixfree++
            }
        }
        floatfree := 0
        for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
            if reg[i] == 0 {
                floatfree++
            }
        }
        fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
    }

    if t == nil {
        gc.Fatal("regalloc: t nil")
    }
    et := int(gc.Simtype[t.Etype])
    if gc.Is64(t) {
        // Pass the offending type so the message is useful.
        gc.Fatal("regalloc: 64 bit type %v", gc.Tconv(t, 0))
    }

    var i int
    switch et {
    case gc.TINT8,
        gc.TUINT8,
        gc.TINT16,
        gc.TUINT16,
        gc.TINT32,
        gc.TUINT32,
        gc.TPTR32,
        gc.TBOOL:
        if o != nil && o.Op == gc.OREGISTER {
            i = int(o.Val.U.Reg)
            if i >= REGALLOC_R0 && i <= REGALLOC_RMAX {
                goto out
            }
        }

        for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
            if reg[i] == 0 {
                regpc[i] = uint32(obj.Getcallerpc(&n))
                goto out
            }
        }

        fmt.Printf("registers allocated at\n")
        for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
            // regpc holds PC values; print them as hex.
            fmt.Printf("%d %#x\n", i, regpc[i])
        }
        gc.Fatal("out of fixed registers")
        goto err

    case gc.TFLOAT32,
        gc.TFLOAT64:
        if o != nil && o.Op == gc.OREGISTER {
            i = int(o.Val.U.Reg)
            if i >= REGALLOC_F0 && i <= REGALLOC_FMAX {
                goto out
            }
        }

        for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
            if reg[i] == 0 {
                goto out
            }
        }
        gc.Fatal("out of floating point registers")
        goto err

    case gc.TCOMPLEX64,
        gc.TCOMPLEX128:
        gc.Tempname(n, t)
        return
    }

    gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))

err:
    gc.Nodreg(n, t, arm.REG_R0)
    return

out:
    reg[i]++
    gc.Nodreg(n, t, i)
}

func regfree(n *gc.Node) {
    if false && gc.Debug['r'] != 0 {
        fixfree := 0
        for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
            if reg[i] == 0 {
                fixfree++
            }
        }
        floatfree := 0
        for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
            if reg[i] == 0 {
                floatfree++
            }
        }
        fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
    }

    if n.Op == gc.ONAME {
        return
    }
    if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
        gc.Fatal("regfree: not a register")
    }
    i := int(n.Val.U.Reg)
    if i == arm.REGSP {
        return
    }
    if i < 0 || i >= len(reg) || i >= len(regpc) {
        gc.Fatal("regfree: reg out of range")
    }
    if reg[i] <= 0 {
        gc.Fatal("regfree: reg %v not allocated", obj.Rconv(i))
    }
    reg[i]--
    if reg[i] == 0 {
        regpc[i] = 0
    }
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */
var ncon_n gc.Node

func ncon(i uint32) *gc.Node {
    if ncon_n.Type == nil {
        gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
    }
    gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
    return &ncon_n
}

var sclean [10]gc.Node

var nsclean int

/*
 * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
    if !gc.Is64(n.Type) {
        gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
    }

    if nsclean >= len(sclean) {
        gc.Fatal("split64 clean")
    }
    sclean[nsclean].Op = gc.OEMPTY
    nsclean++
    switch n.Op {
    default:
        switch n.Op {
        default:
            var n1 gc.Node
            if !dotaddable(n, &n1) {
                igen(n, &n1, nil)
                sclean[nsclean-1] = n1
            }

            n = &n1

        case gc.ONAME:
            if n.Class == gc.PPARAMREF {
                var n1 gc.Node
                cgen(n.Heapaddr, &n1)
                sclean[nsclean-1] = n1
                n = &n1
            }

            // nothing
        case gc.OINDREG:
            break
        }

        *lo = *n
        *hi = *n
        lo.Type = gc.Types[gc.TUINT32]
        if n.Type.Etype == gc.TINT64 {
            hi.Type = gc.Types[gc.TINT32]
        } else {
            hi.Type = gc.Types[gc.TUINT32]
        }
        hi.Xoffset += 4

    case gc.OLITERAL:
        var n1 gc.Node
        gc.Convconst(&n1, n.Type, &n.Val)
        i := gc.Mpgetfix(n1.Val.U.Xval)
        gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
        i >>= 32
        if n.Type.Etype == gc.TINT64 {
            gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
        } else {
            gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
        }
    }
}

func splitclean() {
    if nsclean <= 0 {
        gc.Fatal("splitclean")
    }
    nsclean--
    if sclean[nsclean].Op != gc.OEMPTY {
        regfree(&sclean[nsclean])
    }
}

func gmove(f *gc.Node, t *gc.Node) {
    if gc.Debug['M'] != 0 {
        fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
    }

    ft := gc.Simsimtype(f.Type)
    tt := gc.Simsimtype(t.Type)
    cvt := t.Type

    if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
        gc.Complexmove(f, t)
        return
    }

    // cannot have two memory operands;
    // except 64-bit, which always copies via registers anyway.
    var a int
    var r1 gc.Node
    if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
        goto hard
    }

    // convert constant to desired type
    if f.Op == gc.OLITERAL {
        var con gc.Node
        switch tt {
        default:
            gc.Convconst(&con, t.Type, &f.Val)

        case gc.TINT16,
            gc.TINT8:
            var con gc.Node
            gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
            var r1 gc.Node
            regalloc(&r1, con.Type, t)
            gins(arm.AMOVW, &con, &r1)
            gmove(&r1, t)
            regfree(&r1)
            return

        case gc.TUINT16,
            gc.TUINT8:
            var con gc.Node
            gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
            var r1 gc.Node
            regalloc(&r1, con.Type, t)
            gins(arm.AMOVW, &con, &r1)
            gmove(&r1, t)
            regfree(&r1)
            return
        }

        f = &con
        ft = gc.Simsimtype(con.Type)

        // constants can't move directly to memory
        if gc.Ismem(t) && !gc.Is64(t.Type) {
            goto hard
        }
    }

    // value -> value copy, only one memory operand.
    // figure out the instruction to use.
    // break out of switch for one-instruction gins.
    // goto rdst for "destination must be register".
    // goto hard for "convert to cvt type first".
    // otherwise handle and return.
    switch uint32(ft)<<16 | uint32(tt) {
    default:
        // should not happen
        gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
        return

        /*
         * integer copy and truncate
         */
    case gc.TINT8<<16 | gc.TINT8: // same size
        if !gc.Ismem(f) {
            a = arm.AMOVB
            break
        }
        fallthrough

    case gc.TUINT8<<16 | gc.TINT8,
        gc.TINT16<<16 | gc.TINT8, // truncate
        gc.TUINT16<<16 | gc.TINT8,
        gc.TINT32<<16 | gc.TINT8,
        gc.TUINT32<<16 | gc.TINT8:
        a = arm.AMOVBS

    case gc.TUINT8<<16 | gc.TUINT8:
        if !gc.Ismem(f) {
            a = arm.AMOVB
            break
        }
        fallthrough

    case gc.TINT8<<16 | gc.TUINT8,
        gc.TINT16<<16 | gc.TUINT8,
        gc.TUINT16<<16 | gc.TUINT8,
        gc.TINT32<<16 | gc.TUINT8,
        gc.TUINT32<<16 | gc.TUINT8:
        a = arm.AMOVBU

    case gc.TINT64<<16 | gc.TINT8, // truncate low word
        gc.TUINT64<<16 | gc.TINT8:
        a = arm.AMOVBS
        goto trunc64

    case gc.TINT64<<16 | gc.TUINT8,
        gc.TUINT64<<16 | gc.TUINT8:
        a = arm.AMOVBU
        goto trunc64

    case gc.TINT16<<16 | gc.TINT16: // same size
        if !gc.Ismem(f) {
            a = arm.AMOVH
            break
        }
        fallthrough

    case gc.TUINT16<<16 | gc.TINT16,
        gc.TINT32<<16 | gc.TINT16, // truncate
        gc.TUINT32<<16 | gc.TINT16:
        a = arm.AMOVHS

    case gc.TUINT16<<16 | gc.TUINT16:
        if !gc.Ismem(f) {
            a = arm.AMOVH
            break
        }
        fallthrough

    case gc.TINT16<<16 | gc.TUINT16,
        gc.TINT32<<16 | gc.TUINT16,
        gc.TUINT32<<16 | gc.TUINT16:
        a = arm.AMOVHU

    case gc.TINT64<<16 | gc.TINT16, // truncate low word
        gc.TUINT64<<16 | gc.TINT16:
        a = arm.AMOVHS
        goto trunc64

    case gc.TINT64<<16 | gc.TUINT16,
        gc.TUINT64<<16 | gc.TUINT16:
        a = arm.AMOVHU
        goto trunc64

    case gc.TINT32<<16 | gc.TINT32, // same size
        gc.TINT32<<16 | gc.TUINT32,
        gc.TUINT32<<16 | gc.TINT32,
        gc.TUINT32<<16 | gc.TUINT32:
        a = arm.AMOVW

    case gc.TINT64<<16 | gc.TINT32, // truncate
        gc.TUINT64<<16 | gc.TINT32,
        gc.TINT64<<16 | gc.TUINT32,
        gc.TUINT64<<16 | gc.TUINT32:
        var flo gc.Node
        var fhi gc.Node
        split64(f, &flo, &fhi)

        var r1 gc.Node
        regalloc(&r1, t.Type, nil)
        gins(arm.AMOVW, &flo, &r1)
        gins(arm.AMOVW, &r1, t)
        regfree(&r1)
        splitclean()
        return

    case gc.TINT64<<16 | gc.TINT64, // same size
        gc.TINT64<<16 | gc.TUINT64,
        gc.TUINT64<<16 | gc.TINT64,
        gc.TUINT64<<16 | gc.TUINT64:
        var fhi gc.Node
        var flo gc.Node
        split64(f, &flo, &fhi)

        var tlo gc.Node
        var thi gc.Node
        split64(t, &tlo, &thi)

        var r1 gc.Node
        regalloc(&r1, flo.Type, nil)
        var r2 gc.Node
        regalloc(&r2, fhi.Type, nil)
        gins(arm.AMOVW, &flo, &r1)
        gins(arm.AMOVW, &fhi, &r2)
        gins(arm.AMOVW, &r1, &tlo)
        gins(arm.AMOVW, &r2, &thi)
        regfree(&r1)
        regfree(&r2)
        splitclean()
        splitclean()
        return

        /*
         * integer up-conversions
         */
    case gc.TINT8<<16 | gc.TINT16, // sign extend int8
        gc.TINT8<<16 | gc.TUINT16,
        gc.TINT8<<16 | gc.TINT32,
        gc.TINT8<<16 | gc.TUINT32:
        a = arm.AMOVBS
        goto rdst

    case gc.TINT8<<16 | gc.TINT64, // convert via int32
        gc.TINT8<<16 | gc.TUINT64:
        cvt = gc.Types[gc.TINT32]
        goto hard

    case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
        gc.TUINT8<<16 | gc.TUINT16,
        gc.TUINT8<<16 | gc.TINT32,
        gc.TUINT8<<16 | gc.TUINT32:
        a = arm.AMOVBU
        goto rdst

    case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
        gc.TUINT8<<16 | gc.TUINT64:
        cvt = gc.Types[gc.TUINT32]
        goto hard

    case gc.TINT16<<16 | gc.TINT32, // sign extend int16
        gc.TINT16<<16 | gc.TUINT32:
        a = arm.AMOVHS
        goto rdst

    case gc.TINT16<<16 | gc.TINT64, // convert via int32
        gc.TINT16<<16 | gc.TUINT64:
        cvt = gc.Types[gc.TINT32]
        goto hard

    case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
        gc.TUINT16<<16 | gc.TUINT32:
        a = arm.AMOVHU
        goto rdst

    case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
        gc.TUINT16<<16 | gc.TUINT64:
        cvt = gc.Types[gc.TUINT32]
        goto hard

    case gc.TINT32<<16 | gc.TINT64, // sign extend int32
        gc.TINT32<<16 | gc.TUINT64:
        var tlo gc.Node
        var thi gc.Node
        split64(t, &tlo, &thi)

        var r1 gc.Node
        regalloc(&r1, tlo.Type, nil)
        var r2 gc.Node
        regalloc(&r2, thi.Type, nil)
        gmove(f, &r1)
        p1 := gins(arm.AMOVW, &r1, &r2)
        p1.From.Type = obj.TYPE_SHIFT
        p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Val.U.Reg)&15 // r1->31
        p1.From.Reg = 0

        //print("gmove: %P\n", p1);
        gins(arm.AMOVW, &r1, &tlo)

        gins(arm.AMOVW, &r2, &thi)
        regfree(&r1)
        regfree(&r2)
        splitclean()
        return

    case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
        gc.TUINT32<<16 | gc.TUINT64:
        var thi gc.Node
        var tlo gc.Node
        split64(t, &tlo, &thi)

        gmove(f, &tlo)
        var r1 gc.Node
        regalloc(&r1, thi.Type, nil)
        gins(arm.AMOVW, ncon(0), &r1)
        gins(arm.AMOVW, &r1, &thi)
        regfree(&r1)
        splitclean()
        return

        //	case CASE(TFLOAT64, TUINT64):
        /*
         * float to integer
         */
    case gc.TFLOAT32<<16 | gc.TINT8,
        gc.TFLOAT32<<16 | gc.TUINT8,
        gc.TFLOAT32<<16 | gc.TINT16,
        gc.TFLOAT32<<16 | gc.TUINT16,
        gc.TFLOAT32<<16 | gc.TINT32,
        gc.TFLOAT32<<16 | gc.TUINT32,

        // case CASE(TFLOAT32, TUINT64):
        gc.TFLOAT64<<16 | gc.TINT8,
        gc.TFLOAT64<<16 | gc.TUINT8,
        gc.TFLOAT64<<16 | gc.TINT16,
        gc.TFLOAT64<<16 | gc.TUINT16,
        gc.TFLOAT64<<16 | gc.TINT32,
        gc.TFLOAT64<<16 | gc.TUINT32:
        fa := arm.AMOVF

        a := arm.AMOVFW
        if ft == gc.TFLOAT64 {
            fa = arm.AMOVD
            a = arm.AMOVDW
        }

        ta := arm.AMOVW
        switch tt {
        case gc.TINT8:
            ta = arm.AMOVBS

        case gc.TUINT8:
            ta = arm.AMOVBU

        case gc.TINT16:
            ta = arm.AMOVHS

        case gc.TUINT16:
            ta = arm.AMOVHU
        }

        var r1 gc.Node
        regalloc(&r1, gc.Types[ft], f)
        var r2 gc.Node
        regalloc(&r2, gc.Types[tt], t)
        gins(fa, f, &r1)        // load to fpu
        p1 := gins(a, &r1, &r1) // convert to w
        switch tt {
        case gc.TUINT8,
            gc.TUINT16,
            gc.TUINT32:
            p1.Scond |= arm.C_UBIT
        }

        gins(arm.AMOVW, &r1, &r2) // copy to cpu
        gins(ta, &r2, t)          // store
        regfree(&r1)
        regfree(&r2)
        return

        /*
         * integer to float
         */
    case gc.TINT8<<16 | gc.TFLOAT32,
        gc.TUINT8<<16 | gc.TFLOAT32,
        gc.TINT16<<16 | gc.TFLOAT32,
        gc.TUINT16<<16 | gc.TFLOAT32,
        gc.TINT32<<16 | gc.TFLOAT32,
        gc.TUINT32<<16 | gc.TFLOAT32,
        gc.TINT8<<16 | gc.TFLOAT64,
        gc.TUINT8<<16 | gc.TFLOAT64,
        gc.TINT16<<16 | gc.TFLOAT64,
        gc.TUINT16<<16 | gc.TFLOAT64,
        gc.TINT32<<16 | gc.TFLOAT64,
        gc.TUINT32<<16 | gc.TFLOAT64:
        fa := arm.AMOVW

        switch ft {
        case gc.TINT8:
            fa = arm.AMOVBS

        case gc.TUINT8:
            fa = arm.AMOVBU

        case gc.TINT16:
            fa = arm.AMOVHS

        case gc.TUINT16:
            fa = arm.AMOVHU
        }

        a := arm.AMOVWF
        ta := arm.AMOVF
        if tt == gc.TFLOAT64 {
            a = arm.AMOVWD
            ta = arm.AMOVD
        }

        var r1 gc.Node
        regalloc(&r1, gc.Types[ft], f)
        var r2 gc.Node
        regalloc(&r2, gc.Types[tt], t)
        gins(fa, f, &r1)          // load to cpu
        gins(arm.AMOVW, &r1, &r2) // copy to fpu
        p1 := gins(a, &r2, &r2)   // convert
        switch ft {
        case gc.TUINT8,
            gc.TUINT16,
            gc.TUINT32:
            p1.Scond |= arm.C_UBIT
        }

        gins(ta, &r2, t) // store
        regfree(&r1)
        regfree(&r2)
        return

    case gc.TUINT64<<16 | gc.TFLOAT32,
        gc.TUINT64<<16 | gc.TFLOAT64:
        gc.Fatal("gmove UINT64, TFLOAT not implemented")
        return

        /*
         * float to float
         */
    case gc.TFLOAT32<<16 | gc.TFLOAT32:
        a = arm.AMOVF

    case gc.TFLOAT64<<16 | gc.TFLOAT64:
        a = arm.AMOVD

    case gc.TFLOAT32<<16 | gc.TFLOAT64:
        var r1 gc.Node
        regalloc(&r1, gc.Types[gc.TFLOAT64], t)
        gins(arm.AMOVF, f, &r1)
        gins(arm.AMOVFD, &r1, &r1)
        gins(arm.AMOVD, &r1, t)
        regfree(&r1)
        return

    case gc.TFLOAT64<<16 | gc.TFLOAT32:
        var r1 gc.Node
        regalloc(&r1, gc.Types[gc.TFLOAT64], t)
        gins(arm.AMOVD, f, &r1)
        gins(arm.AMOVDF, &r1, &r1)
        gins(arm.AMOVF, &r1, t)
        regfree(&r1)
        return
    }

    gins(a, f, t)
    return

    // TODO(kaib): we almost always require a register dest anyway, this can probably be
    // removed.
    // requires register destination
rdst:
    {
        regalloc(&r1, t.Type, t)

        gins(a, f, &r1)
        gmove(&r1, t)
        regfree(&r1)
        return
    }

    // requires register intermediate
hard:
    regalloc(&r1, cvt, t)

    gmove(f, &r1)
    gmove(&r1, t)
    regfree(&r1)
    return

    // truncate 64 bit integer
trunc64:
    var fhi gc.Node
    var flo gc.Node
    split64(f, &flo, &fhi)

    regalloc(&r1, t.Type, nil)
    gins(a, &flo, &r1)
    gins(a, &r1, t)
    regfree(&r1)
    splitclean()
    return
}

func samaddr(f *gc.Node, t *gc.Node) bool {
    if f.Op != t.Op {
        return false
    }

    switch f.Op {
    case gc.OREGISTER:
        if f.Val.U.Reg != t.Val.U.Reg {
            break
        }
        return true
    }

    return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
    //	Node nod;
    //	int32 v;

    if f != nil && f.Op == gc.OINDEX {
        gc.Fatal("gins OINDEX not implemented")
    }

    //		regalloc(&nod, &regnode, Z);
    //		v = constnode.vconst;
    //		cgen(f->right, &nod);
    //		constnode.vconst = v;
    //		idx.reg = nod.reg;
    //		regfree(&nod);
    if t != nil && t.Op == gc.OINDEX {
        gc.Fatal("gins OINDEX not implemented")
    }

    //		regalloc(&nod, &regnode, Z);
    //		v = constnode.vconst;
    //		cgen(t->right, &nod);
    //		constnode.vconst = v;
    //		idx.reg = nod.reg;
    //		regfree(&nod);

    var af obj.Addr
    var at obj.Addr
    if f != nil {
        af = gc.Naddr(f)
    }
    if t != nil {
        at = gc.Naddr(t)
    }
    p := gc.Prog(as)
    if f != nil {
        p.From = af
    }
    if t != nil {
        p.To = at
    }
    if gc.Debug['g'] != 0 {
        fmt.Printf("%v\n", p)
    }
    return p
}

/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
    var a obj.Addr

    a = gc.Naddr(n)
    if a.Type != obj.TYPE_REG {
        if n != nil {
            gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
        } else {
            gc.Fatal("bad in raddr: <null>")
        }
        p.Reg = 0
    } else {
        p.Reg = a.Reg
    }
}

/* generate a comparison
TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
*/
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
    if lhs.Op != gc.OREGISTER {
        gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
    }

    p := gins(as, rhs, nil)
    raddr(lhs, p)
    return p
}

/* generate a constant shift
 * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
 */
func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
    if sval <= 0 || sval > 32 {
        gc.Fatal("bad shift value: %d", sval)
    }

    sval = sval & 0x1f

    p := gins(as, nil, rhs)
    p.From.Type = obj.TYPE_SHIFT
    p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Val.U.Reg)&15
    return p
}

/* generate a register shift
 */
func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
    p := gins(as, nil, rhs)
    p.From.Type = obj.TYPE_SHIFT
    p.From.Offset = int64(stype) | (int64(reg.Val.U.Reg)&15)<<8 | 1<<4 | int64(lhs.Val.U.Reg)&15
    return p
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
    if t == nil {
        gc.Fatal("optoas: t is nil")
    }

    a := obj.AXXX
    switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
    default:
        gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0), gc.Tconv(gc.Types[t.Etype], 0), gc.Tconv(gc.Types[gc.Simtype[t.Etype]], 0))

        /*	case CASE(OADDR, TPTR32):
        	a = ALEAL;
        	break;

        	case CASE(OADDR, TPTR64):
        	a = ALEAQ;
        	break;
        */

    // TODO(kaib): make sure the conditional branches work on all edge cases
    case gc.OEQ<<16 | gc.TBOOL,
        gc.OEQ<<16 | gc.TINT8,
        gc.OEQ<<16 | gc.TUINT8,
        gc.OEQ<<16 | gc.TINT16,
        gc.OEQ<<16 | gc.TUINT16,
        gc.OEQ<<16 | gc.TINT32,
        gc.OEQ<<16 | gc.TUINT32,
        gc.OEQ<<16 | gc.TINT64,
        gc.OEQ<<16 | gc.TUINT64,
        gc.OEQ<<16 | gc.TPTR32,
        gc.OEQ<<16 | gc.TPTR64,
        gc.OEQ<<16 | gc.TFLOAT32,
        gc.OEQ<<16 | gc.TFLOAT64:
        a = arm.ABEQ

    case gc.ONE<<16 | gc.TBOOL,
        gc.ONE<<16 | gc.TINT8,
        gc.ONE<<16 | gc.TUINT8,
        gc.ONE<<16 | gc.TINT16,
        gc.ONE<<16 | gc.TUINT16,
        gc.ONE<<16 | gc.TINT32,
        gc.ONE<<16 | gc.TUINT32,
        gc.ONE<<16 | gc.TINT64,
        gc.ONE<<16 | gc.TUINT64,
        gc.ONE<<16 | gc.TPTR32,
        gc.ONE<<16 | gc.TPTR64,
        gc.ONE<<16 | gc.TFLOAT32,
        gc.ONE<<16 | gc.TFLOAT64:
        a = arm.ABNE

    case gc.OLT<<16 | gc.TINT8,
        gc.OLT<<16 | gc.TINT16,
        gc.OLT<<16 | gc.TINT32,
        gc.OLT<<16 | gc.TINT64,
        gc.OLT<<16 | gc.TFLOAT32,
        gc.OLT<<16 | gc.TFLOAT64:
        a = arm.ABLT

    case gc.OLT<<16 | gc.TUINT8,
        gc.OLT<<16 | gc.TUINT16,
        gc.OLT<<16 | gc.TUINT32,
        gc.OLT<<16 | gc.TUINT64:
        a = arm.ABLO

    case gc.OLE<<16 | gc.TINT8,
        gc.OLE<<16 | gc.TINT16,
        gc.OLE<<16 | gc.TINT32,
        gc.OLE<<16 | gc.TINT64,
        gc.OLE<<16 | gc.TFLOAT32,
        gc.OLE<<16 | gc.TFLOAT64:
        a = arm.ABLE

    case gc.OLE<<16 | gc.TUINT8,
        gc.OLE<<16 | gc.TUINT16,
        gc.OLE<<16 | gc.TUINT32,
        gc.OLE<<16 | gc.TUINT64:
        a = arm.ABLS

    case gc.OGT<<16 | gc.TINT8,
        gc.OGT<<16 | gc.TINT16,
        gc.OGT<<16 | gc.TINT32,
        gc.OGT<<16 | gc.TINT64,
        gc.OGT<<16 | gc.TFLOAT32,
        gc.OGT<<16 | gc.TFLOAT64:
        a = arm.ABGT

    case gc.OGT<<16 | gc.TUINT8,
        gc.OGT<<16 | gc.TUINT16,
        gc.OGT<<16 | gc.TUINT32,
        gc.OGT<<16 | gc.TUINT64:
        a = arm.ABHI

    case gc.OGE<<16 | gc.TINT8,
        gc.OGE<<16 | gc.TINT16,
        gc.OGE<<16 | gc.TINT32,
        gc.OGE<<16 | gc.TINT64,
        gc.OGE<<16 | gc.TFLOAT32,
        gc.OGE<<16 | gc.TFLOAT64:
        a = arm.ABGE

    case gc.OGE<<16 | gc.TUINT8,
        gc.OGE<<16 | gc.TUINT16,
        gc.OGE<<16 | gc.TUINT32,
        gc.OGE<<16 | gc.TUINT64:
        a = arm.ABHS

    case gc.OCMP<<16 | gc.TBOOL,
        gc.OCMP<<16 | gc.TINT8,
        gc.OCMP<<16 | gc.TUINT8,
        gc.OCMP<<16 | gc.TINT16,
        gc.OCMP<<16 | gc.TUINT16,
        gc.OCMP<<16 | gc.TINT32,
        gc.OCMP<<16 | gc.TUINT32,
        gc.OCMP<<16 | gc.TPTR32:
        a = arm.ACMP

    case gc.OCMP<<16 | gc.TFLOAT32:
        a = arm.ACMPF

    case gc.OCMP<<16 | gc.TFLOAT64:
        a = arm.ACMPD

    case gc.OAS<<16 | gc.TBOOL:
        a = arm.AMOVB

    case gc.OAS<<16 | gc.TINT8:
        a = arm.AMOVBS

    case gc.OAS<<16 | gc.TUINT8:
        a = arm.AMOVBU

    case gc.OAS<<16 | gc.TINT16:
        a = arm.AMOVHS

    case gc.OAS<<16 | gc.TUINT16:
        a = arm.AMOVHU

    case gc.OAS<<16 | gc.TINT32,
        gc.OAS<<16 | gc.TUINT32,
        gc.OAS<<16 | gc.TPTR32:
        a = arm.AMOVW

    case gc.OAS<<16 | gc.TFLOAT32:
        a = arm.AMOVF

    case gc.OAS<<16 | gc.TFLOAT64:
        a = arm.AMOVD

    case gc.OADD<<16 | gc.TINT8,
        gc.OADD<<16 | gc.TUINT8,
        gc.OADD<<16 | gc.TINT16,
        gc.OADD<<16 | gc.TUINT16,
        gc.OADD<<16 | gc.TINT32,
        gc.OADD<<16 | gc.TUINT32,
        gc.OADD<<16 | gc.TPTR32:
        a = arm.AADD

    case gc.OADD<<16 | gc.TFLOAT32:
        a = arm.AADDF

    case gc.OADD<<16 | gc.TFLOAT64:
        a = arm.AADDD

    case gc.OSUB<<16 | gc.TINT8,
        gc.OSUB<<16 | gc.TUINT8,
        gc.OSUB<<16 | gc.TINT16,
        gc.OSUB<<16 | gc.TUINT16,
        gc.OSUB<<16 | gc.TINT32,
        gc.OSUB<<16 | gc.TUINT32,
        gc.OSUB<<16 | gc.TPTR32:
        a = arm.ASUB

    case gc.OSUB<<16 | gc.TFLOAT32:
        a = arm.ASUBF
    case gc.OSUB<<16 | gc.TFLOAT64:
        a = arm.ASUBD

    case gc.OMINUS<<16 | gc.TINT8,
        gc.OMINUS<<16 | gc.TUINT8,
        gc.OMINUS<<16 | gc.TINT16,
        gc.OMINUS<<16 | gc.TUINT16,
        gc.OMINUS<<16 | gc.TINT32,
        gc.OMINUS<<16 | gc.TUINT32,
        gc.OMINUS<<16 | gc.TPTR32:
        a = arm.ARSB

    case gc.OAND<<16 | gc.TINT8,
        gc.OAND<<16 | gc.TUINT8,
        gc.OAND<<16 | gc.TINT16,
        gc.OAND<<16 | gc.TUINT16,
        gc.OAND<<16 | gc.TINT32,
        gc.OAND<<16 | gc.TUINT32,
        gc.OAND<<16 | gc.TPTR32:
        a = arm.AAND

    case gc.OOR<<16 | gc.TINT8,
        gc.OOR<<16 | gc.TUINT8,
        gc.OOR<<16 | gc.TINT16,
        gc.OOR<<16 | gc.TUINT16,
        gc.OOR<<16 | gc.TINT32,
        gc.OOR<<16 | gc.TUINT32,
        gc.OOR<<16 | gc.TPTR32:
        a = arm.AORR

    case gc.OXOR<<16 | gc.TINT8,
        gc.OXOR<<16 | gc.TUINT8,
        gc.OXOR<<16 | gc.TINT16,
        gc.OXOR<<16 | gc.TUINT16,
        gc.OXOR<<16 | gc.TINT32,
        gc.OXOR<<16 | gc.TUINT32,
        gc.OXOR<<16 | gc.TPTR32:
        a = arm.AEOR

    case gc.OLSH<<16 | gc.TINT8,
        gc.OLSH<<16 | gc.TUINT8,
        gc.OLSH<<16 | gc.TINT16,
        gc.OLSH<<16 | gc.TUINT16,
        gc.OLSH<<16 | gc.TINT32,
        gc.OLSH<<16 | gc.TUINT32,
        gc.OLSH<<16 | gc.TPTR32:
        a = arm.ASLL

    case gc.ORSH<<16 | gc.TUINT8,
        gc.ORSH<<16 | gc.TUINT16,
        gc.ORSH<<16 | gc.TUINT32,
        gc.ORSH<<16 | gc.TPTR32:
        a = arm.ASRL

    case gc.ORSH<<16 | gc.TINT8,
        gc.ORSH<<16 | gc.TINT16,
        gc.ORSH<<16 | gc.TINT32:
        a = arm.ASRA

    case gc.OMUL<<16 | gc.TUINT8,
        gc.OMUL<<16 | gc.TUINT16,
        gc.OMUL<<16 | gc.TUINT32,
        gc.OMUL<<16 | gc.TPTR32:
        a = arm.AMULU

    case gc.OMUL<<16 | gc.TINT8,
        gc.OMUL<<16 | gc.TINT16,
        gc.OMUL<<16 | gc.TINT32:
        a = arm.AMUL

    case gc.OMUL<<16 | gc.TFLOAT32:
        a = arm.AMULF

    case gc.OMUL<<16 | gc.TFLOAT64:
        a = arm.AMULD

    case gc.ODIV<<16 | gc.TUINT8,
        gc.ODIV<<16 | gc.TUINT16,
        gc.ODIV<<16 | gc.TUINT32,
        gc.ODIV<<16 | gc.TPTR32:
        a = arm.ADIVU

    case gc.ODIV<<16 | gc.TINT8,
        gc.ODIV<<16 | gc.TINT16,
        gc.ODIV<<16 | gc.TINT32:
        a = arm.ADIV

    case gc.OMOD<<16 | gc.TUINT8,
        gc.OMOD<<16 | gc.TUINT16,
        gc.OMOD<<16 | gc.TUINT32,
        gc.OMOD<<16 | gc.TPTR32:
        a = arm.AMODU

    case gc.OMOD<<16 | gc.TINT8,
        gc.OMOD<<16 | gc.TINT16,
        gc.OMOD<<16 | gc.TINT32:
        a = arm.AMOD

        //	case CASE(OEXTEND, TINT16):
        //		a = ACWD;
        //		break;

        //	case CASE(OEXTEND, TINT32):
        //		a = ACDQ;
        //		break;

        //	case CASE(OEXTEND, TINT64):
        //		a = ACQO;
        //		break;

    case gc.ODIV<<16 | gc.TFLOAT32:
        a = arm.ADIVF

    case gc.ODIV<<16 | gc.TFLOAT64:
        a = arm.ADIVD
    }

    return a
}

const (
    ODynam = 1 << 0
    OPtrto = 1 << 1
)

var clean [20]gc.Node

var cleani int = 0

func sudoclean() {
    if clean[cleani-1].Op != gc.OEMPTY {
        regfree(&clean[cleani-1])
    }
    if clean[cleani-2].Op != gc.OEMPTY {
        regfree(&clean[cleani-2])
    }
    cleani -= 2
}

func dotaddable(n *gc.Node, n1 *gc.Node) bool {
    if n.Op != gc.ODOT {
        return false
    }

    var oary [10]int64
    var nn *gc.Node
    o := gc.Dotoffset(n, oary[:], &nn)
    if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
        *n1 = *nn
        n1.Type = n.Type
        n1.Xoffset += oary[0]
        return true
    }

    return false
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
    if n.Type == nil {
        return false
    }

    *a = obj.Addr{}

    switch n.Op {
    case gc.OLITERAL:
        if !gc.Isconst(n, gc.CTINT) {
            break
        }
        v := gc.Mpgetfix(n.Val.U.Xval)
        if v >= 32000 || v <= -32000 {
            break
        }
        switch as {
        default:
            return false

        case arm.AADD,
            arm.ASUB,
            arm.AAND,
            arm.AORR,
            arm.AEOR,
            arm.AMOVB,
            arm.AMOVBS,
            arm.AMOVBU,
            arm.AMOVH,
            arm.AMOVHS,
            arm.AMOVHU,
            arm.AMOVW:
            break
        }

        cleani += 2
        reg := &clean[cleani-1]
        reg1 := &clean[cleani-2]
        reg.Op = gc.OEMPTY
        reg1.Op = gc.OEMPTY
        *a = gc.Naddr(n)
        return true

    case gc.ODOT,
        gc.ODOTPTR:
        cleani += 2
        reg := &clean[cleani-1]
        reg1 := &clean[cleani-2]
        reg.Op = gc.OEMPTY
        reg1.Op = gc.OEMPTY
        var nn *gc.Node
        var oary [10]int64
        o := gc.Dotoffset(n, oary[:], &nn)
        if nn == nil {
            sudoclean()
            return false
        }

        if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
            // directly addressable set of DOTs
            n1 := *nn

            n1.Type = n.Type
            n1.Xoffset += oary[0]
            *a = gc.Naddr(&n1)
            return true
        }

        regalloc(reg, gc.Types[gc.Tptr], nil)
        n1 := *reg
        n1.Op = gc.OINDREG
        if oary[0] >= 0 {
            agen(nn, reg)
            n1.Xoffset = oary[0]
        } else {
            cgen(nn, reg)
            gc.Cgen_checknil(reg)
            n1.Xoffset = -(oary[0] + 1)
        }

        for i := 1; i < o; i++ {
            if oary[i] >= 0 {
                gc.Fatal("can't happen")
            }
            gins(arm.AMOVW, &n1, reg)
            gc.Cgen_checknil(reg)
            n1.Xoffset = -(oary[i] + 1)
        }

        a.Type = obj.TYPE_NONE
        a.Name = obj.NAME_NONE
        n1.Type = n.Type
        *a = gc.Naddr(&n1)
        return true

    case gc.OINDEX:
        return false
    }

    return false
}
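// gshift and gregshift above pack an entire shifted-operand description
// into Addr.Offset when From.Type == TYPE_SHIFT: bits 0-3 hold the
// source register, bit 4 selects register-vs-immediate shift count,
// bits 5-6 the shift kind, and the count sits at bit 7 (or the count
// register at bit 8). The arm SHIFT_* constants are pre-shifted, which
// is why gshift ORs stype in directly; the sign-extension path in gmove
// builds the same encoding by hand as 2<<5 | 31<<7 | reg. A standalone,
// illustrative encoder/decoder assuming that layout:
package main

import "fmt"

const (
    shiftLL = 0 // logical left
    shiftLR = 1 // logical right
    shiftAR = 2 // arithmetic right
    shiftRR = 3 // rotate right
)

// encodeConstShift mirrors gshift's packing: the shift kind in bits
// 5-6, a 5-bit count in bits 7-11, the source register in bits 0-3.
func encodeConstShift(stype, sval, reg int64) int64 {
    return stype<<5 | (sval&0x1f)<<7 | reg&15
}

func main() {
    // "R1 << 2" as a shifted operand.
    off := encodeConstShift(shiftLL, 2, 1)
    fmt.Printf("offset=%#x reg=R%d kind=%d count=%d\n",
        off, off&15, off>>5&3, off>>7&0x1f)
}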
func clearfat(nl *gc.Node) {
    /* clear a fat object */
    if gc.Debug['g'] != 0 {
        fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
    }

    w := uint64(nl.Type.Width)

    // Avoid taking the address for simple enough types.
    if gc.Componentgen(nil, nl) {
        return
    }

    c := w % 8 // bytes
    q := w / 8 // dwords

    if gc.Reginuse(ppc64.REGRT1) {
        gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
    }

    var r0 gc.Node
    gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
    var dst gc.Node
    gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
    gc.Regrealloc(&dst)
    gc.Agen(nl, &dst)

    var boff uint64
    if q > 128 {
        p := gins(ppc64.ASUB, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8

        var end gc.Node
        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
        p = gins(ppc64.AMOVD, &dst, &end)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = int64(q * 8)

        p = gins(ppc64.AMOVDU, &r0, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = 8
        pl := p

        p = gins(ppc64.ACMP, &dst, &end)
        gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

        gc.Regfree(&end)

        // The loop leaves R3 on the last zeroed dword
        boff = 8
    } else if q >= 4 {
        p := gins(ppc64.ASUB, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8
        f := gc.Sysfunc("duffzero")
        p = gins(obj.ADUFFZERO, nil, f)
        gc.Afunclit(&p.To, f)

        // 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
        p.To.Offset = int64(4 * (128 - q))

        // duffzero leaves R3 on the last zeroed dword
        boff = 8
    } else {
        var p *obj.Prog
        for t := uint64(0); t < q; t++ {
            p = gins(ppc64.AMOVD, &r0, &dst)
            p.To.Type = obj.TYPE_MEM
            p.To.Offset = int64(8 * t)
        }

        boff = 8 * q
    }

    var p *obj.Prog
    for t := uint64(0); t < c; t++ {
        p = gins(ppc64.AMOVB, &r0, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = int64(t + boff)
    }

    gc.Regfree(&dst)
}
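// clearfat splits the object's width into q 8-byte dwords plus c
// trailing bytes, then picks a zeroing strategy by q: an explicit
// store-with-update loop above 128 dwords, a DUFFZERO call for 4..128
// (the offset 4*(128-q) skips the first 128-q stores of runtime's
// duffzero, 4 bytes per instruction, so exactly q dwords are cleared),
// and unrolled stores below that. A tiny standalone illustration of
// the arithmetic; the strategy labels are just names for the three
// branches above:
package main

import "fmt"

func plan(width uint64) (q, c uint64, strategy string) {
    q = width / 8 // dwords
    c = width % 8 // trailing bytes
    switch {
    case q > 128:
        strategy = "loop"
    case q >= 4:
        strategy = "duffzero"
    default:
        strategy = "unrolled stores"
    }
    return
}

func main() {
    for _, w := range []uint64{27, 64, 2048} {
        q, c, s := plan(w)
        fmt.Printf("w=%d: q=%d dwords + c=%d bytes via %s\n", w, q, c, s)
    }
}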
func nodedump(n *Node, flag int) string {
    if n == nil {
        return ""
    }

    recur := flag&obj.FmtShort == 0 /*untyped*/

    var buf bytes.Buffer
    if recur {
        indent(&buf)
        if dumpdepth > 10 {
            buf.WriteString("...")
            return buf.String()
        }

        if n.Ninit != nil {
            fmt.Fprintf(&buf, "%v-init%v", Oconv(int(n.Op), 0), Hconv(n.Ninit, 0))
            indent(&buf)
        }
    }

    switch n.Op {
    default:
        fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))

    case OREGISTER, OINDREG:
        fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), obj.Rconv(int(n.Val.U.Reg)), Jconv(n, 0))

    case OLITERAL:
        fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Vconv(&n.Val, 0), Jconv(n, 0))

    case ONAME, ONONAME:
        if n.Sym != nil {
            fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0))
        } else {
            fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
        }
        if recur && n.Type == nil && n.Ntype != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
        }

    case OASOP:
        fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Oconv(int(n.Etype), 0), Jconv(n, 0))

    case OTYPE:
        fmt.Fprintf(&buf, "%v %v%v type=%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0), Tconv(n.Type, 0))
        if recur && n.Type == nil && n.Ntype != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
        }
    }

    if n.Sym != nil && n.Op != ONAME {
        fmt.Fprintf(&buf, " %v G%d", Sconv(n.Sym, 0), n.Vargen)
    }

    if n.Type != nil {
        fmt.Fprintf(&buf, " %v", Tconv(n.Type, 0))
    }

    if recur {
        if n.Left != nil {
            buf.WriteString(Nconv(n.Left, 0))
        }
        if n.Right != nil {
            buf.WriteString(Nconv(n.Right, 0))
        }
        if n.List != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-list%v", Oconv(int(n.Op), 0), Hconv(n.List, 0))
        }
        if n.Rlist != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-rlist%v", Oconv(int(n.Op), 0), Hconv(n.Rlist, 0))
        }
        if n.Ntest != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-test%v", Oconv(int(n.Op), 0), Nconv(n.Ntest, 0))
        }
        if n.Nbody != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-body%v", Oconv(int(n.Op), 0), Hconv(n.Nbody, 0))
        }
        if n.Nelse != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-else%v", Oconv(int(n.Op), 0), Hconv(n.Nelse, 0))
        }
        if n.Nincr != nil {
            indent(&buf)
            fmt.Fprintf(&buf, "%v-incr%v", Oconv(int(n.Op), 0), Nconv(n.Nincr, 0))
        }
    }

    return buf.String()
}
func exprfmt(n *Node, prec int) string {
    for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) {
        n = n.Left
    }

    if n == nil {
        return "<N>"
    }

    nprec := opprec[n.Op]
    if n.Op == OTYPE && n.Sym != nil {
        nprec = 8
    }

    if prec > nprec {
        return fmt.Sprintf("(%v)", Nconv(n, 0))
    }

    switch n.Op {
    case OPAREN:
        return fmt.Sprintf("(%v)", Nconv(n.Left, 0))

    case ODDDARG:
        return "... argument"

    case OREGISTER:
        return obj.Rconv(int(n.Val.U.Reg))

    case OLITERAL: // this is a bit of a mess
        if fmtmode == FErr {
            if n.Orig != nil && n.Orig != n {
                return exprfmt(n.Orig, prec)
            }
            if n.Sym != nil {
                return Sconv(n.Sym, 0)
            }
        }

        if n.Val.Ctype == CTNIL && n.Orig != nil && n.Orig != n {
            return exprfmt(n.Orig, prec)
        }
        if n.Type != nil && n.Type != Types[n.Type.Etype] && n.Type != idealbool && n.Type != idealstring {
            // Need parens when type begins with what might
            // be misinterpreted as a unary operator: * or <-.
            if Isptr[n.Type.Etype] || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
                return fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
            } else {
                return fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
            }
        }

        return Vconv(&n.Val, 0)

    // Special case: name used as local variable in export.
    // _ becomes ~b%d internally; print as _ for export
    case ONAME:
        if fmtmode == FExp && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
            return "_"
        }
        if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Vargen > 0 {
            return fmt.Sprintf("%v·%d", Sconv(n.Sym, 0), n.Vargen)
        }

        // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
        // but for export, this should be rendered as (*pkg.T).meth.
        // These nodes have the special property that they are names with a left OTYPE and a right ONAME.
        if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
            if Isptr[n.Left.Type.Etype] {
                return fmt.Sprintf("(%v).%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
            } else {
                return fmt.Sprintf("%v.%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
            }
        }
        fallthrough

    case OPACK, ONONAME:
        return Sconv(n.Sym, 0)

    case OTYPE:
        if n.Type == nil && n.Sym != nil {
            return Sconv(n.Sym, 0)
        }
        return Tconv(n.Type, 0)

    case OTARRAY:
        if n.Left != nil {
            return fmt.Sprintf("[]%v", Nconv(n.Left, 0))
        }
        var f string
        f += fmt.Sprintf("[]%v", Nconv(n.Right, 0))
        return f // happens before typecheck

    case OTMAP:
        return fmt.Sprintf("map[%v]%v", Nconv(n.Left, 0), Nconv(n.Right, 0))

    case OTCHAN:
        switch n.Etype {
        case Crecv:
            return fmt.Sprintf("<-chan %v", Nconv(n.Left, 0))

        case Csend:
            return fmt.Sprintf("chan<- %v", Nconv(n.Left, 0))

        default:
            if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
                return fmt.Sprintf("chan (%v)", Nconv(n.Left, 0))
            } else {
                return fmt.Sprintf("chan %v", Nconv(n.Left, 0))
            }
        }

    case OTSTRUCT:
        return "<struct>"

    case OTINTER:
        return "<inter>"

    case OTFUNC:
        return "<func>"

    case OCLOSURE:
        if fmtmode == FErr {
            return "func literal"
        }
        if n.Nbody != nil {
            return fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Nbody, 0))
        }
        return fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Closure.Nbody, 0))

    case OCOMPLIT:
        ptrlit := n.Right != nil && n.Right.Implicit && n.Right.Type != nil && Isptr[n.Right.Type.Etype]
        if fmtmode == FErr {
            if n.Right != nil && n.Right.Type != nil && !n.Implicit {
                if ptrlit {
                    return fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
                } else {
                    return fmt.Sprintf("%v literal", Tconv(n.Right.Type, 0))
                }
            }

            return "composite literal"
        }
        if fmtmode == FExp && ptrlit {
            // typecheck has overwritten OIND by OTYPE with pointer type.
            return fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
        }
        return fmt.Sprintf("(%v{ %v })", Nconv(n.Right, 0), Hconv(n.List, obj.FmtComma))

    case OPTRLIT:
        if fmtmode == FExp && n.Left.Implicit {
            return Nconv(n.Left, 0)
        }
        return fmt.Sprintf("&%v", Nconv(n.Left, 0))

    case OSTRUCTLIT:
        if fmtmode == FExp { // requires special handling of field names
            var f string
            if n.Implicit {
                f += "{"
            } else {
                f += fmt.Sprintf("(%v{", Tconv(n.Type, 0))
            }
            for l := n.List; l != nil; l = l.Next {
                f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(l.N.Right, 0))

                if l.Next != nil {
                    f += ","
                } else {
                    f += " "
                }
            }

            if !n.Implicit {
                f += "})"
                return f
            }

            f += "}"
            return f
        }
        fallthrough

    case OARRAYLIT, OMAPLIT:
        if fmtmode == FErr {
            return fmt.Sprintf("%v literal", Tconv(n.Type, 0))
        }
        if fmtmode == FExp && n.Implicit {
            return fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
        }
        return fmt.Sprintf("(%v{ %v })", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))

    case OKEY:
        if n.Left != nil && n.Right != nil {
            if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
                // requires special handling of field names
                return fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(n.Right, 0))
            } else {
                return fmt.Sprintf("%v:%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
            }
        }

        if n.Left == nil && n.Right != nil {
            return fmt.Sprintf(":%v", Nconv(n.Right, 0))
        }
        if n.Left != nil && n.Right == nil {
            return fmt.Sprintf("%v:", Nconv(n.Left, 0))
        }
        return ":"

    case OXDOT,
        ODOT,
        ODOTPTR,
        ODOTINTER,
        ODOTMETH,
        OCALLPART:
        var f string
        f += exprfmt(n.Left, nprec)
        if n.Right == nil || n.Right.Sym == nil {
            f += ".<nil>"
            return f
        }
        f += fmt.Sprintf(".%v", Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
        return f

    case ODOTTYPE, ODOTTYPE2:
        var f string
        f += exprfmt(n.Left, nprec)
        if n.Right != nil {
            f += fmt.Sprintf(".(%v)", Nconv(n.Right, 0))
            return f
        }
        f += fmt.Sprintf(".(%v)", Tconv(n.Type, 0))
        return f

    case OINDEX,
        OINDEXMAP,
        OSLICE,
        OSLICESTR,
        OSLICEARR,
        OSLICE3,
        OSLICE3ARR:
        var f string
        f += exprfmt(n.Left, nprec)
        f += fmt.Sprintf("[%v]", Nconv(n.Right, 0))
        return f

    case OCOPY, OCOMPLEX:
        return fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0), Nconv(n.Right, 0))

    case OCONV,
        OCONVIFACE,
        OCONVNOP,
        OARRAYBYTESTR,
        OARRAYRUNESTR,
        OSTRARRAYBYTE,
        OSTRARRAYRUNE,
        ORUNESTR:
        if n.Type == nil || n.Type.Sym == nil {
            return fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
        }
        if n.Left != nil {
            return fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
        }
        return fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))

    case OREAL,
        OIMAG,
        OAPPEND,
        OCAP,
        OCLOSE,
        ODELETE,
        OLEN,
        OMAKE,
        ONEW,
        OPANIC,
        ORECOVER,
        OPRINT,
        OPRINTN:
        if n.Left != nil {
            return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
        }
        if n.Isddd {
            return fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
        }
        return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))

    case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH:
        var f string
        f += exprfmt(n.Left, nprec)
        if n.Isddd {
            f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
            return f
        }
        f += fmt.Sprintf("(%v)", Hconv(n.List, obj.FmtComma))
        return f

    case OMAKEMAP, OMAKECHAN, OMAKESLICE:
        if n.List != nil { // pre-typecheck
            return fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
        }
        if n.Right != nil {
fmt.Sprintf("make(%v, %v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0), Nconv(n.Right, 0)) } if n.Left != nil && (n.Op == OMAKESLICE || !isideal(n.Left.Type)) { return fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0)) } return fmt.Sprintf("make(%v)", Tconv(n.Type, 0)) // Unary case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV: var f string if n.Left.Op == n.Op { f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp)) } else { f += Oconv(int(n.Op), obj.FmtSharp) } f += exprfmt(n.Left, nprec+1) return f // Binary case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT, OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR: var f string f += exprfmt(n.Left, nprec) f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp)) f += exprfmt(n.Right, nprec+1) return f case OADDSTR: var f string for l := n.List; l != nil; l = l.Next { if l != n.List { f += " + " } f += exprfmt(l.N, nprec) } return f case OCMPSTR, OCMPIFACE: var f string f += exprfmt(n.Left, nprec) f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp)) f += exprfmt(n.Right, nprec+1) return f } return fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0)) }
func regopt(firstp *obj.Prog) {
    mergetemp(firstp)

    // control flow is more complicated in generated go code
    // than in generated c code.  define pseudo-variables for
    // registers, so we have complete register usage information.
    var nreg int
    regnames := Thearch.Regnames(&nreg)

    nvar = nreg
    for i := 0; i < nreg; i++ {
        vars[i] = Var{}
    }
    for i := 0; i < nreg; i++ {
        if regnodes[i] == nil {
            regnodes[i] = newname(Lookup(regnames[i]))
        }
        vars[i].node = regnodes[i]
    }

    regbits = Thearch.Excludedregs()
    externs = zbits
    params = zbits
    consts = zbits
    addrs = zbits
    ivar = zbits
    ovar = zbits

    // pass 1
    // build aux data structure
    // allocate pcs
    // find use and set of variables
    g := Flowstart(firstp, func() interface{} { return new(Reg) })
    if g == nil {
        for i := 0; i < nvar; i++ {
            vars[i].node.SetOpt(nil)
        }
        return
    }

    firstf := g.Start

    for f := firstf; f != nil; f = f.Link {
        p := f.Prog
        if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
            continue
        }

        // Avoid making variables for direct-called functions.
        if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
            continue
        }

        // from vs to doesn't matter for registers.
        r := f.Data.(*Reg)
        r.use1.b[0] |= p.Info.Reguse | p.Info.Regindex
        r.set.b[0] |= p.Info.Regset

        bit := mkvar(f, &p.From)
        if bany(&bit) {
            if p.Info.Flags&LeftAddr != 0 {
                setaddrs(bit)
            }
            if p.Info.Flags&LeftRead != 0 {
                for z := 0; z < BITS; z++ {
                    r.use1.b[z] |= bit.b[z]
                }
            }
            if p.Info.Flags&LeftWrite != 0 {
                for z := 0; z < BITS; z++ {
                    r.set.b[z] |= bit.b[z]
                }
            }
        }

        // Compute used register for reg
        if p.Info.Flags&RegRead != 0 {
            r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
        }

        // Currently we never generate three register forms.
        // If we do, this will need to change.
        if p.From3Type() != obj.TYPE_NONE {
            Fatalf("regopt not implemented for from3")
        }

        bit = mkvar(f, &p.To)
        if bany(&bit) {
            if p.Info.Flags&RightAddr != 0 {
                setaddrs(bit)
            }
            if p.Info.Flags&RightRead != 0 {
                for z := 0; z < BITS; z++ {
                    r.use2.b[z] |= bit.b[z]
                }
            }
            if p.Info.Flags&RightWrite != 0 {
                for z := 0; z < BITS; z++ {
                    r.set.b[z] |= bit.b[z]
                }
            }
        }
    }

    for i := 0; i < nvar; i++ {
        v := &vars[i]
        if v.addr != 0 {
            bit := blsh(uint(i))
            for z := 0; z < BITS; z++ {
                addrs.b[z] |= bit.b[z]
            }
        }

        if Debug['R'] != 0 && Debug['v'] != 0 {
            fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset)
        }
    }

    if Debug['R'] != 0 && Debug['v'] != 0 {
        Dumpit("pass1", firstf, 1)
    }

    // pass 2
    // find looping structure
    flowrpo(g)

    if Debug['R'] != 0 && Debug['v'] != 0 {
        Dumpit("pass2", firstf, 1)
    }

    // pass 2.5
    // iterate propagating fat vardef covering forward
    // r->act records vars with a VARDEF since the last CALL.
    // (r->act will be reused in pass 5 for something else,
    // but we'll be done with it by then.)
    active := 0

    for f := firstf; f != nil; f = f.Link {
        f.Active = 0
        r := f.Data.(*Reg)
        r.act = zbits
    }

    for f := firstf; f != nil; f = f.Link {
        p := f.Prog
        if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt() != nil {
            active++
            walkvardef(p.To.Node.(*Node), f, active)
        }
    }

    // pass 3
    // iterate propagating usage
    // 	back until flow graph is complete
    var f1 *Flow

    var i int
    var f *Flow
loop1:
    change = 0

    for f = firstf; f != nil; f = f.Link {
        f.Active = 0
    }
    for f = firstf; f != nil; f = f.Link {
        if f.Prog.As == obj.ARET {
            prop(f, zbits, zbits)
        }
    }

    // pick up unreachable code
loop11:
    i = 0

    for f = firstf; f != nil; f = f1 {
        f1 = f.Link
        if f1 != nil && f1.Active != 0 && f.Active == 0 {
            prop(f, zbits, zbits)
            i = 1
        }
    }

    if i != 0 {
        goto loop11
    }
    if change != 0 {
        goto loop1
    }

    if Debug['R'] != 0 && Debug['v'] != 0 {
        Dumpit("pass3", firstf, 1)
    }

    // pass 4
    // iterate propagating register/variable synchrony
    // 	forward until graph is complete
loop2:
    change = 0

    for f = firstf; f != nil; f = f.Link {
        f.Active = 0
    }
    synch(firstf, zbits)
    if change != 0 {
        goto loop2
    }

    if Debug['R'] != 0 && Debug['v'] != 0 {
        Dumpit("pass4", firstf, 1)
    }

    // pass 4.5
    // move register pseudo-variables into regu.
    mask := uint64((1 << uint(nreg)) - 1)
    for f := firstf; f != nil; f = f.Link {
        r := f.Data.(*Reg)
        r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
        r.set.b[0] &^= mask
        r.use1.b[0] &^= mask
        r.use2.b[0] &^= mask
        r.refbehind.b[0] &^= mask
        r.refahead.b[0] &^= mask
        r.calbehind.b[0] &^= mask
        r.calahead.b[0] &^= mask
        r.regdiff.b[0] &^= mask
        r.act.b[0] &^= mask
    }

    if Debug['R'] != 0 && Debug['v'] != 0 {
        Dumpit("pass4.5", firstf, 1)
    }

    // pass 5
    // isolate regions
    // calculate costs (paint1)
    var bit Bits
    if f := firstf; f != nil {
        r := f.Data.(*Reg)
        for z := 0; z < BITS; z++ {
            bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
        }
        if bany(&bit) && !f.Refset {
            // should never happen - all variables are preset
            if Debug['w'] != 0 {
                fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), &bit)
            }
            f.Refset = true
        }
    }

    for f := firstf; f != nil; f = f.Link {
        (f.Data.(*Reg)).act = zbits
    }
    nregion = 0
    region = region[:0]
    var rgp *Rgn
    for f := firstf; f != nil; f = f.Link {
        r := f.Data.(*Reg)
        for z := 0; z < BITS; z++ {
            bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
        }
        if bany(&bit) && !f.Refset {
            if Debug['w'] != 0 {
                fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), &bit)
            }
            f.Refset = true
            Thearch.Excise(f)
        }

        for z := 0; z < BITS; z++ {
            bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
        }
        for bany(&bit) {
            i = bnum(&bit)
            change = 0
            paint1(f, i)
            biclr(&bit, uint(i))
            if change <= 0 {
                continue
            }
            if nregion >= MaxRgn {
                nregion++
                continue
            }

            region = append(region, Rgn{
                enter: f,
                cost:  int16(change),
                varno: int16(i),
            })
            nregion++
        }
    }

    if false && Debug['v'] != 0 && strings.Contains(Curfn.Func.Nname.Sym.Name, "Parse") {
        Warn("regions: %d\n", nregion)
    }
    if nregion >= MaxRgn {
        if Debug['v'] != 0 {
            Warn("too many regions: %d\n", nregion)
        }
        nregion = MaxRgn
    }

    sort.Sort(rcmp(region[:nregion]))

    if Debug['R'] != 0 && Debug['v'] != 0 {
        Dumpit("pass5", firstf, 1)
    }

    // pass 6
    // determine used registers (paint2)
    // replace code (paint3)
    if Debug['R'] != 0 && Debug['v'] != 0 {
        fmt.Printf("\nregisterizing\n")
    }
    var usedreg uint64
    var vreg uint64
    for i := 0; i < nregion; i++ {
        rgp = &region[i]
        if Debug['R'] != 0 && Debug['v'] != 0 {
            fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
        }
        bit = blsh(uint(rgp.varno))
        usedreg = paint2(rgp.enter, int(rgp.varno), 0)
        vreg = allreg(usedreg, rgp)
        if rgp.regno != 0 {
            if Debug['R'] != 0 && Debug['v'] != 0 {
                v := &vars[rgp.varno]
                fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg)
            }

            paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
        }
    }

    // free aux structures. peep allocates new ones.
    for i := 0; i < nvar; i++ {
        vars[i].node.SetOpt(nil)
    }
    Flowend(g)
    firstf = nil

    if Debug['R'] != 0 && Debug['v'] != 0 {
        // Rebuild flow graph, since we inserted instructions
        g := Flowstart(firstp, nil)

        firstf = g.Start
        Dumpit("pass6", firstf, 0)
        Flowend(g)
        firstf = nil
    }

    // pass 7
    // peep-hole on basic block
    if Debug['R'] == 0 || Debug['P'] != 0 {
        Thearch.Peep(firstp)
    }

    // eliminate nops
    for p := firstp; p != nil; p = p.Link {
        for p.Link != nil && p.Link.As == obj.ANOP {
            p.Link = p.Link.Link
        }

        if p.To.Type == obj.TYPE_BRANCH {
            for p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.ANOP {
                p.To.Val = p.To.Val.(*obj.Prog).Link
            }
        }
    }

    if Debug['R'] != 0 {
        if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 || false {
            fmt.Printf("\nstats\n")
        }

        if Ostats.Ncvtreg != 0 {
            fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
        }
        if Ostats.Nspill != 0 {
            fmt.Printf("\t%4d spill\n", Ostats.Nspill)
        }
        if Ostats.Nreload != 0 {
            fmt.Printf("\t%4d reload\n", Ostats.Nreload)
        }
        if Ostats.Ndelmov != 0 {
            fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
        }
        if Ostats.Nvar != 0 {
            fmt.Printf("\t%4d var\n", Ostats.Nvar)
        }
        if Ostats.Naddr != 0 {
            fmt.Printf("\t%4d addr\n", Ostats.Naddr)
        }

        Ostats = OptStats{}
    }
}
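// Pass 4.5 relies on the register pseudo-variables created at the top
// of regopt occupying bit positions 0..nreg-1, so a single mask of the
// low nreg bits can peel them out of word 0 of every bit vector while
// the higher bits keep tracking real program variables. A standalone
// illustration of that bit arithmetic; the sample values are invented:
package main

import "fmt"

func main() {
    const nreg = 16
    mask := uint64((1 << nreg) - 1) // low nreg bits: the register pseudo-vars

    refbehind := uint64(0x00052a01) // some register bits + some variable bits
    set := uint64(0x00010004)

    regu := (refbehind | set) & mask // registers referenced or set here
    refbehind &^= mask               // what remains are real program variables

    fmt.Printf("mask=%#x regu=%#x vars=%#x\n", mask, regu, refbehind)
}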
func clearfat(nl *gc.Node) {
    /* clear a fat object */
    if gc.Debug['g'] != 0 {
        fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
    }

    w := uint64(nl.Type.Width)

    // Avoid taking the address for simple enough types.
    if gc.Componentgen(nil, nl) {
        return
    }

    c := w % 8 // bytes
    q := w / 8 // dwords

    if gc.Reginuse(mips.REGRT1) {
        gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
    }

    var r0 gc.Node
    gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
    var dst gc.Node
    gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
    gc.Regrealloc(&dst)
    gc.Agen(nl, &dst)

    var boff uint64
    if q > 128 {
        p := gins(mips.ASUBV, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8

        var end gc.Node
        gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
        p = gins(mips.AMOVV, &dst, &end)
        p.From.Type = obj.TYPE_ADDR
        p.From.Offset = int64(q * 8)

        p = gins(mips.AMOVV, &r0, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = 8
        pl := p

        p = gins(mips.AADDV, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8

        gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)
        gc.Regfree(&end)

        // The loop leaves R1 on the last zeroed dword
        boff = 8

        // TODO(dfc): https://golang.org/issue/12108
        // If DUFFZERO is used inside a tail call (see genwrapper) it will
        // overwrite the link register.
    } else if false && q >= 4 {
        p := gins(mips.ASUBV, nil, &dst)
        p.From.Type = obj.TYPE_CONST
        p.From.Offset = 8
        f := gc.Sysfunc("duffzero")
        p = gins(obj.ADUFFZERO, nil, f)
        gc.Afunclit(&p.To, f)

        // 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
        p.To.Offset = int64(8 * (128 - q))

        // duffzero leaves R1 on the last zeroed dword
        boff = 8
    } else {
        var p *obj.Prog
        for t := uint64(0); t < q; t++ {
            p = gins(mips.AMOVV, &r0, &dst)
            p.To.Type = obj.TYPE_MEM
            p.To.Offset = int64(8 * t)
        }

        boff = 8 * q
    }

    var p *obj.Prog
    for t := uint64(0); t < c; t++ {
        p = gins(mips.AMOVB, &r0, &dst)
        p.To.Type = obj.TYPE_MEM
        p.To.Offset = int64(t + boff)
    }

    gc.Regfree(&dst)
}
func (n *Node) exprfmt(s fmt.State, prec int) { for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) { n = n.Left } if n == nil { fmt.Fprint(s, "<N>") return } nprec := opprec[n.Op] if n.Op == OTYPE && n.Sym != nil { nprec = 8 } if prec > nprec { fmt.Fprintf(s, "(%v)", n) return } switch n.Op { case OPAREN: fmt.Fprintf(s, "(%v)", n.Left) case ODDDARG: fmt.Fprint(s, "... argument") case OREGISTER: fmt.Fprint(s, obj.Rconv(int(n.Reg))) case OLITERAL: // this is a bit of a mess if fmtmode == FErr { if n.Orig != nil && n.Orig != n { n.Orig.exprfmt(s, prec) return } if n.Sym != nil { fmt.Fprint(s, n.Sym.String()) return } } if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n { n.Orig.exprfmt(s, prec) return } if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == Crecv) { fmt.Fprintf(s, "(%v)(%v)", n.Type, n.Val()) return } else { fmt.Fprintf(s, "%v(%v)", n.Type, n.Val()) return } } fmt.Fprintf(s, "%v", n.Val()) // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export case ONAME: if fmtmode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' { fmt.Fprint(s, "_") return } fallthrough case OPACK, ONONAME: fmt.Fprint(s, n.Sym.String()) case OTYPE: if n.Type == nil && n.Sym != nil { fmt.Fprint(s, n.Sym.String()) return } fmt.Fprintf(s, "%v", n.Type) case OTARRAY: if n.Left != nil { fmt.Fprintf(s, "[]%v", n.Left) return } fmt.Fprintf(s, "[]%v", n.Right) // happens before typecheck case OTMAP: fmt.Fprintf(s, "map[%v]%v", n.Left, n.Right) case OTCHAN: switch ChanDir(n.Etype) { case Crecv: fmt.Fprintf(s, "<-chan %v", n.Left) case Csend: fmt.Fprintf(s, "chan<- %v", n.Left) default: if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && ChanDir(n.Left.Etype) == Crecv { fmt.Fprintf(s, "chan (%v)", n.Left) } else { fmt.Fprintf(s, "chan %v", n.Left) } } case OTSTRUCT: fmt.Fprint(s, "<struct>") case OTINTER: fmt.Fprint(s, "<inter>") case OTFUNC: fmt.Fprint(s, "<func>") case OCLOSURE: if fmtmode == FErr { fmt.Fprint(s, "func literal") return } if n.Nbody.Len() != 0 { fmt.Fprintf(s, "%v { %v }", n.Type, n.Nbody) return } fmt.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody) case OCOMPLIT: ptrlit := n.Right != nil && n.Right.Implicit && n.Right.Type != nil && n.Right.Type.IsPtr() if fmtmode == FErr { if n.Right != nil && n.Right.Type != nil && !n.Implicit { if ptrlit { fmt.Fprintf(s, "&%v literal", n.Right.Type.Elem()) return } else { fmt.Fprintf(s, "%v literal", n.Right.Type) return } } fmt.Fprint(s, "composite literal") return } fmt.Fprintf(s, "(%v{ %.v })", n.Right, n.List) case OPTRLIT: fmt.Fprintf(s, "&%v", n.Left) case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT: if fmtmode == FErr { fmt.Fprintf(s, "%v literal", n.Type) return } fmt.Fprintf(s, "(%v{ %.v })", n.Type, n.List) case OKEY: if n.Left != nil && n.Right != nil { fmt.Fprintf(s, "%v:%v", n.Left, n.Right) return } if n.Left == nil && n.Right != nil { fmt.Fprintf(s, ":%v", n.Right) return } if n.Left != nil && n.Right == nil { fmt.Fprintf(s, "%v:", n.Left) return } fmt.Fprint(s, ":") case OSTRUCTKEY: fmt.Fprintf(s, "%v:%v", n.Sym, n.Left) case OCALLPART: n.Left.exprfmt(s, nprec) if n.Right == nil || n.Right.Sym == nil { fmt.Fprint(s, ".<nil>") return } fmt.Fprintf(s, ".%0S", n.Right.Sym) case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: 
n.Left.exprfmt(s, nprec) if n.Sym == nil { fmt.Fprint(s, ".<nil>") return } fmt.Fprintf(s, ".%0S", n.Sym) case ODOTTYPE, ODOTTYPE2: n.Left.exprfmt(s, nprec) if n.Right != nil { fmt.Fprintf(s, ".(%v)", n.Right) return } fmt.Fprintf(s, ".(%v)", n.Type) case OINDEX, OINDEXMAP: n.Left.exprfmt(s, nprec) fmt.Fprintf(s, "[%v]", n.Right) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: n.Left.exprfmt(s, nprec) fmt.Fprint(s, "[") low, high, max := n.SliceBounds() if low != nil { fmt.Fprint(s, low.String()) } fmt.Fprint(s, ":") if high != nil { fmt.Fprint(s, high.String()) } if n.Op.IsSlice3() { fmt.Fprint(s, ":") if max != nil { fmt.Fprint(s, max.String()) } } fmt.Fprint(s, "]") case OCOPY, OCOMPLEX: fmt.Fprintf(s, "%#v(%v, %v)", n.Op, n.Left, n.Right) case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR: if n.Type == nil || n.Type.Sym == nil { fmt.Fprintf(s, "(%v)(%v)", n.Type, n.Left) return } if n.Left != nil { fmt.Fprintf(s, "%v(%v)", n.Type, n.Left) return } fmt.Fprintf(s, "%v(%.v)", n.Type, n.List) case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN: if n.Left != nil { fmt.Fprintf(s, "%#v(%v)", n.Op, n.Left) return } if n.Isddd { fmt.Fprintf(s, "%#v(%.v...)", n.Op, n.List) return } fmt.Fprintf(s, "%#v(%.v)", n.Op, n.List) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: n.Left.exprfmt(s, nprec) if n.Isddd { fmt.Fprintf(s, "(%.v...)", n.List) return } fmt.Fprintf(s, "(%.v)", n.List) case OMAKEMAP, OMAKECHAN, OMAKESLICE: if n.List.Len() != 0 { // pre-typecheck fmt.Fprintf(s, "make(%v, %.v)", n.Type, n.List) return } if n.Right != nil { fmt.Fprintf(s, "make(%v, %v, %v)", n.Type, n.Left, n.Right) return } if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) { fmt.Fprintf(s, "make(%v, %v)", n.Type, n.Left) return } fmt.Fprintf(s, "make(%v)", n.Type) // Unary case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV: fmt.Fprint(s, n.Op.GoString()) // %#v if n.Left.Op == n.Op { fmt.Fprint(s, " ") } n.Left.exprfmt(s, nprec+1) // Binary case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT, OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR: n.Left.exprfmt(s, nprec) fmt.Fprintf(s, " %#v ", n.Op) n.Right.exprfmt(s, nprec+1) case OADDSTR: i := 0 for _, n1 := range n.List.Slice() { if i != 0 { fmt.Fprint(s, " + ") } n1.exprfmt(s, nprec) i++ } case OCMPSTR, OCMPIFACE: n.Left.exprfmt(s, nprec) // TODO(marvin): Fix Node.EType type union. fmt.Fprintf(s, " %#v ", Op(n.Etype)) n.Right.exprfmt(s, nprec+1) default: fmt.Fprintf(s, "<node %v>", n.Op) } }
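// exprfmt parenthesizes by precedence: each op has a precedence nprec from
// opprec, a node wraps itself in parens only when the caller's prec is
// higher, and binary operators print the left child at nprec but the right
// child at nprec+1, which keeps parens on right-nested trees like a-(b-c)
// while dropping them on left-nested ones. A minimal sketch of the same
// scheme over a toy expression tree (expr, leaf, and binop are hypothetical):

package main

import "fmt"

type expr interface {
	format(prec int) string
}

type leaf string

func (l leaf) format(int) string { return string(l) }

// binop is a left-associative binary operator with its own precedence.
type binop struct {
	op          string
	prec        int
	left, right expr
}

func (b binop) format(prec int) string {
	// Left child at our precedence, right child one tighter, so only
	// right-nested same-precedence subtrees keep their parentheses.
	s := b.left.format(b.prec) + " " + b.op + " " + b.right.format(b.prec+1)
	if prec > b.prec {
		return "(" + s + ")"
	}
	return s
}

func main() {
	rightNested := binop{"-", 4, leaf("a"), binop{"-", 4, leaf("b"), leaf("c")}}
	leftNested := binop{"-", 4, binop{"-", 4, leaf("a"), leaf("b")}, leaf("c")}
	fmt.Println(rightNested.format(0)) // a - (b - c)
	fmt.Println(leftNested.format(0))  // a - b - c
}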
func (n *Node) nodedump(s fmt.State, flag FmtFlag) {
	if n == nil {
		return
	}

	recur := flag&FmtShort == 0

	if recur {
		indent(s)
		if dumpdepth > 10 {
			fmt.Fprint(s, "...")
			return
		}

		if n.Ninit.Len() != 0 {
			fmt.Fprintf(s, "%v-init%v", n.Op, n.Ninit)
			indent(s)
		}
	}

	switch n.Op {
	default:
		fmt.Fprintf(s, "%v%j", n.Op, n)

	case OREGISTER, OINDREG:
		fmt.Fprintf(s, "%v-%v%j", n.Op, obj.Rconv(int(n.Reg)), n)

	case OLITERAL:
		fmt.Fprintf(s, "%v-%v%j", n.Op, n.Val(), n)

	case ONAME, ONONAME:
		if n.Sym != nil {
			fmt.Fprintf(s, "%v-%v%j", n.Op, n.Sym, n)
		} else {
			fmt.Fprintf(s, "%v%j", n.Op, n)
		}
		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
			indent(s)
			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}

	case OASOP:
		fmt.Fprintf(s, "%v-%v%j", n.Op, Op(n.Etype), n)

	case OTYPE:
		fmt.Fprintf(s, "%v %v%j type=%v", n.Op, n.Sym, n, n.Type)
		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
			indent(s)
			fmt.Fprintf(s, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}
	}

	if n.Sym != nil && n.Op != ONAME {
		fmt.Fprintf(s, " %v", n.Sym)
	}

	if n.Type != nil {
		fmt.Fprintf(s, " %v", n.Type)
	}

	if recur {
		if n.Left != nil {
			fmt.Fprintf(s, "%v", n.Left)
		}
		if n.Right != nil {
			fmt.Fprintf(s, "%v", n.Right)
		}
		if n.List.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-list%v", n.Op, n.List)
		}
		if n.Rlist.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-rlist%v", n.Op, n.Rlist)
		}
		if n.Nbody.Len() != 0 {
			indent(s)
			fmt.Fprintf(s, "%v-body%v", n.Op, n.Nbody)
		}
	}
}
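// nodedump guards its recursion with the global dumpdepth and the indent
// helper, cutting the tree off with "..." past ten levels so huge or cyclic
// IR stays printable. The same pattern in miniature, with the depth passed
// explicitly (node and dump are hypothetical stand-ins):

package main

import (
	"fmt"
	"strings"
)

type node struct {
	name     string
	children []*node
}

// dump prints one node per line, indented by depth, and truncates with
// "..." once the tree is more than ten levels deep.
func dump(n *node, depth int) {
	fmt.Printf("%s%s\n", strings.Repeat(".   ", depth), n.name)
	if depth >= 10 {
		if len(n.children) > 0 {
			fmt.Printf("%s...\n", strings.Repeat(".   ", depth+1))
		}
		return
	}
	for _, c := range n.children {
		dump(c, depth+1)
	}
}

func main() {
	// A 15-deep chain: output stops with "..." after depth 10.
	root := &node{name: "n0"}
	cur := root
	for i := 1; i < 15; i++ {
		next := &node{name: fmt.Sprintf("n%d", i)}
		cur.children = []*node{next}
		cur = next
	}
	dump(root, 0)
}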
func anyregalloc() bool { var j int for i := x86.REG_AX; i <= x86.REG_DI; i++ { if reg[i] == 0 { goto ok } for j = 0; j < len(resvd); j++ { if resvd[j] == i { goto ok } } return true ok: } for i := x86.REG_X0; i <= x86.REG_X7; i++ { if reg[i] != 0 { return true } } return false } /* * allocate register of type t, leave in n. * if o != N, o is desired fixed register. * caller must regfree(n). */ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) { if t == nil { gc.Fatal("regalloc: t nil") } et := int(gc.Simtype[t.Etype]) var i int switch et { case gc.TINT64, gc.TUINT64: gc.Fatal("regalloc64") case gc.TINT8, gc.TUINT8, gc.TINT16, gc.TUINT16, gc.TINT32, gc.TUINT32, gc.TPTR32, gc.TPTR64, gc.TBOOL: if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= x86.REG_AX && i <= x86.REG_DI { goto out } } for i = x86.REG_AX; i <= x86.REG_DI; i++ { if reg[i] == 0 { goto out } } fmt.Printf("registers allocated at\n") for i := x86.REG_AX; i <= x86.REG_DI; i++ { fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i]) } gc.Fatal("out of fixed registers") goto err case gc.TFLOAT32, gc.TFLOAT64: if gc.Use_sse == 0 { i = x86.REG_F0 goto out } if o != nil && o.Op == gc.OREGISTER { i = int(o.Val.U.Reg) if i >= x86.REG_X0 && i <= x86.REG_X7 { goto out } } for i = x86.REG_X0; i <= x86.REG_X7; i++ { if reg[i] == 0 { goto out } } fmt.Printf("registers allocated at\n") for i := x86.REG_X0; i <= x86.REG_X7; i++ { fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i]) } gc.Fatal("out of floating registers") } gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0)) err: gc.Nodreg(n, t, 0) return out: if i == x86.REG_SP { fmt.Printf("alloc SP\n") } if reg[i] == 0 { regpc[i] = uint32(obj.Getcallerpc(&n)) if i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP { gc.Dump("regalloc-o", o) gc.Fatal("regalloc %v", obj.Rconv(i)) } } reg[i]++ gc.Nodreg(n, t, i) } func regfree(n *gc.Node) { if n.Op == gc.ONAME { return } if n.Op != gc.OREGISTER && n.Op != gc.OINDREG { gc.Fatal("regfree: not a register") } i := int(n.Val.U.Reg) if i == x86.REG_SP { return } if i < 0 || i >= len(reg) { gc.Fatal("regfree: reg out of range") } if reg[i] <= 0 { gc.Fatal("regfree: reg not allocated") } reg[i]-- if reg[i] == 0 && (i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP) { gc.Fatal("regfree %v", obj.Rconv(i)) } } /* * generate * as $c, reg */ func gconreg(as int, c int64, reg int) { var n1 gc.Node var n2 gc.Node gc.Nodconst(&n1, gc.Types[gc.TINT64], c) gc.Nodreg(&n2, gc.Types[gc.TINT64], reg) gins(as, &n1, &n2) } /* * swap node contents */ func nswap(a *gc.Node, b *gc.Node) { t := *a *a = *b *b = t } /* * return constant i node. * overwritten by next call, but useful in calls to gins. */ var ncon_n gc.Node func ncon(i uint32) *gc.Node { if ncon_n.Type == nil { gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0) } gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i)) return &ncon_n } var sclean [10]gc.Node var nsclean int /* * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves. 
*/ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) { if !gc.Is64(n.Type) { gc.Fatal("split64 %v", gc.Tconv(n.Type, 0)) } if nsclean >= len(sclean) { gc.Fatal("split64 clean") } sclean[nsclean].Op = gc.OEMPTY nsclean++ switch n.Op { default: switch n.Op { default: var n1 gc.Node if !dotaddable(n, &n1) { igen(n, &n1, nil) sclean[nsclean-1] = n1 } n = &n1 case gc.ONAME: if n.Class == gc.PPARAMREF { var n1 gc.Node cgen(n.Heapaddr, &n1) sclean[nsclean-1] = n1 n = &n1 } // nothing case gc.OINDREG: break } *lo = *n *hi = *n lo.Type = gc.Types[gc.TUINT32] if n.Type.Etype == gc.TINT64 { hi.Type = gc.Types[gc.TINT32] } else { hi.Type = gc.Types[gc.TUINT32] } hi.Xoffset += 4 case gc.OLITERAL: var n1 gc.Node gc.Convconst(&n1, n.Type, &n.Val) i := gc.Mpgetfix(n1.Val.U.Xval) gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i))) i >>= 32 if n.Type.Etype == gc.TINT64 { gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i))) } else { gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i))) } } } func splitclean() { if nsclean <= 0 { gc.Fatal("splitclean") } nsclean-- if sclean[nsclean].Op != gc.OEMPTY { regfree(&sclean[nsclean]) } } /* * set up nodes representing fp constants */ var zerof gc.Node var two64f gc.Node var two63f gc.Node var bignodes_did int func bignodes() { if bignodes_did != 0 { return } bignodes_did = 1 two64f = *ncon(0) two64f.Type = gc.Types[gc.TFLOAT64] two64f.Val.Ctype = gc.CTFLT two64f.Val.U.Fval = new(gc.Mpflt) gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.) two63f = two64f two63f.Val.U.Fval = new(gc.Mpflt) gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.) zerof = two64f zerof.Val.U.Fval = new(gc.Mpflt) gc.Mpmovecflt(zerof.Val.U.Fval, 0) } func memname(n *gc.Node, t *gc.Type) { gc.Tempname(n, t) n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing n.Orig.Sym = n.Sym } func gmove(f *gc.Node, t *gc.Node) { if gc.Debug['M'] != 0 { fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0)) } ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type if gc.Iscomplex[ft] || gc.Iscomplex[tt] { gc.Complexmove(f, t) return } if gc.Isfloat[ft] || gc.Isfloat[tt] { floatmove(f, t) return } // cannot have two integer memory operands; // except 64-bit, which always copies via registers anyway. var r1 gc.Node var a int if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = gc.Simsimtype(con.Type) } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. 
switch uint32(ft)<<16 | uint32(tt) { default: // should not happen gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return /* * integer copy and truncate */ case gc.TINT8<<16 | gc.TINT8, // same size gc.TINT8<<16 | gc.TUINT8, gc.TUINT8<<16 | gc.TINT8, gc.TUINT8<<16 | gc.TUINT8: a = x86.AMOVB case gc.TINT16<<16 | gc.TINT8, // truncate gc.TUINT16<<16 | gc.TINT8, gc.TINT32<<16 | gc.TINT8, gc.TUINT32<<16 | gc.TINT8, gc.TINT16<<16 | gc.TUINT8, gc.TUINT16<<16 | gc.TUINT8, gc.TINT32<<16 | gc.TUINT8, gc.TUINT32<<16 | gc.TUINT8: a = x86.AMOVB goto rsrc case gc.TINT64<<16 | gc.TINT8, // truncate low word gc.TUINT64<<16 | gc.TINT8, gc.TINT64<<16 | gc.TUINT8, gc.TUINT64<<16 | gc.TUINT8: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVB, &r1, t) splitclean() return case gc.TINT16<<16 | gc.TINT16, // same size gc.TINT16<<16 | gc.TUINT16, gc.TUINT16<<16 | gc.TINT16, gc.TUINT16<<16 | gc.TUINT16: a = x86.AMOVW case gc.TINT32<<16 | gc.TINT16, // truncate gc.TUINT32<<16 | gc.TINT16, gc.TINT32<<16 | gc.TUINT16, gc.TUINT32<<16 | gc.TUINT16: a = x86.AMOVW goto rsrc case gc.TINT64<<16 | gc.TINT16, // truncate low word gc.TUINT64<<16 | gc.TINT16, gc.TINT64<<16 | gc.TUINT16, gc.TUINT64<<16 | gc.TUINT16: var flo gc.Node var fhi gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVW, &r1, t) splitclean() return case gc.TINT32<<16 | gc.TINT32, // same size gc.TINT32<<16 | gc.TUINT32, gc.TUINT32<<16 | gc.TINT32, gc.TUINT32<<16 | gc.TUINT32: a = x86.AMOVL case gc.TINT64<<16 | gc.TINT32, // truncate gc.TUINT64<<16 | gc.TINT32, gc.TINT64<<16 | gc.TUINT32, gc.TUINT64<<16 | gc.TUINT32: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var r1 gc.Node gc.Nodreg(&r1, t.Type, x86.REG_AX) gmove(&flo, &r1) gins(x86.AMOVL, &r1, t) splitclean() return case gc.TINT64<<16 | gc.TINT64, // same size gc.TINT64<<16 | gc.TUINT64, gc.TUINT64<<16 | gc.TINT64, gc.TUINT64<<16 | gc.TUINT64: var fhi gc.Node var flo gc.Node split64(f, &flo, &fhi) var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) if f.Op == gc.OLITERAL { gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) } else { var r1 gc.Node gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX) var r2 gc.Node gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX) gins(x86.AMOVL, &flo, &r1) gins(x86.AMOVL, &fhi, &r2) gins(x86.AMOVL, &r1, &tlo) gins(x86.AMOVL, &r2, &thi) } splitclean() splitclean() return /* * integer up-conversions */ case gc.TINT8<<16 | gc.TINT16, // sign extend int8 gc.TINT8<<16 | gc.TUINT16: a = x86.AMOVBWSX goto rdst case gc.TINT8<<16 | gc.TINT32, gc.TINT8<<16 | gc.TUINT32: a = x86.AMOVBLSX goto rdst case gc.TINT8<<16 | gc.TINT64, // convert via int32 gc.TINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8 gc.TUINT8<<16 | gc.TUINT16: a = x86.AMOVBWZX goto rdst case gc.TUINT8<<16 | gc.TINT32, gc.TUINT8<<16 | gc.TUINT32: a = x86.AMOVBLZX goto rdst case gc.TUINT8<<16 | gc.TINT64, // convert via uint32 gc.TUINT8<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT16<<16 | gc.TINT32, // sign extend int16 gc.TINT16<<16 | gc.TUINT32: a = x86.AMOVWLSX goto rdst case gc.TINT16<<16 | gc.TINT64, // convert via int32 gc.TINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TINT32] goto hard case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16 gc.TUINT16<<16 | gc.TUINT32: a = x86.AMOVWLZX goto rdst case gc.TUINT16<<16 | gc.TINT64, // convert via uint32 
gc.TUINT16<<16 | gc.TUINT64: cvt = gc.Types[gc.TUINT32] goto hard case gc.TINT32<<16 | gc.TINT64, // sign extend int32 gc.TINT32<<16 | gc.TUINT64: var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) var flo gc.Node gc.Nodreg(&flo, tlo.Type, x86.REG_AX) var fhi gc.Node gc.Nodreg(&fhi, thi.Type, x86.REG_DX) gmove(f, &flo) gins(x86.ACDQ, nil, nil) gins(x86.AMOVL, &flo, &tlo) gins(x86.AMOVL, &fhi, &thi) splitclean() return case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32 gc.TUINT32<<16 | gc.TUINT64: var tlo gc.Node var thi gc.Node split64(t, &tlo, &thi) gmove(f, &tlo) gins(x86.AMOVL, ncon(0), &thi) splitclean() return } gins(a, f, t) return // requires register source rsrc: regalloc(&r1, f.Type, t) gmove(f, &r1) gins(a, &r1, t) regfree(&r1) return // requires register destination rdst: { regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return } func floatmove(f *gc.Node, t *gc.Node) { var r1 gc.Node ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type // cannot have two floating point memory operands. if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) { goto hard } // convert constant to desired type if f.Op == gc.OLITERAL { var con gc.Node gc.Convconst(&con, t.Type, &f.Val) f = &con ft = gc.Simsimtype(con.Type) // some constants can't move directly to memory. if gc.Ismem(t) { // float constants come from memory. if gc.Isfloat[tt] { goto hard } } } // value -> value copy, only one memory operand. // figure out the instruction to use. // break out of switch for one-instruction gins. // goto rdst for "destination must be register". // goto hard for "convert to cvt type first". // otherwise handle and return. switch uint32(ft)<<16 | uint32(tt) { default: if gc.Use_sse != 0 { floatmove_sse(f, t) } else { floatmove_387(f, t) } return // float to very long integer. 
case gc.TFLOAT32<<16 | gc.TINT64, gc.TFLOAT64<<16 | gc.TINT64: if f.Op == gc.OREGISTER { cvt = f.Type goto hardmem } var r1 gc.Node gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0) if ft == gc.TFLOAT32 { gins(x86.AFMOVF, f, &r1) } else { gins(x86.AFMOVD, f, &r1) } // set round to zero mode during conversion var t1 gc.Node memname(&t1, gc.Types[gc.TUINT16]) var t2 gc.Node memname(&t2, gc.Types[gc.TUINT16]) gins(x86.AFSTCW, nil, &t1) gins(x86.AMOVW, ncon(0xf7f), &t2) gins(x86.AFLDCW, &t2, nil) if tt == gc.TINT16 { gins(x86.AFMOVWP, &r1, t) } else if tt == gc.TINT32 { gins(x86.AFMOVLP, &r1, t) } else { gins(x86.AFMOVVP, &r1, t) } gins(x86.AFLDCW, &t1, nil) return case gc.TFLOAT32<<16 | gc.TUINT64, gc.TFLOAT64<<16 | gc.TUINT64: if !gc.Ismem(f) { cvt = f.Type goto hardmem } bignodes() var f0 gc.Node gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0) var f1 gc.Node gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1) var ax gc.Node gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX) if ft == gc.TFLOAT32 { gins(x86.AFMOVF, f, &f0) } else { gins(x86.AFMOVD, f, &f0) } // if 0 > v { answer = 0 } gins(x86.AFMOVD, &zerof, &f0) gins(x86.AFUCOMIP, &f0, &f1) p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0) // if 1<<64 <= v { answer = 0 too } gins(x86.AFMOVD, &two64f, &f0) gins(x86.AFUCOMIP, &f0, &f1) p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0) gc.Patch(p1, gc.Pc) gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack var thi gc.Node var tlo gc.Node split64(t, &tlo, &thi) gins(x86.AMOVL, ncon(0), &tlo) gins(x86.AMOVL, ncon(0), &thi) splitclean() p1 = gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p2, gc.Pc) // in range; algorithm is: // if small enough, use native float64 -> int64 conversion. // otherwise, subtract 2^63, convert, and add it back. // set round to zero mode during conversion var t1 gc.Node memname(&t1, gc.Types[gc.TUINT16]) var t2 gc.Node memname(&t2, gc.Types[gc.TUINT16]) gins(x86.AFSTCW, nil, &t1) gins(x86.AMOVW, ncon(0xf7f), &t2) gins(x86.AFLDCW, &t2, nil) // actual work gins(x86.AFMOVD, &two63f, &f0) gins(x86.AFUCOMIP, &f0, &f1) p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0) gins(x86.AFMOVVP, &f0, t) p3 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p2, gc.Pc) gins(x86.AFMOVD, &two63f, &f0) gins(x86.AFSUBDP, &f0, &f1) gins(x86.AFMOVVP, &f0, t) split64(t, &tlo, &thi) gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63 gc.Patch(p3, gc.Pc) splitclean() // restore rounding mode gins(x86.AFLDCW, &t1, nil) gc.Patch(p1, gc.Pc) return /* * integer to float */ case gc.TINT64<<16 | gc.TFLOAT32, gc.TINT64<<16 | gc.TFLOAT64: if t.Op == gc.OREGISTER { goto hardmem } var f0 gc.Node gc.Nodreg(&f0, t.Type, x86.REG_F0) gins(x86.AFMOVV, f, &f0) if tt == gc.TFLOAT32 { gins(x86.AFMOVFP, &f0, t) } else { gins(x86.AFMOVDP, &f0, t) } return // algorithm is: // if small enough, use native int64 -> float64 conversion. // otherwise, halve (rounding to odd?), convert, and double. 
case gc.TUINT64<<16 | gc.TFLOAT32, gc.TUINT64<<16 | gc.TFLOAT64: var ax gc.Node gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX) var dx gc.Node gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX) var cx gc.Node gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX) var t1 gc.Node gc.Tempname(&t1, f.Type) var tlo gc.Node var thi gc.Node split64(&t1, &tlo, &thi) gmove(f, &t1) gins(x86.ACMPL, &thi, ncon(0)) p1 := gc.Gbranch(x86.AJLT, nil, 0) // native var r1 gc.Node gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0) gins(x86.AFMOVV, &t1, &r1) if tt == gc.TFLOAT32 { gins(x86.AFMOVFP, &r1, t) } else { gins(x86.AFMOVDP, &r1, t) } p2 := gc.Gbranch(obj.AJMP, nil, 0) // simulated gc.Patch(p1, gc.Pc) gmove(&tlo, &ax) gmove(&thi, &dx) p1 = gins(x86.ASHRL, ncon(1), &ax) p1.From.Index = x86.REG_DX // double-width shift DX -> AX p1.From.Scale = 0 gins(x86.AMOVL, ncon(0), &cx) gins(x86.ASETCC, nil, &cx) gins(x86.AORL, &cx, &ax) gins(x86.ASHRL, ncon(1), &dx) gmove(&dx, &thi) gmove(&ax, &tlo) gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0) var r2 gc.Node gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1) gins(x86.AFMOVV, &t1, &r1) gins(x86.AFMOVD, &r1, &r1) gins(x86.AFADDDP, &r1, &r2) if tt == gc.TFLOAT32 { gins(x86.AFMOVFP, &r1, t) } else { gins(x86.AFMOVDP, &r1, t) } gc.Patch(p2, gc.Pc) splitclean() return } // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // requires memory intermediate hardmem: gc.Tempname(&r1, cvt) gmove(f, &r1) gmove(&r1, t) return } func floatmove_387(f *gc.Node, t *gc.Node) { var r1 gc.Node var a int ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) cvt := t.Type switch uint32(ft)<<16 | uint32(tt) { default: goto fatal /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT32, gc.TFLOAT32<<16 | gc.TINT64, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT32, gc.TFLOAT64<<16 | gc.TINT64: if t.Op == gc.OREGISTER { goto hardmem } var r1 gc.Node gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0) if f.Op != gc.OREGISTER { if ft == gc.TFLOAT32 { gins(x86.AFMOVF, f, &r1) } else { gins(x86.AFMOVD, f, &r1) } } // set round to zero mode during conversion var t1 gc.Node memname(&t1, gc.Types[gc.TUINT16]) var t2 gc.Node memname(&t2, gc.Types[gc.TUINT16]) gins(x86.AFSTCW, nil, &t1) gins(x86.AMOVW, ncon(0xf7f), &t2) gins(x86.AFLDCW, &t2, nil) if tt == gc.TINT16 { gins(x86.AFMOVWP, &r1, t) } else if tt == gc.TINT32 { gins(x86.AFMOVLP, &r1, t) } else { gins(x86.AFMOVVP, &r1, t) } gins(x86.AFLDCW, &t1, nil) return // convert via int32. case gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: var t1 gc.Node gc.Tempname(&t1, gc.Types[gc.TINT32]) gmove(f, &t1) switch tt { default: gc.Fatal("gmove %v", gc.Nconv(t, 0)) case gc.TINT8: gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1))) p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1) gins(x86.ACMPL, &t1, ncon(0x7f)) p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1) p3 := gc.Gbranch(obj.AJMP, nil, 0) gc.Patch(p1, gc.Pc) gc.Patch(p2, gc.Pc) gmove(ncon(-0x80&(1<<32-1)), &t1) gc.Patch(p3, gc.Pc) gmove(&t1, t) case gc.TUINT8: gins(x86.ATESTL, ncon(0xffffff00), &t1) p1 := gc.Gbranch(x86.AJEQ, nil, +1) gins(x86.AMOVL, ncon(0), &t1) gc.Patch(p1, gc.Pc) gmove(&t1, t) case gc.TUINT16: gins(x86.ATESTL, ncon(0xffff0000), &t1) p1 := gc.Gbranch(x86.AJEQ, nil, +1) gins(x86.AMOVL, ncon(0), &t1) gc.Patch(p1, gc.Pc) gmove(&t1, t) } return // convert via int64. 
case gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32: cvt = gc.Types[gc.TINT64] goto hardmem /* * integer to float */ case gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TINT32<<16 | gc.TFLOAT32, gc.TINT32<<16 | gc.TFLOAT64, gc.TINT64<<16 | gc.TFLOAT32, gc.TINT64<<16 | gc.TFLOAT64: if t.Op != gc.OREGISTER { goto hard } if f.Op == gc.OREGISTER { cvt = f.Type goto hardmem } switch ft { case gc.TINT16: a = x86.AFMOVW case gc.TINT32: a = x86.AFMOVL default: a = x86.AFMOVV } // convert via int32 memory case gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hardmem // convert via int64 memory case gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT64] goto hardmem // The way the code generator uses floating-point // registers, a move from F0 to F0 is intended as a no-op. // On the x86, it's not: it pushes a second copy of F0 // on the floating point stack. So toss it away here. // Also, F0 is the *only* register we ever evaluate // into, so we should only see register/register as F0/F0. /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32, gc.TFLOAT64<<16 | gc.TFLOAT64: if gc.Ismem(f) && gc.Ismem(t) { goto hard } if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER { if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 { goto fatal } return } a = x86.AFMOVF if ft == gc.TFLOAT64 { a = x86.AFMOVD } if gc.Ismem(t) { if f.Op != gc.OREGISTER || f.Val.U.Reg != x86.REG_F0 { gc.Fatal("gmove %v", gc.Nconv(f, 0)) } a = x86.AFMOVFP if ft == gc.TFLOAT64 { a = x86.AFMOVDP } } case gc.TFLOAT32<<16 | gc.TFLOAT64: if gc.Ismem(f) && gc.Ismem(t) { goto hard } if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER { if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 { goto fatal } return } if f.Op == gc.OREGISTER { gins(x86.AFMOVDP, f, t) } else { gins(x86.AFMOVF, f, t) } return case gc.TFLOAT64<<16 | gc.TFLOAT32: if gc.Ismem(f) && gc.Ismem(t) { goto hard } if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER { var r1 gc.Node gc.Tempname(&r1, gc.Types[gc.TFLOAT32]) gins(x86.AFMOVFP, f, &r1) gins(x86.AFMOVF, &r1, t) return } if f.Op == gc.OREGISTER { gins(x86.AFMOVFP, f, t) } else { gins(x86.AFMOVD, f, t) } return } gins(a, f, t) return // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // requires memory intermediate hardmem: gc.Tempname(&r1, cvt) gmove(f, &r1) gmove(&r1, t) return // should not happen fatal: gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong)) return } func floatmove_sse(f *gc.Node, t *gc.Node) { var r1 gc.Node var cvt *gc.Type var a int ft := gc.Simsimtype(f.Type) tt := gc.Simsimtype(t.Type) switch uint32(ft)<<16 | uint32(tt) { // should not happen default: gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0)) return // convert via int32. /* * float to integer */ case gc.TFLOAT32<<16 | gc.TINT16, gc.TFLOAT32<<16 | gc.TINT8, gc.TFLOAT32<<16 | gc.TUINT16, gc.TFLOAT32<<16 | gc.TUINT8, gc.TFLOAT64<<16 | gc.TINT16, gc.TFLOAT64<<16 | gc.TINT8, gc.TFLOAT64<<16 | gc.TUINT16, gc.TFLOAT64<<16 | gc.TUINT8: cvt = gc.Types[gc.TINT32] goto hard // convert via int64. 
case gc.TFLOAT32<<16 | gc.TUINT32, gc.TFLOAT64<<16 | gc.TUINT32: cvt = gc.Types[gc.TINT64] goto hardmem case gc.TFLOAT32<<16 | gc.TINT32: a = x86.ACVTTSS2SL goto rdst case gc.TFLOAT64<<16 | gc.TINT32: a = x86.ACVTTSD2SL goto rdst // convert via int32 memory /* * integer to float */ case gc.TINT8<<16 | gc.TFLOAT32, gc.TINT8<<16 | gc.TFLOAT64, gc.TINT16<<16 | gc.TFLOAT32, gc.TINT16<<16 | gc.TFLOAT64, gc.TUINT16<<16 | gc.TFLOAT32, gc.TUINT16<<16 | gc.TFLOAT64, gc.TUINT8<<16 | gc.TFLOAT32, gc.TUINT8<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT32] goto hard // convert via int64 memory case gc.TUINT32<<16 | gc.TFLOAT32, gc.TUINT32<<16 | gc.TFLOAT64: cvt = gc.Types[gc.TINT64] goto hardmem case gc.TINT32<<16 | gc.TFLOAT32: a = x86.ACVTSL2SS goto rdst case gc.TINT32<<16 | gc.TFLOAT64: a = x86.ACVTSL2SD goto rdst /* * float to float */ case gc.TFLOAT32<<16 | gc.TFLOAT32: a = x86.AMOVSS case gc.TFLOAT64<<16 | gc.TFLOAT64: a = x86.AMOVSD case gc.TFLOAT32<<16 | gc.TFLOAT64: a = x86.ACVTSS2SD goto rdst case gc.TFLOAT64<<16 | gc.TFLOAT32: a = x86.ACVTSD2SS goto rdst } gins(a, f, t) return // requires register intermediate hard: regalloc(&r1, cvt, t) gmove(f, &r1) gmove(&r1, t) regfree(&r1) return // requires memory intermediate hardmem: gc.Tempname(&r1, cvt) gmove(f, &r1) gmove(&r1, t) return // requires register destination rdst: regalloc(&r1, t.Type, t) gins(a, f, &r1) gmove(&r1, t) regfree(&r1) return } func samaddr(f *gc.Node, t *gc.Node) bool { if f.Op != t.Op { return false } switch f.Op { case gc.OREGISTER: if f.Val.U.Reg != t.Val.U.Reg { break } return true } return false } /* * generate one instruction: * as f, t */ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog { if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER { gc.Fatal("gins MOVF reg, reg") } if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL { gc.Fatal("gins CVTSD2SS const") } if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Val.U.Reg == x86.REG_F0 { gc.Fatal("gins MOVSD into F0") } switch as { case x86.AMOVB, x86.AMOVW, x86.AMOVL: if f != nil && t != nil && samaddr(f, t) { return nil } case x86.ALEAL: if f != nil && gc.Isconst(f, gc.CTNIL) { gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0)) } } var af obj.Addr var at obj.Addr if f != nil { af = gc.Naddr(f) } if t != nil { at = gc.Naddr(t) } p := gc.Prog(as) if f != nil { p.From = af } if t != nil { p.To = at } if gc.Debug['g'] != 0 { fmt.Printf("%v\n", p) } w := 0 switch as { case x86.AMOVB: w = 1 case x86.AMOVW: w = 2 case x86.AMOVL: w = 4 } if true && w != 0 && f != nil && (af.Width > int64(w) || at.Width > int64(w)) { gc.Dump("bad width from:", f) gc.Dump("bad width to:", t) gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width) } if p.To.Type == obj.TYPE_ADDR && w > 0 { gc.Fatal("bad use of addr: %v", p) } return p } func dotaddable(n *gc.Node, n1 *gc.Node) bool { if n.Op != gc.ODOT { return false } var oary [10]int64 var nn *gc.Node o := gc.Dotoffset(n, oary[:], &nn) if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 { *n1 = *nn n1.Type = n.Type n1.Xoffset += oary[0] return true } return false } func sudoclean() { } func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool { *a = obj.Addr{} return false }
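// gmove and the floatmove helpers above all dispatch on one packed key,
// uint32(ft)<<16 | uint32(tt), turning every (source type, destination type)
// pair into a single switch case. A self-contained sketch of that encoding
// (kind, pair, and convName are hypothetical, not the compiler's names):

package main

import "fmt"

type kind uint32

const (
	kInt8 kind = iota + 1
	kInt32
	kFloat64
)

// pair packs a (source, destination) kind pair into one comparable key,
// exactly like uint32(ft)<<16 | uint32(tt) above.
func pair(from, to kind) uint32 { return uint32(from)<<16 | uint32(to) }

func convName(from, to kind) string {
	switch pair(from, to) {
	case pair(kInt8, kInt32):
		return "sign extend"
	case pair(kInt32, kInt8):
		return "truncate"
	case pair(kInt32, kFloat64):
		return "int to float"
	default:
		return "unhandled"
	}
}

func main() {
	fmt.Println(convName(kInt8, kInt32))    // sign extend
	fmt.Println(convName(kInt32, kFloat64)) // int to float
}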
func exprfmt(n *Node, prec int) string { for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) { n = n.Left } if n == nil { return "<N>" } nprec := opprec[n.Op] if n.Op == OTYPE && n.Sym != nil { nprec = 8 } if prec > nprec { return fmt.Sprintf("(%v)", n) } switch n.Op { case OPAREN: return fmt.Sprintf("(%v)", n.Left) case ODDDARG: return "... argument" case OREGISTER: return obj.Rconv(int(n.Reg)) case OLITERAL: // this is a bit of a mess if fmtmode == FErr { if n.Orig != nil && n.Orig != n { return exprfmt(n.Orig, prec) } if n.Sym != nil { return sconv(n.Sym, 0) } } if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n { return exprfmt(n.Orig, prec) } if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == Crecv) { return fmt.Sprintf("(%v)(%v)", n.Type, vconv(n.Val(), 0)) } else { return fmt.Sprintf("%v(%v)", n.Type, vconv(n.Val(), 0)) } } return vconv(n.Val(), 0) // Special case: name used as local variable in export. // _ becomes ~b%d internally; print as _ for export case ONAME: if fmtmode == FErr && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' { return "_" } fallthrough case OPACK, ONONAME: return sconv(n.Sym, 0) case OTYPE: if n.Type == nil && n.Sym != nil { return sconv(n.Sym, 0) } return Tconv(n.Type, 0) case OTARRAY: if n.Left != nil { return fmt.Sprintf("[]%v", n.Left) } return fmt.Sprintf("[]%v", n.Right) // happens before typecheck case OTMAP: return fmt.Sprintf("map[%v]%v", n.Left, n.Right) case OTCHAN: switch ChanDir(n.Etype) { case Crecv: return fmt.Sprintf("<-chan %v", n.Left) case Csend: return fmt.Sprintf("chan<- %v", n.Left) default: if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && ChanDir(n.Left.Etype) == Crecv { return fmt.Sprintf("chan (%v)", n.Left) } else { return fmt.Sprintf("chan %v", n.Left) } } case OTSTRUCT: return "<struct>" case OTINTER: return "<inter>" case OTFUNC: return "<func>" case OCLOSURE: if fmtmode == FErr { return "func literal" } if n.Nbody.Len() != 0 { return fmt.Sprintf("%v { %v }", n.Type, n.Nbody) } return fmt.Sprintf("%v { %v }", n.Type, n.Func.Closure.Nbody) case OCOMPLIT: ptrlit := n.Right != nil && n.Right.Implicit && n.Right.Type != nil && n.Right.Type.IsPtr() if fmtmode == FErr { if n.Right != nil && n.Right.Type != nil && !n.Implicit { if ptrlit { return fmt.Sprintf("&%v literal", n.Right.Type.Elem()) } else { return fmt.Sprintf("%v literal", n.Right.Type) } } return "composite literal" } return fmt.Sprintf("(%v{ %v })", n.Right, hconv(n.List, FmtComma)) case OPTRLIT: return fmt.Sprintf("&%v", n.Left) case OSTRUCTLIT, OARRAYLIT, OMAPLIT: if fmtmode == FErr { return fmt.Sprintf("%v literal", n.Type) } return fmt.Sprintf("(%v{ %v })", n.Type, hconv(n.List, FmtComma)) case OKEY: if n.Left != nil && n.Right != nil { return fmt.Sprintf("%v:%v", n.Left, n.Right) } if n.Left == nil && n.Right != nil { return fmt.Sprintf(":%v", n.Right) } if n.Left != nil && n.Right == nil { return fmt.Sprintf("%v:", n.Left) } return ":" case OCALLPART: var f string f += exprfmt(n.Left, nprec) if n.Right == nil || n.Right.Sym == nil { f += ".<nil>" return f } f += fmt.Sprintf(".%v", sconv(n.Right.Sym, FmtShort|FmtByte)) return f case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH: var f string f += exprfmt(n.Left, nprec) if n.Sym == nil { f += ".<nil>" return f } f += fmt.Sprintf(".%v", sconv(n.Sym, 
FmtShort|FmtByte)) return f case ODOTTYPE, ODOTTYPE2: var f string f += exprfmt(n.Left, nprec) if n.Right != nil { f += fmt.Sprintf(".(%v)", n.Right) return f } f += fmt.Sprintf(".(%v)", n.Type) return f case OINDEX, OINDEXMAP: return fmt.Sprintf("%s[%v]", exprfmt(n.Left, nprec), n.Right) case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: var buf bytes.Buffer buf.WriteString(exprfmt(n.Left, nprec)) buf.WriteString("[") low, high, max := n.SliceBounds() if low != nil { buf.WriteString(low.String()) } buf.WriteString(":") if high != nil { buf.WriteString(high.String()) } if n.Op.IsSlice3() { buf.WriteString(":") if max != nil { buf.WriteString(max.String()) } } buf.WriteString("]") return buf.String() case OCOPY, OCOMPLEX: return fmt.Sprintf("%#v(%v, %v)", n.Op, n.Left, n.Right) case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR: if n.Type == nil || n.Type.Sym == nil { return fmt.Sprintf("(%v)(%v)", n.Type, n.Left) } if n.Left != nil { return fmt.Sprintf("%v(%v)", n.Type, n.Left) } return fmt.Sprintf("%v(%v)", n.Type, hconv(n.List, FmtComma)) case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN: if n.Left != nil { return fmt.Sprintf("%#v(%v)", n.Op, n.Left) } if n.Isddd { return fmt.Sprintf("%#v(%v...)", n.Op, hconv(n.List, FmtComma)) } return fmt.Sprintf("%#v(%v)", n.Op, hconv(n.List, FmtComma)) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: var f string f += exprfmt(n.Left, nprec) if n.Isddd { f += fmt.Sprintf("(%v...)", hconv(n.List, FmtComma)) return f } f += fmt.Sprintf("(%v)", hconv(n.List, FmtComma)) return f case OMAKEMAP, OMAKECHAN, OMAKESLICE: if n.List.Len() != 0 { // pre-typecheck return fmt.Sprintf("make(%v, %v)", n.Type, hconv(n.List, FmtComma)) } if n.Right != nil { return fmt.Sprintf("make(%v, %v, %v)", n.Type, n.Left, n.Right) } if n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()) { return fmt.Sprintf("make(%v, %v)", n.Type, n.Left) } return fmt.Sprintf("make(%v)", n.Type) // Unary case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV: f := n.Op.GoString() // %#v if n.Left.Op == n.Op { f += " " } f += exprfmt(n.Left, nprec+1) return f // Binary case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT, OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR: var f string f += exprfmt(n.Left, nprec) f += fmt.Sprintf(" %#v ", n.Op) f += exprfmt(n.Right, nprec+1) return f case OADDSTR: var f string i := 0 for _, n1 := range n.List.Slice() { if i != 0 { f += " + " } f += exprfmt(n1, nprec) i++ } return f case OCMPSTR, OCMPIFACE: var f string f += exprfmt(n.Left, nprec) // TODO(marvin): Fix Node.EType type union. f += fmt.Sprintf(" %#v ", Op(n.Etype)) f += exprfmt(n.Right, nprec+1) return f case ODCLCONST: // if exporting, DCLCONST should just be removed as its usage // has already been replaced with literals if fmtbody { return "" } } return fmt.Sprintf("<node %v>", n.Op) }
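// Both exprfmt variants print slice expressions the same way: "[", then each
// bound only when present, ":" separators, and a second ":" group only for
// 3-index slices. The logic isolated below (sliceExpr is hypothetical; an
// empty string stands in for an absent bound, as a nil SliceBounds entry
// does above):

package main

import (
	"bytes"
	"fmt"
)

// sliceExpr renders base[low:high] or base[low:high:max].
func sliceExpr(base, low, high, max string, slice3 bool) string {
	var buf bytes.Buffer
	buf.WriteString(base)
	buf.WriteString("[")
	buf.WriteString(low)
	buf.WriteString(":")
	buf.WriteString(high)
	if slice3 {
		buf.WriteString(":")
		buf.WriteString(max)
	}
	buf.WriteString("]")
	return buf.String()
}

func main() {
	fmt.Println(sliceExpr("a", "", "n", "", false))  // a[:n]
	fmt.Println(sliceExpr("a", "i", "j", "k", true)) // a[i:j:k]
}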
func nodedump(n *Node, flag FmtFlag) string {
	if n == nil {
		return ""
	}

	recur := flag&FmtShort == 0

	var buf bytes.Buffer
	if recur {
		indent(&buf)
		if dumpdepth > 10 {
			buf.WriteString("...")
			return buf.String()
		}

		if n.Ninit.Len() != 0 {
			fmt.Fprintf(&buf, "%v-init%v", n.Op, n.Ninit)
			indent(&buf)
		}
	}

	switch n.Op {
	default:
		fmt.Fprintf(&buf, "%v%v", n.Op, jconv(n, 0))

	case OREGISTER, OINDREG:
		fmt.Fprintf(&buf, "%v-%v%v", n.Op, obj.Rconv(int(n.Reg)), jconv(n, 0))

	case OLITERAL:
		fmt.Fprintf(&buf, "%v-%v%v", n.Op, vconv(n.Val(), 0), jconv(n, 0))

	case ONAME, ONONAME:
		if n.Sym != nil {
			fmt.Fprintf(&buf, "%v-%v%v", n.Op, n.Sym, jconv(n, 0))
		} else {
			fmt.Fprintf(&buf, "%v%v", n.Op, jconv(n, 0))
		}
		if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}

	case OASOP:
		fmt.Fprintf(&buf, "%v-%v%v", n.Op, Op(n.Etype), jconv(n, 0))

	case OTYPE:
		fmt.Fprintf(&buf, "%v %v%v type=%v", n.Op, n.Sym, jconv(n, 0), n.Type)
		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-ntype%v", n.Op, n.Name.Param.Ntype)
		}
	}

	if n.Sym != nil && n.Op != ONAME {
		fmt.Fprintf(&buf, " %v", n.Sym)
	}

	if n.Type != nil {
		fmt.Fprintf(&buf, " %v", n.Type)
	}

	if recur {
		if n.Left != nil {
			buf.WriteString(Nconv(n.Left, 0))
		}
		if n.Right != nil {
			buf.WriteString(Nconv(n.Right, 0))
		}
		if n.List.Len() != 0 {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-list%v", n.Op, n.List)
		}
		if n.Rlist.Len() != 0 {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-rlist%v", n.Op, n.Rlist)
		}
		if n.Nbody.Len() != 0 {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-body%v", n.Op, n.Nbody)
		}
	}

	return buf.String()
}
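// The two nodedump versions show the same dumper in two idioms: one builds a
// string via bytes.Buffer and helpers like jconv/Nconv, the other writes
// straight to a fmt.State, where custom verbs and flags are available. A
// minimal sketch of the fmt.State style via fmt.Formatter (reg is a
// hypothetical type, unrelated to obj.Rconv):

package main

import "fmt"

type reg int16

// Format implements fmt.Formatter: %d prints the raw number, %v a name,
// and the '+' flag appends the numeric value to the name.
func (r reg) Format(s fmt.State, verb rune) {
	switch verb {
	case 'd':
		fmt.Fprintf(s, "%d", int16(r))
	default:
		fmt.Fprintf(s, "R%d", int16(r))
		if s.Flag('+') {
			fmt.Fprintf(s, "(=%d)", int16(r))
		}
	}
}

func main() {
	r := reg(13)
	fmt.Printf("%v %d %+v\n", r, r, r) // R13 13 R13(=13)
}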