// is reg guaranteed to be truncated by a previous L instruction?
func prevl(r0 *gc.Flow, reg int) bool {
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		p := r.Prog
		if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
			flags := progflags(p)
			if flags&gc.RightWrite != 0 {
				if flags&gc.SizeL != 0 {
					return true
				}
				return false
			}
		}
	}

	return false
}
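// The integer codes returned by copyu and tested throughout this file
// follow a single convention, inferred here from the case labels at the
// call sites below:
//
//	0  v is not touched by the instruction
//	1  v is read (substitution may be possible)
//	2  v is read-alter-rewritten (RAR); it cannot be substituted
//	3  v is set
//	4  v is both used and set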
/*
 * findinc finds ADD instructions with a constant
 * argument which falls within the immed_12 range.
 */
func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
	var r1 *gc.Flow
	var p *obj.Prog

	for r1 = gc.Uniqs(r); r1 != nil && r1 != r2; r, r1 = r1, gc.Uniqs(r1) {
		if gc.Uniqp(r1) != r {
			return nil
		}
		switch copyu(r1.Prog, v, nil) {
		case 0: /* not touched */
			continue

		case 4: /* set and used */
			p = r1.Prog
			if p.As == arm.AADD {
				if isdconst(&p.From) {
					if p.From.Offset > -4096 && p.From.Offset < 4096 {
						return r1
					}
				}
			}
			fallthrough

		default:
			return nil
		}
	}

	return nil
}
/*
 * findpre returns the last instruction mentioning v
 * before r. It must be a set, and there must be
 * a unique path from that instruction to r.
 */
func findpre(r *gc.Flow, v *obj.Addr) *gc.Flow {
	var r1 *gc.Flow

	for r1 = gc.Uniqp(r); r1 != nil; r, r1 = r1, gc.Uniqp(r1) {
		if gc.Uniqs(r1) != r {
			return nil
		}
		switch copyu(r1.Prog, v, nil) {
		case 1, /* used */
			2: /* read-alter-rewrite */
			return nil

		case 3, /* set */
			4: /* set and used */
			return r1
		}
	}

	return nil
}
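// A hypothetical illustration of the pattern these two helpers support
// (their callers are outside this excerpt): an address-mode folding pass
// can use findpre to locate the last set of a base register and findinc
// to find a following in-range constant ADD, e.g.
//
//	MOVW (R1), R2    // load through R1
//	ADD  $4, R1      // increment found by findinc
//
// and combine the two into one load with a post-increment operand.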
func conprop(r0 *gc.Flow) {
	var p *obj.Prog
	var t int

	p0 := r0.Prog
	v0 := &p0.To
	r := r0

loop:
	r = gc.Uniqs(r)
	if r == nil || r == r0 {
		return
	}
	if gc.Uniqp(r) == nil {
		return
	}

	p = r.Prog
	t = copyu(p, v0, nil)
	switch t {
	case 0, // miss
		1: // use
		goto loop

	case 2, // rar
		4: // use and set
		break

	case 3: // set
		if p.As == p0.As {
			if p.From.Type == p0.From.Type {
				if p.From.Reg == p0.From.Reg {
					if p.From.Node == p0.From.Node {
						if p.From.Offset == p0.From.Offset {
							if p.From.Scale == p0.From.Scale {
								if p.From.Type == obj.TYPE_FCONST && p.From.Val.(float64) == p0.From.Val.(float64) {
									if p.From.Index == p0.From.Index {
										excise(r)
										goto loop
									}
								}
							}
						}
					}
				}
			}
		}
	}
}
/*
 * The idea is to remove redundant constants.
 *	$c1->v1
 *	($c1->v2 s/$c1/v1)*
 *	set v1  return
 * The v1->v2 should be eliminated by copy propagation.
 */
func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
	if gc.Debug['P'] != 0 {
		fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
	}
	var p *obj.Prog
	for ; r != nil; r = r.S1 {
		p = r.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("%v", p)
		}
		if gc.Uniqp(r) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("; merge; return\n")
			}
			return
		}

		if p.As == arm.AMOVW && copyas(&p.From, c1) {
			if gc.Debug['P'] != 0 {
				fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
			}
			p.From = *v1
		} else if copyu(p, v1, nil) > 1 {
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
			}
			return
		}

		if gc.Debug['P'] != 0 {
			fmt.Printf("\n")
		}
		if r.S2 != nil {
			constprop(c1, v1, r.S2)
		}
	}
}
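// A minimal sketch of the rewrite constprop performs, with illustrative
// registers:
//
//	MOVW $42, R1    // $c1->v1
//	MOVW $42, R2    // $c1->v2, rewritten to MOVW R1, R2
//
// The surviving R1->R2 copy is then a candidate for copy propagation.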
// copy1 replaces uses of v2 with v1 starting at r and reports whether
// all uses were rewritten.
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
	if uint32(r.Active) == gactive {
		if gc.Debug['P'] != 0 {
			fmt.Printf("act set; return 1\n")
		}
		return true
	}

	r.Active = int32(gactive)
	if gc.Debug['P'] != 0 {
		fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
	}
	var t int
	var p *obj.Prog
	for ; r != nil; r = r.S1 {
		p = r.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("%v", p)
		}
		if f == 0 && gc.Uniqp(r) == nil {
			// Multiple predecessors; conservatively
			// assume v1 was set on other path.
			f = 1

			if gc.Debug['P'] != 0 {
				fmt.Printf("; merge; f=%d", f)
			}
		}

		t = copyu(p, v2, nil)
		switch t {
		case 2: /* rar, can't split */
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
			}
			return false

		case 3: /* set */
			if gc.Debug['P'] != 0 {
				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
			}
			return true

		case 1, /* used, substitute */
			4: /* use and set */
			if f != 0 {
				if gc.Debug['P'] == 0 {
					return false
				}
				if t == 4 {
					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
				} else {
					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
				}
				return false
			}

			if copyu(p, v2, v1) != 0 {
				if gc.Debug['P'] != 0 {
					fmt.Printf("; sub fail; return 0\n")
				}
				return false
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
			}
			if t == 4 {
				if gc.Debug['P'] != 0 {
					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
				}
				return true
			}
		}

		if f == 0 {
			t = copyu(p, v1, nil)
			if f == 0 && (t == 2 || t == 3 || t == 4) {
				f = 1
				if gc.Debug['P'] != 0 {
					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
				}
			}
		}

		if gc.Debug['P'] != 0 {
			fmt.Printf("\n")
		}
		if r.S2 != nil {
			if !copy1(v1, v2, r.S2, f) {
				return false
			}
		}
	}

	return true
}
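// For context: copy1 is the recursive worker used by copyprop, which is
// not part of this excerpt. Assuming the usual pairing, copyprop finds a
// copy such as MOVW R1, R2 and calls copy1 with v1=R1, v2=R2 to rewrite
// downstream uses of R2 into R1, after which the original MOVW is dead
// and can be excised.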
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R1
 *	ADD	b, R1	/ no use of R2
 *	MOV	R1, R2
 * would be converted to
 *	MOV	a, R2
 *	ADD	b, R2
 *	MOV	R2, R1
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 *
 * r0 (the argument, not the register) is the MOV at the end of the
 * above sequences. subprop reports whether it modified any instructions.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
			if p.To.Type == v1.Type {
				if p.To.Reg == v1.Reg {
					copysub(&p.To, v1, v2, 1)
					if gc.Debug['P'] != 0 {
						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
						if p.From.Type == v2.Type {
							fmt.Printf(" excise")
						}
						fmt.Printf("\n")
					}

					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
						p = r.Prog
						copysub(&p.From, v1, v2, 1)
						copysub1(p, v1, v2, 1)
						copysub(&p.To, v1, v2, 1)
						if gc.Debug['P'] != 0 {
							fmt.Printf("%v\n", r.Prog)
						}
					}

					t := int(v1.Reg)
					v1.Reg = v2.Reg
					v2.Reg = int16(t)
					if gc.Debug['P'] != 0 {
						fmt.Printf("%v last\n", r.Prog)
					}
					return true
				}
			}
		}

		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
			break
		}
	}

	return false
}
/*
 * ASLL x,y,w
 * .. (not use w, not set x y w)
 * AXXX w,a,b (a != w)
 * .. (not use w)
 * (set w)
 * ----------- changed to
 * ..
 * AXXX (x<<y),a,b
 * ..
 */
func shiftprop(r *gc.Flow) bool {
	p := r.Prog
	if p.To.Type != obj.TYPE_REG {
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
		}
		return false
	}

	n := int(p.To.Reg)
	a := obj.Addr{}
	if p.Reg != 0 && p.Reg != p.To.Reg {
		a.Type = obj.TYPE_REG
		a.Reg = p.Reg
	}

	if gc.Debug['P'] != 0 {
		fmt.Printf("shiftprop\n%v", p)
	}
	r1 := r
	var p1 *obj.Prog
	for {
		/* find first use of shift result; abort if shift operands or result are changed */
		r1 = gc.Uniqs(r1)

		if r1 == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tbranch; FAILURE\n")
			}
			return false
		}

		if gc.Uniqp(r1) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tmerge; FAILURE\n")
			}
			return false
		}

		p1 = r1.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("\n%v", p1)
		}
		switch copyu(p1, &p.To, nil) {
		case 0: /* not used or set */
			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\targs modified; FAILURE\n")
				}
				return false
			}
			continue

		case 3: /* set, not used */
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: noref; FAILURE\n")
			}
			return false
		}

		break
	}

	/* check whether substitution can be done */
	switch p1.As {
	default:
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tnon-dpi; FAILURE\n")
		}
		return false

	case arm.AAND,
		arm.AEOR,
		arm.AADD,
		arm.AADC,
		arm.AORR,
		arm.ASUB,
		arm.ASBC,
		arm.ARSB,
		arm.ARSC:
		if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
			if p1.From.Type != obj.TYPE_REG {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tcan't swap; FAILURE\n")
				}
				return false
			}

			p1.Reg = p1.From.Reg
			p1.From.Reg = int16(n)
			switch p1.As {
			case arm.ASUB:
				p1.As = arm.ARSB

			case arm.ARSB:
				p1.As = arm.ASUB

			case arm.ASBC:
				p1.As = arm.ARSC

			case arm.ARSC:
				p1.As = arm.ASBC
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("\t=>%v", p1)
			}
		}
		fallthrough

	case arm.ABIC,
		arm.ATST,
		arm.ACMP,
		arm.ACMN:
		if int(p1.Reg) == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tcan't swap; FAILURE\n")
			}
			return false
		}

		if p1.Reg == 0 && int(p1.To.Reg) == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used twice; FAILURE\n")
			}
			return false
		}

		// case AMVN:
		if p1.From.Type == obj.TYPE_SHIFT {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used in shift; FAILURE\n")
			}
			return false
		}

		if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
			}
			return false
		}
	}

	/* check whether shift result is used subsequently */
	p2 := p1

	if int(p1.To.Reg) != n {
		var p1 *obj.Prog
		for {
			r1 = gc.Uniqs(r1)
			if r1 == nil {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tinconclusive; FAILURE\n")
				}
				return false
			}

			p1 = r1.Prog
			if gc.Debug['P'] != 0 {
				fmt.Printf("\n%v", p1)
			}
			switch copyu(p1, &p.To, nil) {
			case 0: /* not used or set */
				continue

			case 3: /* set, not used */
				break

			default: /* used */
				if gc.Debug['P'] != 0 {
					fmt.Printf("\treused; FAILURE\n")
				}
				return false
			}

			break
		}
	}

	/* make the substitution */
	p2.From.Reg = 0

	o := int(p.Reg)
	if o == 0 {
		o = int(p.To.Reg)
	}
	o &= 15

	switch p.From.Type {
	case obj.TYPE_CONST:
		o |= int((p.From.Offset & 0x1f) << 7)

	case obj.TYPE_REG:
		o |= 1<<4 | (int(p.From.Reg)&15)<<8
	}

	switch p.As {
	case arm.ASLL:
		o |= 0 << 5

	case arm.ASRL:
		o |= 1 << 5

	case arm.ASRA:
		o |= 2 << 5
	}

	p2.From = obj.Addr{}
	p2.From.Type = obj.TYPE_SHIFT
	p2.From.Offset = int64(o)
	if gc.Debug['P'] != 0 {
		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
	}
	return true
}
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R0
 *	ADD	b, R0	/ no use of R1
 *	MOV	R0, R1
 * would be converted to
 *	MOV	a, R1
 *	ADD	b, R1
 *	MOV	R1, R0
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		// TODO(rsc): Whatever invalidated the info should have done this call.
		proginfo(p)

		if (p.Info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
			p.Info.Flags |= gc.RegRead
			p.Info.Flags &^= (gc.CanRegRead | gc.RightRead)
			p.Reg = p.To.Reg
		}

		switch p.As {
		case arm.AMULLU,
			arm.AMULA,
			arm.AMVN:
			return false
		}

		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
			if p.To.Type == v1.Type {
				if p.To.Reg == v1.Reg {
					if p.Scond == arm.C_SCOND_NONE {
						copysub(&p.To, v1, v2, 1)
						if gc.Debug['P'] != 0 {
							fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
							if p.From.Type == v2.Type {
								fmt.Printf(" excise")
							}
							fmt.Printf("\n")
						}

						for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
							p = r.Prog
							copysub(&p.From, v1, v2, 1)
							copysub1(p, v1, v2, 1)
							copysub(&p.To, v1, v2, 1)
							if gc.Debug['P'] != 0 {
								fmt.Printf("%v\n", r.Prog)
							}
						}

						t := int(v1.Reg)
						v1.Reg = v2.Reg
						v2.Reg = int16(t)
						if gc.Debug['P'] != 0 {
							fmt.Printf("%v last\n", r.Prog)
						}
						return true
					}
				}
			}
		}

		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
			break
		}
	}

	return false
}
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R0
 *	ADD	b, R0	/ no use of R1
 *	MOV	R0, R1
 * would be converted to
 *	MOV	a, R1
 *	ADD	b, R1
 *	MOV	R1, R0
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 */
func subprop(r0 *gc.Flow) bool {
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("subprop %v\n", r0.Prog)
	}
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
		}
		return false
	}

	v2 := &p.To
	if !regtyp(v2) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
		}
		return false
	}

	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\t? %v\n", r.Prog)
		}
		if gc.Uniqs(r) == nil {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tno unique successor\n")
			}
			break
		}

		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tfound %v; return 0\n", p)
			}
			return false
		}

		if p.Info.Reguse|p.Info.Regset != 0 {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tfound %v; return 0\n", p)
			}
			return false
		}

		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
			copysub(&p.To, v1, v2, 1)
			if gc.Debug['P'] != 0 {
				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
					fmt.Printf(" excise")
				}
				fmt.Printf("\n")
			}

			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
				p = r.Prog
				copysub(&p.From, v1, v2, 1)
				copysub(&p.To, v1, v2, 1)
				if gc.Debug['P'] != 0 {
					fmt.Printf("%v\n", r.Prog)
				}
			}

			t := int(v1.Reg)
			v1.Reg = v2.Reg
			v2.Reg = int16(t)
			if gc.Debug['P'] != 0 {
				fmt.Printf("%v last\n", r.Prog)
			}
			return true
		}

		if copyau(&p.From, v2) || copyau(&p.To, v2) {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
			}
			break
		}

		if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tcopysub failed\n")
			}
			break
		}
	}

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("\tran off end; return 0\n")
	}
	return false
}
func pushback(r0 *gc.Flow) {
	var r *gc.Flow
	var p *obj.Prog

	var b *gc.Flow
	p0 := r0.Prog
	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
		p = r.Prog
		if p.As != obj.ANOP {
			if !regconsttyp(&p.From) || !regtyp(&p.To) {
				break
			}
			if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
				break
			}
		}

		if p.As == obj.ACALL {
			break
		}
		b = r
	}

	if b == nil {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("no pushback: %v\n", r0.Prog)
			if r != nil {
				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
			}
		}
		return
	}

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("pushback\n")
		for r := b; ; r = r.Link {
			fmt.Printf("\t%v\n", r.Prog)
			if r == r0 {
				break
			}
		}
	}

	t := *r0.Prog
	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
		p0 = r.Link.Prog
		p = r.Prog
		p0.As = p.As
		p0.Lineno = p.Lineno
		p0.From = p.From
		p0.To = p.To

		if r == b {
			break
		}
	}

	p0 = r.Prog
	p0.As = t.As
	p0.Lineno = t.Lineno
	p0.From = t.From
	p0.To = t.To

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("\tafter\n")
		for r := b; ; r = r.Link {
			fmt.Printf("\t%v\n", r.Prog)
			if r == r0 {
				break
			}
		}
	}
}
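// For context, a reading of pushback above (not a contract documented in
// this excerpt): it hoists the instruction at r0 earlier in its block.
// The first loop walks backward over instructions that are provably
// independent of r0's instruction (the copyu checks on both operands),
// remembering the earliest safe position b. The rotation loop then
// shifts each intervening Prog body one slot later and finally deposits
// r0's saved body at b, so a later substitution pass sees the MOV
// adjacent to the instruction it wants to combine with.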
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R0
 *	ADD	b, R0	/ no use of R1
 *	MOV	R0, R1
 * would be converted to
 *	MOV	a, R1
 *	ADD	b, R1
 *	MOV	R1, R0
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\t? %v\n", r.Prog)
		}
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}
		if p.Info.Reguse|p.Info.Regset != 0 {
			return false
		}

		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
			copysub(&p.To, v1, v2, 1)
			if gc.Debug['P'] != 0 {
				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
					fmt.Printf(" excise")
				}
				fmt.Printf("\n")
			}

			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
				p = r.Prog
				copysub(&p.From, v1, v2, 1)
				copysub(&p.To, v1, v2, 1)
				if gc.Debug['P'] != 0 {
					fmt.Printf("%v\n", r.Prog)
				}
			}

			t := int(v1.Reg)
			v1.Reg = v2.Reg
			v2.Reg = int16(t)
			if gc.Debug['P'] != 0 {
				fmt.Printf("%v last\n", r.Prog)
			}
			return true
		}

		if copyau(&p.From, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
			break
		}
	}

	return false
}
func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	var p *obj.Prog
	var r *gc.Flow
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog

		// TODO(austin) Handle smaller moves. arm and amd64
		// distinguish between moves that *must* sign/zero
		// extend and moves that don't care, so they can
		// eliminate moves that don't care without breaking
		// moves that do care. This might let us simplify
		// or remove the next peep loop, too.
		if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
			if regtyp(&p.To) {
				// Try to eliminate reg->reg moves
				if regtyp(&p.From) {
					if p.From.Type == p.To.Type {
						if copyprop(r) {
							excise(r)
							t++
						} else if subprop(r) && copyprop(r) {
							excise(r)
							t++
						}
					}
				}

				// Convert uses of $0 to uses of R0 and
				// propagate R0
				if regzer(&p.From) != 0 {
					if p.To.Type == obj.TYPE_REG {
						p.From.Type = obj.TYPE_REG
						p.From.Reg = ppc64.REGZERO
						if copyprop(r) {
							excise(r)
							t++
						} else if subprop(r) && copyprop(r) {
							excise(r)
							t++
						}
					}
				}
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	/*
	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
	 */
	var p1 *obj.Prog
	var r1 *gc.Flow
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case ppc64.AMOVH,
			ppc64.AMOVHZ,
			ppc64.AMOVB,
			ppc64.AMOVBZ,
			ppc64.AMOVW,
			ppc64.AMOVWZ:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != p.As {
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
			continue
		}
		excise(r1)
	}

	if gc.Debug['D'] > 1 {
		goto ret /* allow following code improvement to be suppressed */
	}

	/*
	 * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
	 * when OP can set condition codes correctly
	 */
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case ppc64.ACMP,
			ppc64.ACMPW: /* always safe? */
			if regzer(&p.To) == 0 {
				continue
			}
			r1 = r.S1
			if r1 == nil {
				continue
			}
			switch r1.Prog.As {
			default:
				continue

				/* the conditions can be complex and these are currently little used */
			case ppc64.ABCL,
				ppc64.ABC:
				continue

			case ppc64.ABEQ,
				ppc64.ABGE,
				ppc64.ABGT,
				ppc64.ABLE,
				ppc64.ABLT,
				ppc64.ABNE,
				ppc64.ABVC,
				ppc64.ABVS:
				break
			}

			r1 = r
			for {
				r1 = gc.Uniqp(r1)
				if r1 == nil || r1.Prog.As != obj.ANOP {
					break
				}
			}

			if r1 == nil {
				continue
			}
			p1 = r1.Prog
			if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
				continue
			}

			switch p1.As {
			/* irregular instructions */
			case ppc64.ASUB,
				ppc64.AADD,
				ppc64.AXOR,
				ppc64.AOR:
				if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
					continue
				}
			}

			switch p1.As {
			default:
				continue

			case ppc64.AMOVW,
				ppc64.AMOVD:
				if p1.From.Type != obj.TYPE_REG {
					continue
				}
				continue

			case ppc64.AANDCC, ppc64.AANDNCC, ppc64.AORCC, ppc64.AORNCC,
				ppc64.AXORCC, ppc64.ASUBCC, ppc64.ASUBECC, ppc64.ASUBMECC,
				ppc64.ASUBZECC, ppc64.AADDCC, ppc64.AADDCCC, ppc64.AADDECC,
				ppc64.AADDMECC, ppc64.AADDZECC, ppc64.ARLWMICC,
				ppc64.ARLWNMCC, /* don't deal with floating point instructions for now */
				/*
					case AFABS: case AFADD: case AFADDS: case AFCTIW:
					case AFCTIWZ: case AFDIV: case AFDIVS: case AFMADD:
					case AFMADDS: case AFMOVD: case AFMSUB: case AFMSUBS:
					case AFMUL: case AFMULS: case AFNABS: case AFNEG:
					case AFNMADD: case AFNMADDS: case AFNMSUB: case AFNMSUBS:
					case AFRSP: case AFSUB: case AFSUBS: case ACNTLZW:
					case AMTFSB0: case AMTFSB1:
				*/
				ppc64.AADD, ppc64.AADDV, ppc64.AADDC, ppc64.AADDCV,
				ppc64.AADDME, ppc64.AADDMEV, ppc64.AADDE, ppc64.AADDEV,
				ppc64.AADDZE, ppc64.AADDZEV, ppc64.AAND, ppc64.AANDN,
				ppc64.ADIVW, ppc64.ADIVWV, ppc64.ADIVWU, ppc64.ADIVWUV,
				ppc64.ADIVD, ppc64.ADIVDV, ppc64.ADIVDU, ppc64.ADIVDUV,
				ppc64.AEQV, ppc64.AEXTSB, ppc64.AEXTSH, ppc64.AEXTSW,
				ppc64.AMULHW, ppc64.AMULHWU, ppc64.AMULLW, ppc64.AMULLWV,
				ppc64.AMULHD, ppc64.AMULHDU, ppc64.AMULLD, ppc64.AMULLDV,
				ppc64.ANAND, ppc64.ANEG, ppc64.ANEGV, ppc64.ANOR,
				ppc64.AOR, ppc64.AORN, ppc64.AREM, ppc64.AREMV,
				ppc64.AREMU, ppc64.AREMUV, ppc64.AREMD, ppc64.AREMDV,
				ppc64.AREMDU, ppc64.AREMDUV, ppc64.ARLWMI, ppc64.ARLWNM,
				ppc64.ASLW, ppc64.ASRAW, ppc64.ASRW, ppc64.ASLD,
				ppc64.ASRAD, ppc64.ASRD, ppc64.ASUB, ppc64.ASUBV,
				ppc64.ASUBC, ppc64.ASUBCV, ppc64.ASUBME, ppc64.ASUBMEV,
				ppc64.ASUBE, ppc64.ASUBEV, ppc64.ASUBZE, ppc64.ASUBZEV,
				ppc64.AXOR:
				t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
			}

			if gc.Debug['D'] != 0 {
				fmt.Printf("cmp %v; %v -> ", p1, p)
			}
			p1.As = int16(t)
			if gc.Debug['D'] != 0 {
				fmt.Printf("%v\n", p1)
			}
			excise(r)
			continue
		}
	}

ret:
	gc.Flowend(g)
}
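// A worked instance of the final rewrite above, with illustrative
// registers:
//
//	ADD R4, R5, R6
//	CMP R6, $0
//	BEQ label
//
// becomes
//
//	ADDCC R4, R5, R6
//	BEQ   label
//
// since the CC-setting variant of the operation makes the explicit
// compare against zero redundant, and excise(r) removes the CMP.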