Example #1
/*
 * findinc finds ADD instructions with a constant
 * argument which falls within the immed_12 range.
 */
func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
	var r1 *gc.Flow
	var p *obj.Prog

	for r1 = gc.Uniqs(r); r1 != nil && r1 != r2; r, r1 = r1, gc.Uniqs(r1) {
		if gc.Uniqp(r1) != r {
			return nil
		}
		switch copyu(r1.Prog, v, nil) {
		case 0: /* not touched */
			continue

		case 4: /* set and used */
			p = r1.Prog

			if p.As == arm.AADD {
				if isdconst(&p.From) {
					if p.From.Offset > -4096 && p.From.Offset < 4096 {
						return r1
					}
				}
			}
			fallthrough

		default:
			return nil
		}
	}

	return nil
}
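The bare numeric copyu codes used in this ARM snippet (and in the x86 examples further down) line up with the named constants the s390x examples use. A reconstruction of the mapping, inferred from the inline comments, for reference:

const (
	_None          = 0 // not touched ("miss")
	_Read          = 1 // read ("used")
	_ReadWriteSame = 2 // read-alter-rewrite ("rar"): read and written in place
	_Write         = 3 // written ("set")
	_ReadWriteDiff = 4 // written and read as separate operands ("set and used")
)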
Example #2
// removeLoadHitStores tries to remove loads that take place
// immediately after a store to the same location. Returns the
// number of load-hit-stores removed.
//
// For example:
// 	MOVD	R1, 0(R15)
// 	MOVD	0(R15), R2
// Would become:
// 	MOVD	R1, 0(R15)
// 	MOVD	R1, R2
func removeLoadHitStores(r *gc.Flow) int {
	n := 0
	for ; r != nil; r = r.Link {
		p := r.Prog
		if !isStore(p) {
			continue
		}
		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
			pp := rr.Prog
			if gc.Uniqp(rr) == nil {
				break
			}
			if pp.As == obj.ANOP {
				continue
			}
			if isLoad(pp) && sameStackMem(&p.To, &pp.From) {
				if size(p.As) >= size(pp.As) && isGPR(&p.From) == isGPR(&pp.To) {
					pp.From = p.From
					n++
				}
			}
			if !isMove(pp) || isStore(pp) {
				break
			}
			if copyau(&p.From, &pp.To) {
				break
			}
		}
	}
	return n
}
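removeLoadHitStores leans on helpers that are not shown in this listing. A minimal sketch of what sameStackMem plausibly checks, given how it is called here (the real s390x helper may be stricter):

// sameStackMemSketch is a hypothetical stand-in for sameStackMem:
// two operands alias the same stack slot when both are memory
// references off the same base register with equal symbol and offset.
func sameStackMemSketch(a, b *obj.Addr) bool {
	return a.Type == obj.TYPE_MEM && b.Type == obj.TYPE_MEM &&
		a.Reg == b.Reg && a.Sym == b.Sym &&
		a.Name == b.Name && a.Offset == b.Offset
}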
Example #3
// the idea is to substitute
// one register for another
// from one MOV to another
//	MOV	a, R1
//	ADD	b, R1	/ no use of R2
//	MOV	R1, R2
// would be converted to
//	MOV	a, R2
//	ADD	b, R2
//	MOV	R2, R1
// hopefully, then the former or latter MOV
// will be eliminated by copy propagation.
//
// r0 (the argument, not the register) is the MOV at the end of the
// above sequences. subprop returns true if it modified any instructions.
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !isReg(v1) {
		return false
	}
	v2 := &p.To
	if !isReg(v2) {
		return false
	}
	cast := false
	switch p.As {
	case s390x.AMOVW, s390x.AMOVWZ,
		s390x.AMOVH, s390x.AMOVHZ,
		s390x.AMOVB, s390x.AMOVBZ:
		cast = true
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		switch copyu(p, v1, nil) {
		case _Write, _ReadWriteDiff:
			if p.As == obj.ACALL {
				return false
			}
			if (!cast || p.As == r0.Prog.As) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
				copysub(&p.To, v1, v2)
				for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
					p = r.Prog
					copysub(&p.From, v1, v2)
					copysub1(p, v1, v2)
					copysub(&p.To, v1, v2)
				}
				v1.Reg, v2.Reg = v2.Reg, v1.Reg
				return true
			}
			if cast {
				return false
			}
		case _ReadWriteSame:
			if cast {
				return false
			}
		}
		if copyu(p, v2, nil) != _None {
			return false
		}
	}
	return false
}
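This subprop uses a two-argument copysub, unlike the x86 variants later in the listing, which take an extra on/off flag. A sketch of the assumed behavior:

// copysubSketch is a hypothetical stand-in for the two-argument
// copysub: unconditionally rewrite a to name v2's register whenever
// it currently names v1's.
func copysubSketch(a, v1, v2 *obj.Addr) {
	if copyas(a, v1) {
		a.Reg = v2.Reg
	}
}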
Example #4
// The idea is to remove redundant copies.
//     v1->v2  F=0
//     (use v2 s/v2/v1/)*
//     set v1  F=1
//     use v2  return fail (v1->v2 move must remain)
//     -----------------
//     v1->v2  F=0
//     (use v2 s/v2/v1/)*
//     set v1  F=1
//     set v2  return success (caller can remove v1->v2 move)
func copyprop(r *gc.Flow) bool {
	p := r.Prog

	canSub := false
	switch p.As {
	case s390x.AFMOVS, s390x.AFMOVD, s390x.AMOVD:
		canSub = true
	default:
		for rr := gc.Uniqp(r); rr != nil; rr = gc.Uniqp(rr) {
			if gc.Uniqs(rr) == nil {
				break
			}
			switch copyu(rr.Prog, &p.From, nil) {
			case _Read, _None:
				continue
			}
			// write
			if rr.Prog.As == p.As {
				canSub = true
			}
			break
		}
	}
	if !canSub {
		return false
	}
	if copyas(&p.From, &p.To) {
		return true
	}

	gactive++
	return copy1(&p.From, &p.To, r.S1, 0)
}
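copyprop's early-out depends on copyas. A sketch of the assumed predicate, based purely on how it is used here:

// copyasSketch is a hypothetical stand-in for copyas: reports whether
// a and v name exactly the same register, i.e. the move is a no-op copy.
func copyasSketch(a, v *obj.Addr) bool {
	return a.Type == obj.TYPE_REG &&
		v.Type == obj.TYPE_REG &&
		a.Reg == v.Reg
}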
Example #5
// deadCodeElimination removes writes to registers which are written
// to again before they are next read.
func deadCodeElimination(r *gc.Flow) int {
	n := 0
	for ; r != nil; r = r.Link {
		p := r.Prog
		// Currently there are no instructions which write to multiple
		// registers in copyu. This check will need to change if there
		// ever are.
		if !(isGPR(&p.To) || isFPR(&p.To)) || copyu(p, &p.To, nil) != _Write {
			continue
		}
		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
			t := copyu(rr.Prog, &p.To, nil)
			if t == _None {
				continue
			}
			if t == _Write {
				excise(r)
				n++
			}
			break
		}
	}
	return n
}
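deadCodeElimination only fires for register destinations. Sketches of the isGPR/isFPR predicates, under the assumption that they test the s390x register ranges from cmd/internal/obj/s390x:

// isGPRSketch and isFPRSketch are hypothetical stand-ins for the
// register-class predicates used above.
func isGPRSketch(a *obj.Addr) bool {
	return a.Type == obj.TYPE_REG &&
		s390x.REG_R0 <= a.Reg && a.Reg <= s390x.REG_R15
}

func isFPRSketch(a *obj.Addr) bool {
	return a.Type == obj.TYPE_REG &&
		s390x.REG_F0 <= a.Reg && a.Reg <= s390x.REG_F15
}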
Example #6
// conprop eliminates the second of two identical constant moves
// (MOV $con, R ... MOV $con, R) when R is not set in between.
func conprop(r0 *gc.Flow) {
	var p *obj.Prog

	p0 := r0.Prog
	v0 := &p0.To
	r := r0

loop:
	r = gc.Uniqs(r)
	if r == nil || r == r0 {
		return
	}
	if gc.Uniqp(r) == nil {
		return
	}

	p = r.Prog
	switch copyu(p, v0, nil) {
	case 0, // miss
		1: // use
		goto loop

	case 2, // rar
		4: // use and set
		break

	case 3: // set
		if p.As == p0.As &&
			p.From.Type == p0.From.Type &&
			p.From.Reg == p0.From.Reg &&
			p.From.Node == p0.From.Node &&
			p.From.Offset == p0.From.Offset &&
			p.From.Scale == p0.From.Scale &&
			p.From.Index == p0.From.Index &&
			(p.From.Type != obj.TYPE_FCONST || p.From.Val.(float64) == p0.From.Val.(float64)) {
			excise(r)
			goto loop
		}
	}
}
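In the style of the other doc comments, the pattern conprop targets, on a hypothetical instruction stream:

//	MOVL	$7, AX
//	ADDL	BX, CX     // nothing sets AX in the interim
//	MOVL	$7, AX     // redundant: excised by conprop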
Example #7
// rnops skips over any NOP instructions following r and returns the
// first flow node that does real work (or the last node in the NOP
// run if it has no unique successor).
func rnops(r *gc.Flow) *gc.Flow {
	if r != nil {
		var p *obj.Prog
		var r1 *gc.Flow
		for {
			p = r.Prog
			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
				break
			}
			r1 = gc.Uniqs(r)
			if r1 == nil {
				break
			}
			r = r1
		}
	}

	return r
}
Example #8
/*
 * findpre returns the last instruction mentioning v
 * before r. It must be a set, and there must be
 * a unique path from that instruction to r.
 */
func findpre(r *gc.Flow, v *obj.Addr) *gc.Flow {
	var r1 *gc.Flow

	for r1 = gc.Uniqp(r); r1 != nil; r, r1 = r1, gc.Uniqp(r1) {
		if gc.Uniqs(r1) != r {
			return nil
		}
		switch copyu(r1.Prog, v, nil) {
		case 1, /* used */
			2: /* read-alter-rewrite */
			return nil

		case 3, /* set */
			4: /* set and used */
			return r1
		}
	}

	return nil
}
Example #9
// nochange reports whether the registers read by p (p.Reg and any
// register named in p.From) are left unmodified along the straight-line
// path from r up to, but not including, r2.
func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
	if r == r2 {
		return true
	}
	n := 0
	var a [3]obj.Addr
	if p.Reg != 0 && p.Reg != p.To.Reg {
		a[n].Type = obj.TYPE_REG
		a[n].Reg = p.Reg
		n++
	}

	switch p.From.Type {
	case obj.TYPE_SHIFT:
		a[n].Type = obj.TYPE_REG
		a[n].Reg = int16(arm.REG_R0 + (p.From.Offset & 0xf))
		n++
		fallthrough

	case obj.TYPE_REG:
		a[n].Type = obj.TYPE_REG
		a[n].Reg = p.From.Reg
		n++
	}

	if n == 0 {
		return true
	}
	var i int
	for ; r != nil && r != r2; r = gc.Uniqs(r) {
		p = r.Prog
		for i = 0; i < n; i++ {
			if copyu(p, &a[i], nil) > 1 {
				return false
			}
		}
	}

	return true
}
Example #10
/*
 * ASLL x,y,w
 * .. (not use w, not set x y w)
 * AXXX w,a,b (a != w)
 * .. (not use w)
 * (set w)
 * ----------- changed to
 * ..
 * AXXX (x<<y),a,b
 * ..
 */
func shiftprop(r *gc.Flow) bool {
	p := r.Prog
	if p.To.Type != obj.TYPE_REG {
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
		}
		return false
	}

	n := p.To.Reg
	var a obj.Addr
	if p.Reg != 0 && p.Reg != p.To.Reg {
		a.Type = obj.TYPE_REG
		a.Reg = p.Reg
	}

	if gc.Debug['P'] != 0 {
		fmt.Printf("shiftprop\n%v", p)
	}
	r1 := r
	var p1 *obj.Prog
	for {
		/* find first use of shift result; abort if shift operands or result are changed */
		r1 = gc.Uniqs(r1)

		if r1 == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tbranch; FAILURE\n")
			}
			return false
		}

		if gc.Uniqp(r1) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tmerge; FAILURE\n")
			}
			return false
		}

		p1 = r1.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("\n%v", p1)
		}
		switch copyu(p1, &p.To, nil) {
		case 0: /* not used or set */
			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\targs modified; FAILURE\n")
				}
				return false
			}

			continue
		case 3: /* set, not used */
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: noref; FAILURE\n")
			}
			return false
		}

		break
	}

	/* check whether substitution can be done */
	switch p1.As {
	default:
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tnon-dpi; FAILURE\n")
		}
		return false

	case arm.AAND,
		arm.AEOR,
		arm.AADD,
		arm.AADC,
		arm.AORR,
		arm.ASUB,
		arm.ASBC,
		arm.ARSB,
		arm.ARSC:
		if p1.Reg == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && p1.To.Reg == n) {
			if p1.From.Type != obj.TYPE_REG {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tcan't swap; FAILURE\n")
				}
				return false
			}

			p1.Reg = p1.From.Reg
			p1.From.Reg = n
			switch p1.As {
			case arm.ASUB:
				p1.As = arm.ARSB

			case arm.ARSB:
				p1.As = arm.ASUB

			case arm.ASBC:
				p1.As = arm.ARSC

			case arm.ARSC:
				p1.As = arm.ASBC
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("\t=>%v", p1)
			}
		}
		fallthrough

	case arm.ABIC,
		arm.ATST,
		arm.ACMP,
		arm.ACMN:
		if p1.Reg == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tcan't swap; FAILURE\n")
			}
			return false
		}

		if p1.Reg == 0 && p1.To.Reg == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used twice; FAILURE\n")
			}
			return false
		}

		//	case AMVN:
		if p1.From.Type == obj.TYPE_SHIFT {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used in shift; FAILURE\n")
			}
			return false
		}

		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
			}
			return false
		}
	}

	/* check whether shift result is used subsequently */
	p2 := p1

	if p1.To.Reg != n {
		var p1 *obj.Prog
		for {
			r1 = gc.Uniqs(r1)
			if r1 == nil {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tinconclusive; FAILURE\n")
				}
				return false
			}

			p1 = r1.Prog
			if gc.Debug['P'] != 0 {
				fmt.Printf("\n%v", p1)
			}
			switch copyu(p1, &p.To, nil) {
			case 0: /* not used or set */
				continue

			case 3: /* set, not used */
				break

			default: /* used */
				if gc.Debug['P'] != 0 {
					fmt.Printf("\treused; FAILURE\n")
				}
				return false
			}

			break
		}
	}

	/* make the substitution */
	p2.From.Reg = 0
	o := p.Reg
	if o == 0 {
		o = p.To.Reg
	}
	o &= 15

	switch p.From.Type {
	case obj.TYPE_CONST:
		o |= int16(p.From.Offset&0x1f) << 7

	case obj.TYPE_REG:
		o |= 1<<4 | (p.From.Reg&15)<<8
	}

	switch p.As {
	case arm.ASLL:
		o |= 0 << 5

	case arm.ASRL:
		o |= 1 << 5

	case arm.ASRA:
		o |= 2 << 5
	}

	p2.From = obj.Addr{}
	p2.From.Type = obj.TYPE_SHIFT
	p2.From.Offset = int64(o)
	if gc.Debug['P'] != 0 {
		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
	}
	return true
}
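The bit-twiddling at the end of shiftprop builds an ARM barrel-shifter operand inline. The same encoding, unpacked into a hypothetical helper for readability:

// encodeShift mirrors the TYPE_SHIFT operand layout assembled inline
// above (ARM data-processing shifter operand).
func encodeShift(rm int16, byReg bool, shiftType int16, count int16) int64 {
	o := rm & 15 // bits 0-3: source register Rm
	if byReg {
		o |= 1<<4 | (count&15)<<8 // bit 4 set, bits 8-11: count register
	} else {
		o |= (count & 0x1f) << 7 // bits 7-11: immediate shift count
	}
	o |= shiftType << 5 // bits 5-6: 0=LSL (ASLL), 1=LSR (ASRL), 2=ASR (ASRA)
	return int64(o)
}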
Example #11
func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	// byte, word arithmetic elimination.
	elimshortmov(g)

	// constant propagation
	// find MOV $con,R followed by
	// another MOV $con,R without
	// setting R in the interim
	var p *obj.Prog
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case x86.ALEAL:
			if regtyp(&p.To) {
				if p.From.Sym != nil {
					if p.From.Index == x86.REG_NONE {
						conprop(r)
					}
				}
			}

		case x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVSS,
			x86.AMOVSD:
			if regtyp(&p.To) {
				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
					conprop(r)
				}
			}
		}
	}

	var r1 *gc.Flow
	var p1 *obj.Prog
	var r *gc.Flow
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case x86.AMOVL,
			x86.AMOVSS,
			x86.AMOVSD:
			if regtyp(&p.To) {
				if regtyp(&p.From) {
					if copyprop(g, r) {
						excise(r)
						t++
					} else if subprop(r) && copyprop(g, r) {
						excise(r)
						t++
					}
				}
			}

		case x86.AMOVBLZX,
			x86.AMOVWLZX,
			x86.AMOVBLSX,
			x86.AMOVWLSX:
			if regtyp(&p.To) {
				r1 = rnops(gc.Uniqs(r))
				if r1 != nil {
					p1 = r1.Prog
					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
						p1.As = x86.AMOVL
						t++
					}
				}
			}

		case x86.AADDL,
			x86.AADDW:
			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
				break
			}
			if p.From.Offset == -1 {
				if p.As == x86.AADDL {
					p.As = x86.ADECL
				} else {
					p.As = x86.ADECW
				}
				p.From = obj.Addr{}
				break
			}

			if p.From.Offset == 1 {
				if p.As == x86.AADDL {
					p.As = x86.AINCL
				} else {
					p.As = x86.AINCW
				}
				p.From = obj.Addr{}
				break
			}

		case x86.ASUBL,
			x86.ASUBW:
			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
				break
			}
			if p.From.Offset == -1 {
				if p.As == x86.ASUBL {
					p.As = x86.AINCL
				} else {
					p.As = x86.AINCW
				}
				p.From = obj.Addr{}
				break
			}

			if p.From.Offset == 1 {
				if p.As == x86.ASUBL {
					p.As = x86.ADECL
				} else {
					p.As = x86.ADECW
				}
				p.From = obj.Addr{}
				break
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	// MOVSD removal.
	// We never use packed registers, so a MOVSD between registers
	// can be replaced by MOVAPD, which moves the pair of float64s
	// instead of just the lower one. We only use the lower one, but
	// the processor can do better if we do moves using both.
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		if p.As == x86.AMOVSD {
			if regtyp(&p.From) {
				if regtyp(&p.To) {
					p.As = x86.AMOVAPD
				}
			}
		}
	}

	gc.Flowend(g)
}
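The INC/DEC rewrites above are guarded by needc because, unlike ADDL/SUBL, INCL/DECL leave the carry flag untouched. A sketch consistent with that use, assuming the gc flag bits:

// needcSketch is a hypothetical rendering of the needc guard: scan
// forward for a carry-flag consumer before the flag is set or killed
// again; if one exists, the ADD/SUB must keep its carry semantics.
func needcSketch(p *obj.Prog) bool {
	for p != nil {
		if p.Info.Flags&gc.UseCarry != 0 {
			return true // carry consumed: do not rewrite to INC/DEC
		}
		if p.Info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
			return false // carry redefined before any use
		}
		p = p.Link
	}
	return false
}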
Example #12
func pushback(r0 *gc.Flow) {
	var r *gc.Flow

	var b *gc.Flow
	p0 := r0.Prog
	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
		p := r.Prog
		if p.As != obj.ANOP {
			if !(isReg(&p.From) || isConst(&p.From)) || !isReg(&p.To) {
				break
			}
			if copyu(p, &p0.To, nil) != _None || copyu(p0, &p.To, nil) != _None {
				break
			}
		}

		if p.As == obj.ACALL {
			break
		}
		b = r
	}

	if b == nil {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("no pushback: %v\n", r0.Prog)
			if r != nil {
				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
			}
		}

		return
	}

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("pushback\n")
		for r := b; ; r = r.Link {
			fmt.Printf("\t%v\n", r.Prog)
			if r == r0 {
				break
			}
		}
	}

	t := *r0.Prog
	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
		p0 = r.Link.Prog
		p := r.Prog
		p0.As = p.As
		p0.Lineno = p.Lineno
		p0.From = p.From
		p0.To = p.To
		p0.From3 = p.From3
		p0.Reg = p.Reg
		p0.RegTo2 = p.RegTo2
		if r == b {
			break
		}
	}

	p0 = r.Prog
	p0.As = t.As
	p0.Lineno = t.Lineno
	p0.From = t.From
	p0.To = t.To
	p0.From3 = t.From3
	p0.Reg = t.Reg
	p0.RegTo2 = t.RegTo2

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("\tafter\n")
		for r := b; ; r = r.Link {
			fmt.Printf("\t%v\n", r.Prog)
			if r == r0 {
				break
			}
		}
	}
}
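pushback rotates the instruction at r0 up to the earliest safe slot b, sliding everything in between down one position. On a hypothetical stream:

//	before:              after:
//	ADD   R1, R2         MOVD  0(R9), R3
//	ADD   R1, R4         ADD   R1, R2
//	MOVD  0(R9), R3      ADD   R1, R4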
Example #13
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R0
 *	ADD	b, R0	/ no use of R1
 *	MOV	R0, R1
 * would be converted to
 *	MOV	a, R1
 *	ADD	b, R1
 *	MOV	R1, R0
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 */
func subprop(r0 *gc.Flow) bool {
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("subprop %v\n", r0.Prog)
	}
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
		}
		return false
	}

	v2 := &p.To
	if !regtyp(v2) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
		}
		return false
	}

	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\t? %v\n", r.Prog)
		}
		if gc.Uniqs(r) == nil {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tno unique successor\n")
			}
			break
		}

		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tfound %v; return 0\n", p)
			}
			return false
		}

		if p.Info.Reguse|p.Info.Regset != 0 {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tfound %v; return 0\n", p)
			}
			return false
		}

		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
			copysub(&p.To, v1, v2, true)
			if gc.Debug['P'] != 0 {
				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
					fmt.Printf(" excise")
				}
				fmt.Printf("\n")
			}

			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
				p = r.Prog
				copysub(&p.From, v1, v2, true)
				copysub(&p.To, v1, v2, true)
				if gc.Debug['P'] != 0 {
					fmt.Printf("%v\n", r.Prog)
				}
			}

			v1.Reg, v2.Reg = v2.Reg, v1.Reg
			if gc.Debug['P'] != 0 {
				fmt.Printf("%v last\n", r.Prog)
			}
			return true
		}

		if copyau(&p.From, v2) || copyau(&p.To, v2) {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
			}
			break
		}

		if copysub(&p.From, v1, v2, false) || copysub(&p.To, v1, v2, false) {
			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
				fmt.Printf("\tcopysub failed\n")
			}
			break
		}
	}

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("\tran off end; return 0\n")
	}
	return false
}
Example #14
func pushback(r0 *gc.Flow) {
	var r *gc.Flow
	var p *obj.Prog

	var b *gc.Flow
	p0 := r0.Prog
	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
		p = r.Prog
		if p.As != obj.ANOP {
			if !regconsttyp(&p.From) || !regtyp(&p.To) {
				break
			}
			if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
				break
			}
		}

		if p.As == obj.ACALL {
			break
		}
		b = r
	}

	if b == nil {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("no pushback: %v\n", r0.Prog)
			if r != nil {
				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
			}
		}

		return
	}

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("pushback\n")
		for r := b; ; r = r.Link {
			fmt.Printf("\t%v\n", r.Prog)
			if r == r0 {
				break
			}
		}
	}

	t := *r0.Prog
	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
		p0 = r.Link.Prog
		p = r.Prog
		p0.As = p.As
		p0.Lineno = p.Lineno
		p0.From = p.From
		p0.To = p.To

		if r == b {
			break
		}
	}

	p0 = r.Prog
	p0.As = t.As
	p0.Lineno = t.Lineno
	p0.From = t.From
	p0.To = t.To

	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		fmt.Printf("\tafter\n")
		for r := b; ; r = r.Link {
			fmt.Printf("\t%v\n", r.Prog)
			if r == r0 {
				break
			}
		}
	}
}
Example #15
/*
 * xtramodes enables the ARM post increment and
 * shift offset addressing modes to transform
 *   MOVW   0(R3),R1
 *   ADD    $4,R3,R3
 * into
 *   MOVW.P 4(R3),R1
 * and
 *   ADD    R0,R1
 *   MOVBU  0(R1),R0
 * into
 *   MOVBU  R0<<0(R1),R0
 */
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
	p := r.Prog
	v := *a
	v.Type = obj.TYPE_REG
	r1 := findpre(r, &v)
	if r1 != nil {
		p1 := r1.Prog
		if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
			switch p1.As {
			case arm.AADD:
				if p1.Scond&arm.C_SBIT != 0 {
					// avoid altering ADD.S/ADC sequences.
					break
				}

				if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
					if nochange(gc.Uniqs(r1), r, p1) {
						if a != &p.From || v.Reg != p.To.Reg {
							if finduse(g, r.S1, &v) {
								if p1.Reg == 0 || p1.Reg == v.Reg {
									/* pre-indexing */
									p.Scond |= arm.C_WBIT
								} else {
									return false
								}
							}
						}

						switch p1.From.Type {
						/* register offset */
						case obj.TYPE_REG:
							if gc.Nacl {
								return false
							}
							*a = obj.Addr{}
							a.Type = obj.TYPE_SHIFT
							a.Offset = int64(p1.From.Reg) & 15

							/* scaled register offset */
						case obj.TYPE_SHIFT:
							if gc.Nacl {
								return false
							}
							*a = obj.Addr{}
							a.Type = obj.TYPE_SHIFT
							fallthrough

							/* immediate offset */
						case obj.TYPE_CONST,
							obj.TYPE_ADDR:
							a.Offset = p1.From.Offset
						}

						if p1.Reg != 0 {
							a.Reg = p1.Reg
						}
						excise(r1)
						return true
					}
				}

			case arm.AMOVW:
				if p1.From.Type == obj.TYPE_REG {
					r2 := findinc(r1, r, &p1.From)
					if r2 != nil {
						var r3 *gc.Flow
						for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
						}
						if r3 == r {
							/* post-indexing */
							p1 := r2.Prog

							a.Reg = p1.To.Reg
							a.Offset = p1.From.Offset
							p.Scond |= arm.C_PBIT
							if !finduse(g, r, &r1.Prog.To) {
								excise(r1)
							}
							excise(r2)
							return true
						}
					}
				}
			}
		}
	}

	if a != &p.From || a.Reg != p.To.Reg {
		r1 := findinc(r, nil, &v)
		if r1 != nil {
			/* post-indexing */
			p1 := r1.Prog

			a.Offset = p1.From.Offset
			p.Scond |= arm.C_PBIT
			excise(r1)
			return true
		}
	}

	return false
}
Example #16
// castPropagation tries to eliminate unecessary casts.
//
// For example:
// 	MOVHZ	R1, R2     // uint16
//	MOVB	R2, 0(R15) // int8
// Can be simplified to:
//	MOVB	R1, 0(R15)
func castPropagation(r *gc.Flow) int {
	n := 0
	for ; r != nil; r = r.Link {
		p := r.Prog
		if !isMove(p) || !isGPR(&p.To) {
			continue
		}

		// r is a move with a destination register
		var move *gc.Flow
		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
			if gc.Uniqp(rr) == nil {
				// branch target: leave alone
				break
			}
			pp := rr.Prog
			if isMove(pp) && copyas(&pp.From, &p.To) {
				if pp.To.Type == obj.TYPE_MEM {
					if p.From.Type == obj.TYPE_MEM ||
						p.From.Type == obj.TYPE_ADDR {
						break
					}
					if p.From.Type == obj.TYPE_CONST &&
						int64(int16(p.From.Offset)) != p.From.Offset {
						break
					}
				}
				move = rr
				break
			}
			if pp.As == obj.ANOP {
				continue
			}
			break
		}
		if move == nil {
			continue
		}

		// we have a move that reads from our destination reg, check if any future
		// instructions also read from the reg
		mp := move.Prog
		if !copyas(&mp.From, &mp.To) {
			safe := false
			for rr := gc.Uniqs(move); rr != nil; rr = gc.Uniqs(rr) {
				if gc.Uniqp(rr) == nil {
					break
				}
				switch copyu(rr.Prog, &p.To, nil) {
				case _None:
					continue
				case _Write:
					safe = true
				}
				break
			}
			if !safe {
				continue
			}
		}

		// at this point we have something like:
		// MOV* const/mem/reg, reg
		// MOV* reg, reg/mem
		// now check if this is a cast that cannot be forward propagated
		execute := false
		if p.As == mp.As || isZero(&p.From) || size(p.As) == size(mp.As) {
			execute = true
		} else if isGPR(&p.From) && size(p.As) >= size(mp.As) {
			execute = true
		}

		if execute {
			mp.From = p.From
			excise(r)
			n++
		}
	}
	return n
}
Example #17
func peep(firstp *obj.Prog) {
	g := gc.Flowstart(firstp, nil)
	if g == nil {
		return
	}
	gactive = 0

	// byte, word arithmetic elimination.
	elimshortmov(g)

	// constant propagation
	// find MOV $con,R followed by
	// another MOV $con,R without
	// setting R in the interim
	var p *obj.Prog
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case x86.ALEAL,
			x86.ALEAQ:
			if regtyp(&p.To) {
				if p.From.Sym != nil {
					if p.From.Index == x86.REG_NONE {
						conprop(r)
					}
				}
			}

		case x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ,
			x86.AMOVSS,
			x86.AMOVSD:
			if regtyp(&p.To) {
				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
					conprop(r)
				}
			}
		}
	}

	var r *gc.Flow
	var r1 *gc.Flow
	var p1 *obj.Prog
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case x86.AMOVL,
			x86.AMOVQ,
			x86.AMOVSS,
			x86.AMOVSD:
			if regtyp(&p.To) {
				if regtyp(&p.From) {
					if copyprop(g, r) {
						excise(r)
						t++
					} else if subprop(r) && copyprop(g, r) {
						excise(r)
						t++
					}
				}
			}

		case x86.AMOVBLZX,
			x86.AMOVWLZX,
			x86.AMOVBLSX,
			x86.AMOVWLSX:
			if regtyp(&p.To) {
				r1 = rnops(gc.Uniqs(r))
				if r1 != nil {
					p1 = r1.Prog
					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
						p1.As = x86.AMOVL
						t++
					}
				}
			}

		case x86.AMOVBQSX,
			x86.AMOVBQZX,
			x86.AMOVWQSX,
			x86.AMOVWQZX,
			x86.AMOVLQSX,
			x86.AMOVLQZX,
			x86.AMOVQL:
			if regtyp(&p.To) {
				r1 = rnops(gc.Uniqs(r))
				if r1 != nil {
					p1 = r1.Prog
					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
						p1.As = x86.AMOVQ
						t++
					}
				}
			}

		case x86.AADDL,
			x86.AADDQ,
			x86.AADDW:
			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
				break
			}
			if p.From.Offset == -1 {
				if p.As == x86.AADDQ {
					p.As = x86.ADECQ
				} else if p.As == x86.AADDL {
					p.As = x86.ADECL
				} else {
					p.As = x86.ADECW
				}
				p.From = obj.Addr{}
				break
			}

			if p.From.Offset == 1 {
				if p.As == x86.AADDQ {
					p.As = x86.AINCQ
				} else if p.As == x86.AADDL {
					p.As = x86.AINCL
				} else {
					p.As = x86.AINCW
				}
				p.From = obj.Addr{}
				break
			}

		case x86.ASUBL,
			x86.ASUBQ,
			x86.ASUBW:
			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
				break
			}
			if p.From.Offset == -1 {
				if p.As == x86.ASUBQ {
					p.As = x86.AINCQ
				} else if p.As == x86.ASUBL {
					p.As = x86.AINCL
				} else {
					p.As = x86.AINCW
				}
				p.From = obj.Addr{}
				break
			}

			if p.From.Offset == 1 {
				if p.As == x86.ASUBQ {
					p.As = x86.ADECQ
				} else if p.As == x86.ASUBL {
					p.As = x86.ADECL
				} else {
					p.As = x86.ADECW
				}
				p.From = obj.Addr{}
				break
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	// MOVLQZX removal.
	// The MOVLQZX exists to avoid being confused for a
	// MOVL that is just copying 32-bit data around during
	// copyprop. Now that copyprop is done, remove MOVLQZX R1, R2
	// if it is dominated by an earlier ADDL/MOVL/etc into R1 that
	// will have already cleared the high bits.
	//
	// MOVSD removal.
	// We never use packed registers, so a MOVSD between registers
	// can be replaced by MOVAPD, which moves the pair of float64s
	// instead of just the lower one. We only use the lower one, but
	// the processor can do better if we do moves using both.
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		if p.As == x86.AMOVLQZX {
			if regtyp(&p.From) {
				if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
					if prevl(r, p.From.Reg) {
						excise(r)
					}
				}
			}
		}

		if p.As == x86.AMOVSD {
			if regtyp(&p.From) {
				if regtyp(&p.To) {
					p.As = x86.AMOVAPD
				}
			}
		}
	}

	// load pipelining
	// push any load from memory as early as possible
	// to give it time to complete before use.
	for r := g.Start; r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		case x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ,
			x86.AMOVLQZX:
			if regtyp(&p.To) && !regconsttyp(&p.From) {
				pushback(r)
			}
		}
	}

	gc.Flowend(g)
}
Example #18
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R0
 *	ADD	b, R0	/ no use of R1
 *	MOV	R0, R1
 * would be converted to
 *	MOV	a, R1
 *	ADD	b, R1
 *	MOV	R1, R0
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("\t? %v\n", r.Prog)
		}
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		if p.Info.Reguse|p.Info.Regset != 0 {
			return false
		}

		if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
			copysub(&p.To, v1, v2, true)
			if gc.Debug['P'] != 0 {
				fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
				if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
					fmt.Printf(" excise")
				}
				fmt.Printf("\n")
			}

			for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
				p = r.Prog
				copysub(&p.From, v1, v2, true)
				copysub(&p.To, v1, v2, true)
				if gc.Debug['P'] != 0 {
					fmt.Printf("%v\n", r.Prog)
				}
			}

			t := int(v1.Reg)
			v1.Reg = v2.Reg
			v2.Reg = int16(t)
			if gc.Debug['P'] != 0 {
				fmt.Printf("%v last\n", r.Prog)
			}
			return true
		}

		if copyau(&p.From, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, false) || copysub(&p.To, v1, v2, false) {
			break
		}
	}

	return false
}
Example #19
// fuseOpMoves looks for moves following 2-operand operations and tries to merge them into
// a 3-operand operation.
//
// For example:
//	ADD R1, R2
//	MOVD R2, R3
// might become
//	ADD R1, R2, R3
func fuseOpMoves(r *gc.Flow) int {
	n := 0
	for ; r != nil; r = r.Link {
		p := r.Prog
		switch p.As {
		case s390x.AADD:
			// ok - constants and registers can both be fused
		case s390x.ASUB:
			if isConst(&p.From) && int64(int16(p.From.Offset)) != p.From.Offset {
				continue
			}
		case s390x.ASLW,
			s390x.ASRW,
			s390x.ASRAW,
			s390x.ASLD,
			s390x.ASRD,
			s390x.ASRAD,
			s390x.ARLL,
			s390x.ARLLG:
			// ok - p.From will be a reg or a constant
		case s390x.AOR,
			s390x.AORN,
			s390x.AAND,
			s390x.AANDN,
			s390x.ANAND,
			s390x.ANOR,
			s390x.AXOR,
			s390x.AMULLW,
			s390x.AMULLD:
			if isConst(&p.From) {
				// these instructions can either use 3 register form
				// or have an immediate but not both
				continue
			}
		default:
			continue
		}

		if p.Reg != 0 && p.Reg != p.To.Reg {
			continue
		}

		var move *gc.Flow
		rr := gc.Uniqs(r)
		for {
			if rr == nil || gc.Uniqp(rr) == nil || rr == r {
				break
			}
			pp := rr.Prog
			switch copyu(pp, &p.To, nil) {
			case _None:
				rr = gc.Uniqs(rr)
				continue
			case _Read:
				if move == nil && pp.As == s390x.AMOVD && isGPR(&pp.From) && isGPR(&pp.To) {
					move = rr
					rr = gc.Uniqs(rr)
					continue
				}
			case _Write:
				if move == nil {
					// dead code
					excise(r)
					n++
				} else {
					for prev := gc.Uniqp(move); prev != r; prev = gc.Uniqp(prev) {
						if copyu(prev.Prog, &move.Prog.To, nil) != _None {
							move = nil
							break
						}
					}
					if move == nil {
						break
					}
					p.Reg, p.To.Reg = p.To.Reg, move.Prog.To.Reg
					excise(move)
					n++

					// clean up
					if p.From.Reg == p.To.Reg && isCommutative(p.As) {
						p.From.Reg, p.Reg = p.Reg, 0
					}
					if p.To.Reg == p.Reg {
						p.Reg = 0
					}
					// we could try again if p has become a 2-operand op
					// but in testing nothing extra was extracted
				}
			}
			break
		}
	}
	return n
}
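fuseOpMoves' clean-up step consults isCommutative. A sketch of the assumed opcode set, restricted to the commutative operations handled above:

// isCommutativeSketch is a hypothetical stand-in for isCommutative.
func isCommutativeSketch(as obj.As) bool {
	switch as {
	case s390x.AADD,
		s390x.AOR,
		s390x.AAND,
		s390x.AXOR,
		s390x.AMULLW,
		s390x.AMULLD:
		return true
	}
	return false
}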
Example #20
// constantPropagation removes redundant constant moves and, where
// possible, folds a constant directly into a later move that reads it.
func constantPropagation(r *gc.Flow) int {
	n := 0
	// find MOV $con,R followed by
	// another MOV $con,R without
	// setting R in the interim
	for ; r != nil; r = r.Link {
		p := r.Prog
		if isMove(p) {
			if !isReg(&p.To) {
				continue
			}
			if !isConst(&p.From) {
				continue
			}
		} else {
			continue
		}

		rr := r
		for {
			rr = gc.Uniqs(rr)
			if rr == nil || rr == r {
				break
			}
			if gc.Uniqp(rr) == nil {
				break
			}

			pp := rr.Prog
			t := copyu(pp, &p.To, nil)
			switch t {
			case _None:
				continue
			case _Read:
				if !isGPR(&pp.From) || !isMove(pp) {
					continue
				}
				if p.From.Type == obj.TYPE_CONST {
					v := applyCast(p.As, p.From.Offset)
					if isGPR(&pp.To) {
						if int64(int32(v)) == v || ((v>>32)<<32) == v {
							pp.From.Reg = 0
							pp.From.Offset = v
							pp.From.Type = obj.TYPE_CONST
							n++
						}
					} else if int64(int16(v)) == v {
						pp.From.Reg = 0
						pp.From.Offset = v
						pp.From.Type = obj.TYPE_CONST
						n++
					}
				}
				continue
			case _Write:
				if p.As != pp.As || p.From.Type != pp.From.Type {
					break
				}
				if p.From.Type == obj.TYPE_CONST && p.From.Offset == pp.From.Offset {
					excise(rr)
					n++
					continue
				} else if p.From.Type == obj.TYPE_FCONST {
					if p.From.Val.(float64) == pp.From.Val.(float64) {
						excise(rr)
						n++
						continue
					}
				}
			}
			break
		}
	}
	return n
}
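constantPropagation folds the cast performed by a sized move into the constant via applyCast. A sketch, assuming each MOV* truncates or extends a constant exactly as it would a register value:

// applyCastSketch is a hypothetical rendering of applyCast.
func applyCastSketch(as obj.As, v int64) int64 {
	switch as {
	case s390x.AMOVB:
		return int64(int8(v)) // sign-extend byte
	case s390x.AMOVBZ:
		return int64(uint8(v)) // zero-extend byte
	case s390x.AMOVH:
		return int64(int16(v))
	case s390x.AMOVHZ:
		return int64(uint16(v))
	case s390x.AMOVW:
		return int64(int32(v))
	case s390x.AMOVWZ:
		return int64(uint32(v))
	}
	return v
}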
Example #21
// fuseCompareBranch finds comparisons followed by a branch and converts
// them into a compare-and-branch instruction (which avoids setting the
// condition code).
func fuseCompareBranch(r *gc.Flow) int {
	n := 0
	for ; r != nil; r = r.Link {
		p := r.Prog
		r1 := gc.Uniqs(r)
		if r1 == nil {
			continue
		}
		p1 := r1.Prog

		var ins obj.As
		switch p.As {
		case s390x.ACMP:
			switch p1.As {
			case s390x.ABCL, s390x.ABC:
				continue
			case s390x.ABEQ:
				ins = s390x.ACMPBEQ
			case s390x.ABGE:
				ins = s390x.ACMPBGE
			case s390x.ABGT:
				ins = s390x.ACMPBGT
			case s390x.ABLE:
				ins = s390x.ACMPBLE
			case s390x.ABLT:
				ins = s390x.ACMPBLT
			case s390x.ABNE:
				ins = s390x.ACMPBNE
			default:
				continue
			}

		case s390x.ACMPU:
			switch p1.As {
			case s390x.ABCL, s390x.ABC:
				continue
			case s390x.ABEQ:
				ins = s390x.ACMPUBEQ
			case s390x.ABGE:
				ins = s390x.ACMPUBGE
			case s390x.ABGT:
				ins = s390x.ACMPUBGT
			case s390x.ABLE:
				ins = s390x.ACMPUBLE
			case s390x.ABLT:
				ins = s390x.ACMPUBLT
			case s390x.ABNE:
				ins = s390x.ACMPUBNE
			default:
				continue
			}

		case s390x.ACMPW, s390x.ACMPWU:
			continue

		default:
			continue
		}

		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("cnb %v; %v  ", p, p1)
		}

		if p1.To.Sym != nil {
			continue
		}

		if p.To.Type == obj.TYPE_REG {
			p1.As = ins
			p1.From = p.From
			p1.Reg = p.To.Reg
			p1.From3 = nil
		} else if p.To.Type == obj.TYPE_CONST {
			switch p.As {
			case s390x.ACMP, s390x.ACMPW:
				if (p.To.Offset < -(1 << 7)) || (p.To.Offset >= ((1 << 7) - 1)) {
					continue
				}
			case s390x.ACMPU, s390x.ACMPWU:
				if p.To.Offset >= (1 << 8) {
					continue
				}
			default:
			}
			p1.As = ins
			p1.From = p.From
			p1.Reg = 0
			p1.From3 = new(obj.Addr)
			*(p1.From3) = p.To
		} else {
			continue
		}

		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
			fmt.Printf("%v\n", p1)
		}
		excise(r)
		n++
	}
	return n
}
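The fusion performed above, shown on a hypothetical stream:

//	CMP	R1, R2            CMPBEQ	R1, R2, label
//	BEQ	label        =>
//
// The CMP is excised and the branch absorbs both operands, so the
// condition code is never set.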
Example #22
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R1
 *	ADD	b, R1	/ no use of R2
 *	MOV	R1, R2
 * would be converted to
 *	MOV	a, R2
 *	ADD	b, R2
 *	MOV	R2, R1
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 *
 * r0 (the argument, not the register) is the MOV at the end of the
 * above sequences.  This returns true if it modified any instructions.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
			if p.To.Type == v1.Type {
				if p.To.Reg == v1.Reg {
					copysub(&p.To, v1, v2, true)
					if gc.Debug['P'] != 0 {
						fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
						if p.From.Type == v2.Type {
							fmt.Printf(" excise")
						}
						fmt.Printf("\n")
					}

					for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
						p = r.Prog
						copysub(&p.From, v1, v2, true)
						copysub1(p, v1, v2, true)
						copysub(&p.To, v1, v2, true)
						if gc.Debug['P'] != 0 {
							fmt.Printf("%v\n", r.Prog)
						}
					}

					v1.Reg, v2.Reg = v2.Reg, v1.Reg
					if gc.Debug['P'] != 0 {
						fmt.Printf("%v last\n", r.Prog)
					}
					return true
				}
			}
		}

		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
			break
		}
	}

	return false
}
Example #23
/*
 * the idea is to substitute
 * one register for another
 * from one MOV to another
 *	MOV	a, R0
 *	ADD	b, R0	/ no use of R1
 *	MOV	R0, R1
 * would be converted to
 *	MOV	a, R1
 *	ADD	b, R1
 *	MOV	R1, R0
 * hopefully, then the former or latter MOV
 * will be eliminated by copy propagation.
 */
func subprop(r0 *gc.Flow) bool {
	p := r0.Prog
	v1 := &p.From
	if !regtyp(v1) {
		return false
	}
	v2 := &p.To
	if !regtyp(v2) {
		return false
	}
	for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
		if gc.Uniqs(r) == nil {
			break
		}
		p = r.Prog
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}
		if p.Info.Flags&gc.Call != 0 {
			return false
		}

		// TODO(rsc): Whatever invalidated the info should have done this call.
		proginfo(p)

		if (p.Info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
			p.Info.Flags |= gc.RegRead
			p.Info.Flags &^= (gc.CanRegRead | gc.RightRead)
			p.Reg = p.To.Reg
		}

		switch p.As {
		case arm.AMULLU,
			arm.AMULA,
			arm.AMVN:
			return false
		}

		if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
			if p.To.Type == v1.Type {
				if p.To.Reg == v1.Reg {
					if p.Scond == arm.C_SCOND_NONE {
						copysub(&p.To, v1, v2, true)
						if gc.Debug['P'] != 0 {
							fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
							if p.From.Type == v2.Type {
								fmt.Printf(" excise")
							}
							fmt.Printf("\n")
						}

						for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
							p = r.Prog
							copysub(&p.From, v1, v2, true)
							copysub1(p, v1, v2, true)
							copysub(&p.To, v1, v2, true)
							if gc.Debug['P'] != 0 {
								fmt.Printf("%v\n", r.Prog)
							}
						}

						v1.Reg, v2.Reg = v2.Reg, v1.Reg
						if gc.Debug['P'] != 0 {
							fmt.Printf("%v last\n", r.Prog)
						}
						return true
					}
				}
			}
		}

		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
			break
		}
		if copysub(&p.From, v1, v2, false) || copysub1(p, v1, v2, false) || copysub(&p.To, v1, v2, false) {
			break
		}
	}

	return false
}