Example #1
// copysub substitutes s for v in a.
// copysub returns true on failure to substitute.
// TODO(dfc) reverse this logic to return false on substitution failure.
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
	if copyas(a, v) {
		if s.Reg >= x86.REG_AX && s.Reg <= x86.REG_DI || s.Reg >= x86.REG_X0 && s.Reg <= x86.REG_X7 {
			if f {
				a.Reg = s.Reg
			}
		}
		return false
	}

	if regtyp(v) {
		if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == v.Reg {
			if (s.Reg == x86.REG_BP) && a.Index != x86.REG_NONE {
				return true /* can't use BP-base with index */
			}
			if f {
				a.Reg = s.Reg
			}
		}

		if a.Index == v.Reg {
			if f {
				a.Index = s.Reg
			}
		}
	}
	return false
}
Example #2
func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if p.As == ALEAL || p.As == ALEAQ {
		return
	}

	if a.Reg == REG_BP {
		ctxt.Diag("invalid address: %v", p)
		return
	}

	if a.Reg == REG_TLS {
		a.Reg = REG_BP
	}
	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
		switch a.Reg {
		// all ok
		case REG_BP, REG_SP, REG_R15:
			break

		default:
			if a.Index != REG_NONE {
				ctxt.Diag("invalid address %v", p)
			}
			a.Index = a.Reg
			if a.Index != REG_NONE {
				a.Scale = 1
			}
			a.Reg = REG_R15
		}
	}
}
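The default case demotes the original base register into the index slot (scale 1) and installs R15, the NaCl sandbox base, as the new base. A minimal runnable sketch of that demotion, using a hypothetical simplified address type (the register constants are illustrative, not the real ones):

package main

import "fmt"

type addr struct {
	Reg, Index, Scale int
}

const (
	regNone = 0  // no register
	regR15  = 15 // stand-in for the sandbox base register
)

// rebase mirrors the default case in nacladdr: the old base becomes
// an index with scale 1, and R15 becomes the base.
func rebase(a *addr) {
	if a.Index != regNone {
		panic("invalid address: base and index both set")
	}
	a.Index = a.Reg
	if a.Index != regNone {
		a.Scale = 1
	}
	a.Reg = regR15
}

func main() {
	a := addr{Reg: 3} // some ordinary base register
	rebase(&a)
	fmt.Printf("%+v\n", a) // {Reg:15 Index:3 Scale:1}
}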
Example #3
// copysub substitutes s for v in a.
// copysub returns true on failure to substitute.
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau(a, v) {
		if a.Type == obj.TYPE_SHIFT {
			if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
				a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
			}
			if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
				a.Offset = a.Offset&^(0xf<<8) | (int64(s.Reg)&0xf)<<8
			}
		} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
			if a.Offset == int64(v.Reg) {
				a.Offset = int64(s.Reg)
			}
			if a.Reg == v.Reg {
				a.Reg = s.Reg
			}
		} else {
			a.Reg = s.Reg
		}
	}
	return false
}
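On ARM, a TYPE_SHIFT operand packs its registers into Offset: Rm occupies bits 0-3, bit 4 marks a register shift, and Rs occupies bits 8-11. A self-contained sketch of the same masked substitution, with plain small integers standing in for the arm.REG_R0-relative register numbers:

package main

import "fmt"

// subShiftRegs replaces register v with s wherever it appears in a
// packed shift operand, mirroring the two masked updates in copysub.
func subShiftRegs(offset, v, s int64) int64 {
	if offset&0xf == v { // Rm field, bits 0-3
		offset = offset&^0xf | s&0xf
	}
	if offset&(1<<4) != 0 && (offset>>8)&0xf == v { // Rs field, register shift only
		offset = offset&^(0xf<<8) | (s&0xf)<<8
	}
	return offset
}

func main() {
	op := int64(2 | 1<<4 | 3<<8)                // R2 shifted by R3
	fmt.Printf("%#x\n", subShiftRegs(op, 3, 5)) // Rs rewritten to R5
}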
Example #4
func addreg(a *obj.Addr, rn int) {
	a.Sym = nil
	a.Node = nil
	a.Offset = 0
	a.Type = obj.TYPE_REG
	a.Reg = int16(rn)
	a.Name = 0

	Ostats.Ncvtreg++
}
Example #5
// copysub replaces v with s in a if f==true, or indicates whether it could if f==false.
// Returns true on failure to substitute (it always succeeds on mips).
// TODO(dfc) remove unused return value, remove calls with f=false as they do nothing.
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f bool) bool {
	if f && copyau(a, v) {
		a.Reg = s.Reg
	}
	return false
}
Example #6
/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int64()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case arm.AADD,
			arm.ASUB,
			arm.AAND,
			arm.AORR,
			arm.AEOR,
			arm.AMOVB,
			arm.AMOVBS,
			arm.AMOVBU,
			arm.AMOVH,
			arm.AMOVHS,
			arm.AMOVHU,
			arm.AMOVW:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(arm.AMOVW, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Name = obj.NAME_NONE
		n1.Type = n.Type
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}
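sudoaddable reserves two slots on the clean stack; the matching sudoclean (not shown here) later releases any register recorded in them. A minimal sketch of that push/pop discipline, with a hypothetical node type standing in for gc.Node and a print standing in for the register free:

package main

import "fmt"

type node struct {
	op  string // "empty" or "register"
	reg int
}

var (
	clean  [20]node
	cleani int
)

// push2 reserves two cleanup slots, as sudoaddable does on entry to
// its interesting cases.
func push2() (*node, *node) {
	cleani += 2
	r, r1 := &clean[cleani-1], &clean[cleani-2]
	r.op, r1.op = "empty", "empty"
	return r, r1
}

// sudoclean pops the two slots, freeing any register found there.
func sudoclean() {
	for i := 0; i < 2; i++ {
		n := &clean[cleani-1-i]
		if n.op == "register" {
			fmt.Println("freeing register", n.reg) // stand-in for gc.Regfree
		}
		n.op = "empty"
	}
	cleani -= 2
}

func main() {
	r, _ := push2()
	r.op, r.reg = "register", 4 // as if gc.Regalloc had filled the slot
	sudoclean()
}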
Example #7
// Naddr rewrites a to refer to n.
// It assumes that a is zeroed on entry.
func Naddr(a *obj.Addr, n *Node) {
	if n == nil {
		return
	}

	if n.Type != nil && n.Type.Etype != TIDEAL {
		// TODO(rsc): This is undone by the selective clearing of width below,
		// to match architectures that were not as aggressive in setting width
		// during naddr. Those widths must be cleared to avoid triggering
		// failures in gins when it detects real but heretofore latent (and one
		// hopes innocuous) type mismatches.
		// The type mismatches should be fixed and the clearing below removed.
		dowidth(n.Type)

		a.Width = n.Type.Width
	}

	switch n.Op {
	default:
		a := a // copy to let escape into Ctxt.Dconv
		Debug['h'] = 1
		Dump("naddr", n)
		Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))

	case OREGISTER:
		a.Type = obj.TYPE_REG
		a.Reg = n.Reg
		a.Sym = nil
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OINDREG:
		a.Type = obj.TYPE_MEM
		a.Reg = n.Reg
		a.Sym = Linksym(n.Sym)
		a.Offset = n.Xoffset
		if a.Offset != int64(int32(a.Offset)) {
			Yyerror("offset %d too large for OINDREG", a.Offset)
		}
		if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OCLOSUREVAR:
		if !Curfn.Func.Needctxt {
			Fatalf("closurevar without needctxt")
		}
		a.Type = obj.TYPE_MEM
		a.Reg = int16(Thearch.REGCTXT)
		a.Sym = nil
		a.Offset = n.Xoffset

	case OCFUNC:
		Naddr(a, n.Left)
		a.Sym = Linksym(n.Left.Sym)

	case ONAME:
		a.Etype = 0
		if n.Type != nil {
			a.Etype = uint8(Simtype[n.Type.Etype])
		}
		a.Offset = n.Xoffset
		s := n.Sym
		a.Node = n.Orig

		//if(a->node >= (Node*)&n)
		//	fatal("stack node");
		if s == nil {
			s = Lookup(".noname")
		}
		if n.Name.Method && n.Type != nil && n.Type.Sym != nil && n.Type.Sym.Pkg != nil {
			s = Pkglookup(s.Name, n.Type.Sym.Pkg)
		}

		a.Type = obj.TYPE_MEM
		switch n.Class {
		default:
			Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)

		case PEXTERN:
			a.Name = obj.NAME_EXTERN

		case PAUTO:
			a.Name = obj.NAME_AUTO

		case PPARAM, PPARAMOUT:
			a.Name = obj.NAME_PARAM

		case PFUNC:
			a.Name = obj.NAME_EXTERN
			a.Type = obj.TYPE_ADDR
			a.Width = int64(Widthptr)
			s = funcsym(s)
		}

		a.Sym = Linksym(s)

	case ODOT:
		// A special case to make write barriers more efficient.
		// Taking the address of the first field of a named struct
		// is the same as taking the address of the struct.
		if !n.Left.Type.IsStruct() || n.Left.Type.Field(0).Sym != n.Sym {
			Debug['h'] = 1
			Dump("naddr", n)
			Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a))
		}
		Naddr(a, n.Left)

	case OLITERAL:
		if Thearch.LinkArch.Family == sys.I386 {
			a.Width = 0
		}
		switch u := n.Val().U.(type) {
		default:
			Fatalf("naddr: const %v", Tconv(n.Type, FmtLong))

		case *Mpflt:
			a.Type = obj.TYPE_FCONST
			a.Val = u.Float64()

		case *Mpint:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = u.Int64()

		case string:
			datagostring(u, a)

		case bool:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = int64(obj.Bool2int(u))

		case *NilVal:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = 0
		}

	case OADDR:
		Naddr(a, n.Left)
		a.Etype = uint8(Tptr)
		if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { // TODO(rsc): Do this even for these architectures.
			a.Width = int64(Widthptr)
		}
		if a.Type != obj.TYPE_MEM {
			a := a // copy to let escape into Ctxt.Dconv
			Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), n.Left.Op)
		}
		a.Type = obj.TYPE_ADDR

		// itable of interface value
	case OITAB:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // itab(nil)
		}
		a.Etype = uint8(Tptr)
		a.Width = int64(Widthptr)

		// pointer in a string or slice
	case OSPTR:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // ptr(nil)
		}
		a.Etype = uint8(Simtype[Tptr])
		a.Offset += int64(Array_array)
		a.Width = int64(Widthptr)

		// len of string or slice
	case OLEN:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // len(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_nel)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}

		// cap of string or slice
	case OCAP:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // cap(nil)
		}
		a.Etype = uint8(Simtype[TUINT])
		a.Offset += int64(Array_cap)
		if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}
	}
}
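The OINDREG case rejects offsets that do not survive truncation to 32 bits. That check is self-contained enough to lift out; a tiny runnable version:

package main

import "fmt"

// fitsInt32 reports whether x round-trips through int32 unchanged,
// i.e. whether it is representable as a signed 32-bit displacement.
func fitsInt32(x int64) bool {
	return x == int64(int32(x))
}

func main() {
	fmt.Println(fitsInt32(1 << 20)) // true
	fmt.Println(fitsInt32(1 << 40)) // false
}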
Example #8
func Afunclit(a *obj.Addr, n *Node) {
	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
		a.Type = obj.TYPE_MEM
		a.Sym = Linksym(n.Sym)
	}
}
Example #9
// copysub replaces v.Reg with s.Reg if a.Reg and v.Reg are direct
// references to the same register.
func copysub(a, v, s *obj.Addr) {
	if copyau(a, v) {
		a.Reg = s.Reg
	}
}
Example #10
func mkvar(f *Flow, a *obj.Addr) Bits {
	// mark registers used
	if a.Type == obj.TYPE_NONE {
		return zbits
	}

	r := f.Data.(*Reg)
	r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB

	var n int
	switch a.Type {
	default:
		regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
		if regu == 0 {
			return zbits
		}
		bit := zbits
		bit.b[0] = regu
		return bit

		// TODO(rsc): Remove special case here.
	case obj.TYPE_ADDR:
		var bit Bits
		if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) {
			goto memcase
		}
		a.Type = obj.TYPE_MEM
		bit = mkvar(f, a)
		setaddrs(bit)
		a.Type = obj.TYPE_ADDR
		Ostats.Naddr++
		return zbits

	memcase:
		fallthrough

	case obj.TYPE_MEM:
		if r != nil {
			r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
		}

		/* NOTE: 5g did
		if(r->f.prog->scond & (C_PBIT|C_WBIT))
			r->set.b[0] |= RtoB(a->reg);
		*/
		switch a.Name {
		default:
			// Note: This case handles NAME_EXTERN and NAME_STATIC.
			// We treat these as requiring eager writes to memory, due to
			// the possibility of a fault handler looking at them, so there is
			// not much point in registerizing the loads.
			// If we later choose the set of candidate variables from a
			// larger list, these cases could be deprioritized instead of
			// removed entirely.
			return zbits

		case obj.NAME_PARAM,
			obj.NAME_AUTO:
			n = int(a.Name)
		}
	}

	node, _ := a.Node.(*Node)
	if node == nil || node.Op != ONAME || node.Orig == nil {
		return zbits
	}
	node = node.Orig
	if node.Orig != node {
		Fatalf("%v: bad node", Ctxt.Dconv(a))
	}
	if node.Sym == nil || node.Sym.Name[0] == '.' {
		return zbits
	}
	et := EType(a.Etype)
	o := a.Offset
	w := a.Width
	if w < 0 {
		Fatalf("bad width %d for %v", w, Ctxt.Dconv(a))
	}

	flag := 0
	var v *Var
	for i := 0; i < nvar; i++ {
		v = &vars[i]
		if v.node == node && int(v.name) == n {
			if v.offset == o {
				if v.etype == et {
					if int64(v.width) == w {
						// TODO(rsc): Remove special case for arm here.
						if flag == 0 || Thearch.LinkArch.Family != sys.ARM {
							return blsh(uint(i))
						}
					}
				}
			}

			// if they overlap, disable both
			if overlap_reg(v.offset, v.width, o, int(w)) {
				//				print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
				v.addr = 1

				flag = 1
			}
		}
	}

	switch et {
	case 0, TFUNC:
		return zbits
	}

	if nvar >= NVAR {
		if Debug['w'] > 1 && node != nil {
			Fatalf("variable not optimized: %v", Nconv(node, FmtSharp))
		}
		if Debug['v'] > 0 {
			Warn("variable not optimized: %v", Nconv(node, FmtSharp))
		}

		// If we're not tracking a word in a variable, mark the rest as
		// having its address taken, so that we keep the whole thing
		// live at all calls. otherwise we might optimize away part of
		// a variable but not all of it.
		var v *Var
		for i := 0; i < nvar; i++ {
			v = &vars[i]
			if v.node == node {
				v.addr = 1
			}
		}

		return zbits
	}

	i := nvar
	nvar++
	v = &vars[i]
	v.id = i
	v.offset = o
	v.name = int8(n)
	v.etype = et
	v.width = int(w)
	v.addr = int8(flag) // funny punning
	v.node = node

	// node->opt is the head of a linked list
	// of Vars within the given Node, so that
	// we can start at a Var and find all the other
	// Vars in the same Go variable.
	v.nextinnode, _ = node.Opt().(*Var)

	node.SetOpt(v)

	bit := blsh(uint(i))
	if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
		for z := 0; z < BITS; z++ {
			externs.b[z] |= bit.b[z]
		}
	}
	if n == obj.NAME_PARAM {
		for z := 0; z < BITS; z++ {
			params.b[z] |= bit.b[z]
		}
	}

	if node.Class == PPARAM {
		for z := 0; z < BITS; z++ {
			ivar.b[z] |= bit.b[z]
		}
	}
	if node.Class == PPARAMOUT {
		for z := 0; z < BITS; z++ {
			ovar.b[z] |= bit.b[z]
		}
	}

	// Treat values with their address taken as live at calls,
	// because the garbage collector's liveness analysis in plive.go does.
	// These must be consistent or else we will elide stores and the garbage
	// collector will see uninitialized data.
	// The typical case where our own analysis is out of sync is when the
	// node appears to have its address taken but that code doesn't actually
	// get generated and therefore doesn't show up as an address being
	// taken when we analyze the instruction stream.
	// One instance of this case is when a closure uses the same name as
	// an outer variable for one of its own variables declared with :=.
	// The parser flags the outer variable as possibly shared, and therefore
	// sets addrtaken, even though it ends up not being actually shared.
	// If we were better about _ elision, _ = &x would suffice too.
	// The broader := in a closure problem is mentioned in a comment in
	// closure.go:/^typecheckclosure and dcl.go:/^oldname.
	if node.Addrtaken {
		v.addr = 1
	}

	// Disable registerization for globals, because:
	// (1) we might panic at any time and we want the recovery code
	// to see the latest values (issue 1304).
	// (2) we don't know what pointers might point at them and we want
	// loads via those pointers to see updated values and vice versa (issue 7995).
	//
	// Disable registerization for results if using defer, because the deferred func
	// might recover and return, causing the current values to be used.
	if node.Class == PEXTERN || (hasdefer && node.Class == PPARAMOUT) {
		v.addr = 1
	}

	if Debug['R'] != 0 {
		fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, et, o, w, Nconv(node, FmtSharp), Ctxt.Dconv(a), v.addr)
	}
	Ostats.Nvar++

	return bit
}
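mkvar hands each tracked variable one bit via blsh and unions those bits into sets such as externs, params, ivar, and ovar. A minimal sketch of that bit-vector shape, assuming 64-bit words and a hypothetical word count (the real BITS constant lives elsewhere in the package):

package main

import "fmt"

const bitsWords = 2 // hypothetical; enough for 128 variables

// Bits is a fixed-size bit vector, one bit per tracked variable.
type Bits struct {
	b [bitsWords]uint64
}

// blsh returns a Bits value with only bit n set.
func blsh(n uint) Bits {
	var bit Bits
	bit.b[n/64] = 1 << (n % 64)
	return bit
}

func main() {
	var params Bits
	for _, id := range []uint{3, 70} { // two variable ids
		bit := blsh(id)
		for z := 0; z < bitsWords; z++ {
			params.b[z] |= bit.b[z]
		}
	}
	fmt.Printf("%#x %#x\n", params.b[0], params.b[1]) // 0x8 0x40
}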
Example #11
/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int64()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case x86.AADDB,
			x86.AADDW,
			x86.AADDL,
			x86.AADDQ,
			x86.ASUBB,
			x86.ASUBW,
			x86.ASUBL,
			x86.ASUBQ,
			x86.AANDB,
			x86.AANDW,
			x86.AANDL,
			x86.AANDQ,
			x86.AORB,
			x86.AORW,
			x86.AORL,
			x86.AORQ,
			x86.AXORB,
			x86.AXORW,
			x86.AXORL,
			x86.AXORQ,
			x86.AINCB,
			x86.AINCW,
			x86.AINCL,
			x86.AINCQ,
			x86.ADECB,
			x86.ADECW,
			x86.ADECL,
			x86.ADECQ,
			x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(movptr, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = x86.REG_NONE
		gc.Fixlargeoffset(&n1)
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}
Example #12
/*
 * xtramodes enables the ARM post increment and
 * shift offset addressing modes to transform
 *   MOVW   0(R3),R1
 *   ADD    $4,R3,R3
 * into
 *   MOVW.P 4(R3),R1
 * and
 *   ADD    R0,R1
 *   MOVBU  0(R1),R0
 * into
 *   MOVBU  R0<<0(R1),R0
 */
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
	p := r.Prog
	v := *a
	v.Type = obj.TYPE_REG
	r1 := findpre(r, &v)
	if r1 != nil {
		p1 := r1.Prog
		if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
			switch p1.As {
			case arm.AADD:
				if p1.Scond&arm.C_SBIT != 0 {
					// avoid altering ADD.S/ADC sequences.
					break
				}

				if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
					if nochange(gc.Uniqs(r1), r, p1) {
						if a != &p.From || v.Reg != p.To.Reg {
							if finduse(g, r.S1, &v) {
								if p1.Reg == 0 || p1.Reg == v.Reg {
									/* pre-indexing */
									p.Scond |= arm.C_WBIT
								} else {
									return false
								}
							}
						}

						switch p1.From.Type {
						/* register offset */
						case obj.TYPE_REG:
							if gc.Nacl {
								return false
							}
							*a = obj.Addr{}
							a.Type = obj.TYPE_SHIFT
							a.Offset = int64(p1.From.Reg) & 15

							/* scaled register offset */
						case obj.TYPE_SHIFT:
							if gc.Nacl {
								return false
							}
							*a = obj.Addr{}
							a.Type = obj.TYPE_SHIFT
							fallthrough

							/* immediate offset */
						case obj.TYPE_CONST,
							obj.TYPE_ADDR:
							a.Offset = p1.From.Offset
						}

						if p1.Reg != 0 {
							a.Reg = p1.Reg
						}
						excise(r1)
						return true
					}
				}

			case arm.AMOVW:
				if p1.From.Type == obj.TYPE_REG {
					r2 := findinc(r1, r, &p1.From)
					if r2 != nil {
						var r3 *gc.Flow
						for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
						}
						if r3 == r {
							/* post-indexing */
							p1 := r2.Prog

							a.Reg = p1.To.Reg
							a.Offset = p1.From.Offset
							p.Scond |= arm.C_PBIT
							if !finduse(g, r, &r1.Prog.To) {
								excise(r1)
							}
							excise(r2)
							return true
						}
					}
				}
			}
		}
	}

	if a != &p.From || a.Reg != p.To.Reg {
		r1 := findinc(r, nil, &v)
		if r1 != nil {
			/* post-indexing */
			p1 := r1.Prog

			a.Offset = p1.From.Offset
			p.Scond |= arm.C_PBIT
			excise(r1)
			return true
		}
	}

	return false
}
Example #13
/*
 * ASLL x,y,w
 * .. (not use w, not set x y w)
 * AXXX w,a,b (a != w)
 * .. (not use w)
 * (set w)
 * ----------- changed to
 * ..
 * AXXX (x<<y),a,b
 * ..
 */
func shiftprop(r *gc.Flow) bool {
	p := r.Prog
	if p.To.Type != obj.TYPE_REG {
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
		}
		return false
	}

	n := p.To.Reg
	var a obj.Addr
	if p.Reg != 0 && p.Reg != p.To.Reg {
		a.Type = obj.TYPE_REG
		a.Reg = p.Reg
	}

	if gc.Debug['P'] != 0 {
		fmt.Printf("shiftprop\n%v", p)
	}
	r1 := r
	var p1 *obj.Prog
	for {
		/* find first use of shift result; abort if shift operands or result are changed */
		r1 = gc.Uniqs(r1)

		if r1 == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tbranch; FAILURE\n")
			}
			return false
		}

		if gc.Uniqp(r1) == nil {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tmerge; FAILURE\n")
			}
			return false
		}

		p1 = r1.Prog
		if gc.Debug['P'] != 0 {
			fmt.Printf("\n%v", p1)
		}
		switch copyu(p1, &p.To, nil) {
		case 0: /* not used or set */
			if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\targs modified; FAILURE\n")
				}
				return false
			}

			continue
		case 3: /* set, not used */
			{
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tBOTCH: noref; FAILURE\n")
				}
				return false
			}
		}

		break
	}

	/* check whether substitution can be done */
	switch p1.As {
	default:
		if gc.Debug['P'] != 0 {
			fmt.Printf("\tnon-dpi; FAILURE\n")
		}
		return false

	case arm.AAND,
		arm.AEOR,
		arm.AADD,
		arm.AADC,
		arm.AORR,
		arm.ASUB,
		arm.ASBC,
		arm.ARSB,
		arm.ARSC:
		if p1.Reg == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && p1.To.Reg == n) {
			if p1.From.Type != obj.TYPE_REG {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tcan't swap; FAILURE\n")
				}
				return false
			}

			p1.Reg = p1.From.Reg
			p1.From.Reg = n
			switch p1.As {
			case arm.ASUB:
				p1.As = arm.ARSB

			case arm.ARSB:
				p1.As = arm.ASUB

			case arm.ASBC:
				p1.As = arm.ARSC

			case arm.ARSC:
				p1.As = arm.ASBC
			}

			if gc.Debug['P'] != 0 {
				fmt.Printf("\t=>%v", p1)
			}
		}
		fallthrough

	case arm.ABIC,
		arm.ATST,
		arm.ACMP,
		arm.ACMN:
		if p1.Reg == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tcan't swap; FAILURE\n")
			}
			return false
		}

		if p1.Reg == 0 && p1.To.Reg == n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used twice; FAILURE\n")
			}
			return false
		}

		//	case AMVN:
		if p1.From.Type == obj.TYPE_SHIFT {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tshift result used in shift; FAILURE\n")
			}
			return false
		}

		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != n {
			if gc.Debug['P'] != 0 {
				fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
			}
			return false
		}
	}

	/* check whether shift result is used subsequently */
	p2 := p1

	if p1.To.Reg != n {
		var p1 *obj.Prog
		for {
			r1 = gc.Uniqs(r1)
			if r1 == nil {
				if gc.Debug['P'] != 0 {
					fmt.Printf("\tinconclusive; FAILURE\n")
				}
				return false
			}

			p1 = r1.Prog
			if gc.Debug['P'] != 0 {
				fmt.Printf("\n%v", p1)
			}
			switch copyu(p1, &p.To, nil) {
			case 0: /* not used or set */
				continue

			case 3: /* set, not used */
				break

			default: /* used */
				if gc.Debug['P'] != 0 {
					fmt.Printf("\treused; FAILURE\n")
				}
				return false
			}

			break
		}
	}

	/* make the substitution */
	p2.From.Reg = 0
	o := p.Reg
	if o == 0 {
		o = p.To.Reg
	}
	o &= 15

	switch p.From.Type {
	case obj.TYPE_CONST:
		o |= int16(p.From.Offset&0x1f) << 7

	case obj.TYPE_REG:
		o |= 1<<4 | (p.From.Reg&15)<<8
	}

	switch p.As {
	case arm.ASLL:
		o |= 0 << 5

	case arm.ASRL:
		o |= 1 << 5

	case arm.ASRA:
		o |= 2 << 5
	}

	p2.From = obj.Addr{}
	p2.From.Type = obj.TYPE_SHIFT
	p2.From.Offset = int64(o)
	if gc.Debug['P'] != 0 {
		fmt.Printf("\t=>%v\tSUCCEED\n", p2)
	}
	return true
}
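The substitution step assembles the packed shift operand: Rm in bits 0-3, the shift type in bits 5-6, and either a constant amount in bits 7-11 or, with bit 4 set, the shift register in bits 8-11. A standalone sketch of that encoding, derived from the masks above:

package main

import "fmt"

const (
	shiftLL = 0 // logical left (ASLL)
	shiftLR = 1 // logical right (ASRL)
	shiftAR = 2 // arithmetic right (ASRA)
)

// constShift encodes rm shifted by a constant amount.
func constShift(rm, typ int16, amount int64) int64 {
	o := rm & 15
	o |= int16(amount&0x1f) << 7
	o |= typ << 5
	return int64(o)
}

// regShift encodes rm shifted by the value in register rs.
func regShift(rm, typ, rs int16) int64 {
	o := rm & 15
	o |= 1<<4 | (rs&15)<<8
	o |= typ << 5
	return int64(o)
}

func main() {
	fmt.Printf("%#x\n", constShift(1, shiftLL, 4)) // R1<<4
	fmt.Printf("%#x\n", regShift(1, shiftLR, 2))   // R1>>R2
}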
Example #14
/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			return false
		}
		v := n.Int64()
		switch as {
		default:
			return false

		// operations that can cope with a 32-bit immediate
		// TODO(mundaym): logical operations can work on high bits
		case s390x.AADD,
			s390x.AADDC,
			s390x.ASUB,
			s390x.AMULLW,
			s390x.AAND,
			s390x.AOR,
			s390x.AXOR,
			s390x.ASLD,
			s390x.ASLW,
			s390x.ASRAW,
			s390x.ASRAD,
			s390x.ASRW,
			s390x.ASRD,
			s390x.AMOVB,
			s390x.AMOVBZ,
			s390x.AMOVH,
			s390x.AMOVHZ,
			s390x.AMOVW,
			s390x.AMOVWZ,
			s390x.AMOVD:
			if int64(int32(v)) != v {
				return false
			}

		// For comparisons, avoid immediates unless they fit
		// into an int8/uint8; this favours combined
		// compare-and-branch instructions.
		case s390x.ACMP:
			if int64(int8(v)) != v {
				return false
			}
		case s390x.ACMPU:
			if int64(uint8(v)) != v {
				return false
			}
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			// check that the offset fits into a 12-bit displacement
			if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
				sudoclean()
				return false
			}
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(s390x.AMOVD, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = 0
		// check that the offset fits into a 12-bit displacement
		if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
			tmp := n1
			tmp.Op = gc.OREGISTER
			tmp.Type = gc.Types[gc.Tptr]
			tmp.Xoffset = 0
			gc.Cgen_checknil(&tmp)
			ginscon(s390x.AADD, n1.Xoffset, &tmp)
			n1.Xoffset = 0
		}
		gc.Naddr(a, &n1)
		return true
	}

	return false
}
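Two range checks recur in this function: a constant must round-trip through int32 to be usable as an immediate, and a final offset must fit an unsigned 12-bit displacement with 8 bytes of headroom for the widest access. A minimal standalone version of both checks:

package main

import "fmt"

// fitsImm32 reports whether v survives truncation to 32 bits, the
// precondition for using it as a sign-extended immediate above.
func fitsImm32(v int64) bool {
	return int64(int32(v)) == v
}

// fitsDisp12 reports whether off fits a 12-bit unsigned displacement,
// leaving 8 bytes of headroom for the widest access.
func fitsDisp12(off int64) bool {
	return off >= 0 && off < (1<<12)-8
}

func main() {
	fmt.Println(fitsImm32(1<<31-1), fitsImm32(1<<31)) // true false
	fmt.Println(fitsDisp12(4000), fitsDisp12(4090))   // true false
}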