Example #1
func gclean() {
	for _, r := range Thearch.ReservedRegs {
		reg[r-Thearch.REGMIN]--
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		n := reg[r-Thearch.REGMIN]
		if n != 0 {
			if Debug['v'] != 0 {
				Regdump()
			}
			Yyerror("reg %v left allocated", obj.Rconv(r))
		}
	}

	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		n := reg[r-Thearch.REGMIN]
		if n != 0 {
			if Debug['v'] != 0 {
				Regdump()
			}
			Yyerror("reg %v left allocated", obj.Rconv(r))
		}
	}
}
Example #2
func archPPC64() *Arch {
	register := make(map[string]int16)
	// Create maps for easy lookup of instruction names etc.
	// Note that there is no list of names as there is for x86.
	for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	for i := ppc64.REG_CR0; i <= ppc64.REG_CR7; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	register["CR"] = ppc64.REG_CR
	register["XER"] = ppc64.REG_XER
	register["LR"] = ppc64.REG_LR
	register["CTR"] = ppc64.REG_CTR
	register["FPSCR"] = ppc64.REG_FPSCR
	register["MSR"] = ppc64.REG_MSR
	// Pseudo-registers.
	register["SB"] = RSB
	register["FP"] = RFP
	register["PC"] = RPC
	// Avoid unintentionally clobbering g using R30.
	delete(register, "R30")
	register["g"] = ppc64.REG_R30
	registerPrefix := map[string]bool{
		"CR":  true,
		"F":   true,
		"R":   true,
		"SPR": true,
	}

	instructions := make(map[string]int)
	for i, s := range obj.Anames {
		instructions[s] = i
	}
	for i, s := range ppc64.Anames {
		if i >= obj.A_ARCHSPECIFIC {
			instructions[s] = i + obj.ABasePPC64
		}
	}
	// Annoying aliases.
	instructions["BR"] = ppc64.ABR
	instructions["BL"] = ppc64.ABL

	return &Arch{
		LinkArch:       &ppc64.Linkppc64,
		Instructions:   instructions,
		Register:       register,
		RegisterPrefix: registerPrefix,
		RegisterNumber: ppc64RegisterNumber,
		IsJump:         jumpPPC64,
	}
}
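The construction pattern in archPPC64 (and the other arch* functions below) is the same everywhere: walk a contiguous block of register constants, print each one with obj.Rconv, and key a name-to-number map by the result. Here is a minimal self-contained sketch of that idea; the register constants and the rconv helper are hypothetical stand-ins, not the real cmd/internal/obj API.

package main

import "fmt"

// Hypothetical contiguous register constants, mirroring how ppc64.REG_R0..REG_R31
// are laid out as one consecutive block in the real toolchain.
const (
	regR0 = 32 + iota
	regR1
	regR2
	regR3
	regLast = regR3
)

// rconv stands in for obj.Rconv: it turns a register number into its assembler name.
func rconv(r int) string {
	return fmt.Sprintf("R%d", r-regR0)
}

func main() {
	// Build the name -> number table the same way archPPC64 does for its ranges.
	register := make(map[string]int16)
	for i := regR0; i <= regLast; i++ {
		register[rconv(i)] = int16(i)
	}
	// A rename can then shadow or replace an entry, as the real code does when it
	// deletes "R30" and maps "g" to that register instead.
	fmt.Println(register["R2"], len(register))
}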
Example #3
func archMips64() *Arch {
	register := make(map[string]int16)
	// Create maps for easy lookup of instruction names etc.
	// Note that there is no list of names as there is for x86.
	for i := mips.REG_R0; i <= mips.REG_R31; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	for i := mips.REG_F0; i <= mips.REG_F31; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	for i := mips.REG_M0; i <= mips.REG_M31; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	register["HI"] = mips.REG_HI
	register["LO"] = mips.REG_LO
	// Pseudo-registers.
	register["SB"] = RSB
	register["FP"] = RFP
	register["PC"] = RPC
	// Avoid unintentionally clobbering g using R30.
	delete(register, "R30")
	register["g"] = mips.REG_R30
	registerPrefix := map[string]bool{
		"F":   true,
		"FCR": true,
		"M":   true,
		"R":   true,
	}

	instructions := make(map[string]int)
	for i, s := range obj.Anames {
		instructions[s] = i
	}
	for i, s := range mips.Anames {
		if i >= obj.A_ARCHSPECIFIC {
			instructions[s] = i + obj.ABaseMIPS64
		}
	}
	// Annoying alias.
	instructions["JAL"] = mips.AJAL

	return &Arch{
		LinkArch:       &mips.Linkmips64,
		Instructions:   instructions,
		Register:       register,
		RegisterPrefix: registerPrefix,
		RegisterNumber: mipsRegisterNumber,
		IsJump:         jumpMIPS64,
	}
}
Example #4
func Regdump() {
	if Debug['v'] == 0 {
		fmt.Printf("run compiler with -v for register allocation sites\n")
		return
	}

	dump := func(r int) {
		stk := regstk[r-Thearch.REGMIN]
		if len(stk) == 0 {
			return
		}
		fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
		fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
	}

	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		if reg[r-Thearch.REGMIN] != 0 {
			dump(r)
		}
	}
	for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
		if reg[r-Thearch.REGMIN] != 0 {
			dump(r)
		}
	}
}
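gclean (Example #1) and Regdump share one bookkeeping convention: an allocation counter per register, indexed by r-Thearch.REGMIN, plus a recorded stack trace in regstk saying where each register was last allocated. A stripped-down sketch of that leak-tracking scheme, with hypothetical register bounds and helpers in place of the compiler's globals:

package main

import (
	"fmt"
	"runtime/debug"
)

const (
	regMin = 16 // hypothetical first register number
	regMax = 19 // hypothetical last register number
)

var (
	reg    [regMax - regMin + 1]int    // allocation count per register
	regstk [regMax - regMin + 1][]byte // stack trace of the last allocation
)

func alloc(r int) {
	reg[r-regMin]++
	regstk[r-regMin] = debug.Stack() // remember where it was allocated
}

func free(r int) {
	reg[r-regMin]--
	regstk[r-regMin] = nil
}

func main() {
	alloc(17)
	alloc(18)
	free(18)
	// The equivalent of gclean: any register with a non-zero count leaked,
	// and the saved stack says where it was handed out.
	for r := regMin; r <= regMax; r++ {
		if reg[r-regMin] != 0 {
			fmt.Printf("reg R%d left allocated, allocated at:\n%s\n", r, regstk[r-regMin])
		}
	}
}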
Example #5
func archArm() *Arch {
	register := make(map[string]int16)
	// Create maps for easy lookup of instruction names etc.
	// Note that there is no list of names as there is for x86.
	for i := arm.REG_R0; i < arm.REG_SPSR; i++ {
		register[obj.Rconv(i)] = int16(i)
	}
	// Avoid unintentionally clobbering g using R10.
	delete(register, "R10")
	register["g"] = arm.REG_R10
	for i := 0; i < 16; i++ {
		register[fmt.Sprintf("C%d", i)] = int16(i)
	}

	// Pseudo-registers.
	register["SB"] = RSB
	register["FP"] = RFP
	register["PC"] = RPC
	register["SP"] = RSP
	registerPrefix := map[string]bool{
		"F": true,
		"R": true,
	}

	instructions := make(map[string]int)
	for i, s := range obj.Anames {
		instructions[s] = i
	}
	for i, s := range arm.Anames {
		if i >= obj.A_ARCHSPECIFIC {
			instructions[s] = i + obj.ABaseARM
		}
	}
	// Annoying aliases.
	instructions["B"] = obj.AJMP
	instructions["BL"] = obj.ACALL
	// MCR differs from MRC by the way fields of the word are encoded.
	// (Details in arm.go). Here we add the instruction so parse will find
	// it, but give it an opcode number known only to us.
	instructions["MCR"] = aMCR

	return &Arch{
		LinkArch:       &arm.Linkarm,
		Instructions:   instructions,
		Register:       register,
		RegisterPrefix: registerPrefix,
		RegisterNumber: armRegisterNumber,
		IsJump:         jumpArm,
	}
}
Example #6
func nodedump(n *Node, flag int) string {
	if n == nil {
		return ""
	}

	recur := flag&obj.FmtShort == 0

	var buf bytes.Buffer
	if recur {
		indent(&buf)
		if dumpdepth > 10 {
			buf.WriteString("...")
			return buf.String()
		}

		if n.Ninit != nil {
			fmt.Fprintf(&buf, "%v-init%v", Oconv(int(n.Op), 0), n.Ninit)
			indent(&buf)
		}
	}

	switch n.Op {
	default:
		fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))

	case OREGISTER, OINDREG:
		fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), obj.Rconv(int(n.Reg)), Jconv(n, 0))

	case OLITERAL:
		fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Vconv(n.Val(), 0), Jconv(n, 0))

	case ONAME, ONONAME:
		if n.Sym != nil {
			fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), n.Sym, Jconv(n, 0))
		} else {
			fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
		}
		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), n.Name.Param.Ntype)
		}

	case OASOP:
		fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Oconv(int(n.Etype), 0), Jconv(n, 0))

	case OTYPE:
		fmt.Fprintf(&buf, "%v %v%v type=%v", Oconv(int(n.Op), 0), n.Sym, Jconv(n, 0), n.Type)
		if recur && n.Type == nil && n.Name.Param.Ntype != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), n.Name.Param.Ntype)
		}
	}

	if n.Sym != nil && n.Op != ONAME {
		fmt.Fprintf(&buf, " %v", n.Sym)
	}

	if n.Type != nil {
		fmt.Fprintf(&buf, " %v", n.Type)
	}

	if recur {
		if n.Left != nil {
			buf.WriteString(Nconv(n.Left, 0))
		}
		if n.Right != nil {
			buf.WriteString(Nconv(n.Right, 0))
		}
		if n.List != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-list%v", Oconv(int(n.Op), 0), n.List)
		}

		if n.Rlist != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-rlist%v", Oconv(int(n.Op), 0), n.Rlist)
		}

		if n.Nbody != nil {
			indent(&buf)
			fmt.Fprintf(&buf, "%v-body%v", Oconv(int(n.Op), 0), n.Nbody)
		}
	}

	return buf.String()
}
Example #7
func exprfmt(n *Node, prec int) string {
	for n != nil && n.Implicit && (n.Op == OIND || n.Op == OADDR) {
		n = n.Left
	}

	if n == nil {
		return "<N>"
	}

	nprec := opprec[n.Op]
	if n.Op == OTYPE && n.Sym != nil {
		nprec = 8
	}

	if prec > nprec {
		return fmt.Sprintf("(%v)", n)
	}

	switch n.Op {
	case OPAREN:
		return fmt.Sprintf("(%v)", n.Left)

	case ODDDARG:
		return "... argument"

	case OREGISTER:
		return obj.Rconv(int(n.Reg))

	case OLITERAL: // this is a bit of a mess
		if fmtmode == FErr {
			if n.Orig != nil && n.Orig != n {
				return exprfmt(n.Orig, prec)
			}
			if n.Sym != nil {
				return Sconv(n.Sym, 0)
			}
		}
		if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
			return exprfmt(n.Orig, prec)
		}
		if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != idealbool && n.Type != idealstring {
			// Need parens when type begins with what might
			// be misinterpreted as a unary operator: * or <-.
			if Isptr[n.Type.Etype] || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
				return fmt.Sprintf("(%v)(%v)", n.Type, Vconv(n.Val(), 0))
			} else {
				return fmt.Sprintf("%v(%v)", n.Type, Vconv(n.Val(), 0))
			}
		}

		return Vconv(n.Val(), 0)

	// Special case: name used as local variable in export.
	// _ becomes ~b%d internally; print as _ for export
	case ONAME:
		if (fmtmode == FExp || fmtmode == FErr) && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
			return "_"
		}
		if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Name.Vargen > 0 {
			return fmt.Sprintf("%v·%d", n.Sym, n.Name.Vargen)
		}

		// Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
		// but for export, this should be rendered as (*pkg.T).meth.
		// These nodes have the special property that they are names with a left OTYPE and a right ONAME.
		if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
			if Isptr[n.Left.Type.Etype] {
				return fmt.Sprintf("(%v).%v", n.Left.Type, Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
			} else {
				return fmt.Sprintf("%v.%v", n.Left.Type, Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
			}
		}
		fallthrough

	case OPACK, ONONAME:
		return Sconv(n.Sym, 0)

	case OTYPE:
		if n.Type == nil && n.Sym != nil {
			return Sconv(n.Sym, 0)
		}
		return Tconv(n.Type, 0)

	case OTARRAY:
		if n.Left != nil {
			return fmt.Sprintf("[]%v", n.Left)
		}
		return fmt.Sprintf("[]%v", n.Right) // happens before typecheck

	case OTMAP:
		return fmt.Sprintf("map[%v]%v", n.Left, n.Right)

	case OTCHAN:
		switch n.Etype {
		case Crecv:
			return fmt.Sprintf("<-chan %v", n.Left)

		case Csend:
			return fmt.Sprintf("chan<- %v", n.Left)

		default:
			if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
				return fmt.Sprintf("chan (%v)", n.Left)
			} else {
				return fmt.Sprintf("chan %v", n.Left)
			}
		}

	case OTSTRUCT:
		return "<struct>"

	case OTINTER:
		return "<inter>"

	case OTFUNC:
		return "<func>"

	case OCLOSURE:
		if fmtmode == FErr {
			return "func literal"
		}
		if n.Nbody != nil {
			return fmt.Sprintf("%v { %v }", n.Type, n.Nbody)
		}
		return fmt.Sprintf("%v { %v }", n.Type, n.Name.Param.Closure.Nbody)

	case OCOMPLIT:
		ptrlit := n.Right != nil && n.Right.Implicit && n.Right.Type != nil && Isptr[n.Right.Type.Etype]
		if fmtmode == FErr {
			if n.Right != nil && n.Right.Type != nil && !n.Implicit {
				if ptrlit {
					return fmt.Sprintf("&%v literal", n.Right.Type.Type)
				} else {
					return fmt.Sprintf("%v literal", n.Right.Type)
				}
			}

			return "composite literal"
		}

		if fmtmode == FExp && ptrlit {
			// typecheck has overwritten OIND by OTYPE with pointer type.
			return fmt.Sprintf("(&%v{ %v })", n.Right.Type.Type, Hconv(n.List, obj.FmtComma))
		}

		return fmt.Sprintf("(%v{ %v })", n.Right, Hconv(n.List, obj.FmtComma))

	case OPTRLIT:
		if fmtmode == FExp && n.Left.Implicit {
			return Nconv(n.Left, 0)
		}
		return fmt.Sprintf("&%v", n.Left)

	case OSTRUCTLIT:
		if fmtmode == FExp { // requires special handling of field names
			var f string
			if n.Implicit {
				f += "{"
			} else {
				f += fmt.Sprintf("(%v{", n.Type)
			}
			for l := n.List; l != nil; l = l.Next {
				f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), l.N.Right)

				if l.Next != nil {
					f += ","
				} else {
					f += " "
				}
			}

			if !n.Implicit {
				f += "})"
				return f
			}
			f += "}"
			return f
		}
		fallthrough

	case OARRAYLIT, OMAPLIT:
		if fmtmode == FErr {
			return fmt.Sprintf("%v literal", n.Type)
		}
		if fmtmode == FExp && n.Implicit {
			return fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
		}
		return fmt.Sprintf("(%v{ %v })", n.Type, Hconv(n.List, obj.FmtComma))

	case OKEY:
		if n.Left != nil && n.Right != nil {
			if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
				// requires special handling of field names
				return fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), n.Right)
			} else {
				return fmt.Sprintf("%v:%v", n.Left, n.Right)
			}
		}

		if n.Left == nil && n.Right != nil {
			return fmt.Sprintf(":%v", n.Right)
		}
		if n.Left != nil && n.Right == nil {
			return fmt.Sprintf("%v:", n.Left)
		}
		return ":"

	case OXDOT,
		ODOT,
		ODOTPTR,
		ODOTINTER,
		ODOTMETH,
		OCALLPART:
		var f string
		f += exprfmt(n.Left, nprec)
		if n.Right == nil || n.Right.Sym == nil {
			f += ".<nil>"
			return f
		}
		f += fmt.Sprintf(".%v", Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
		return f

	case ODOTTYPE, ODOTTYPE2:
		var f string
		f += exprfmt(n.Left, nprec)
		if n.Right != nil {
			f += fmt.Sprintf(".(%v)", n.Right)
			return f
		}
		f += fmt.Sprintf(".(%v)", n.Type)
		return f

	case OINDEX,
		OINDEXMAP,
		OSLICE,
		OSLICESTR,
		OSLICEARR,
		OSLICE3,
		OSLICE3ARR:
		var f string
		f += exprfmt(n.Left, nprec)
		f += fmt.Sprintf("[%v]", n.Right)
		return f

	case OCOPY, OCOMPLEX:
		return fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), n.Left, n.Right)

	case OCONV,
		OCONVIFACE,
		OCONVNOP,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		ORUNESTR:
		if n.Type == nil || n.Type.Sym == nil {
			return fmt.Sprintf("(%v)(%v)", n.Type, n.Left)
		}
		if n.Left != nil {
			return fmt.Sprintf("%v(%v)", n.Type, n.Left)
		}
		return fmt.Sprintf("%v(%v)", n.Type, Hconv(n.List, obj.FmtComma))

	case OREAL,
		OIMAG,
		OAPPEND,
		OCAP,
		OCLOSE,
		ODELETE,
		OLEN,
		OMAKE,
		ONEW,
		OPANIC,
		ORECOVER,
		OPRINT,
		OPRINTN:
		if n.Left != nil {
			return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), n.Left)
		}
		if n.Isddd {
			return fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
		}
		return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))

	case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
		var f string
		f += exprfmt(n.Left, nprec)
		if n.Isddd {
			f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
			return f
		}
		f += fmt.Sprintf("(%v)", Hconv(n.List, obj.FmtComma))
		return f

	case OMAKEMAP, OMAKECHAN, OMAKESLICE:
		if n.List != nil { // pre-typecheck
			return fmt.Sprintf("make(%v, %v)", n.Type, Hconv(n.List, obj.FmtComma))
		}
		if n.Right != nil {
			return fmt.Sprintf("make(%v, %v, %v)", n.Type, n.Left, n.Right)
		}
		if n.Left != nil && (n.Op == OMAKESLICE || !isideal(n.Left.Type)) {
			return fmt.Sprintf("make(%v, %v)", n.Type, n.Left)
		}
		return fmt.Sprintf("make(%v)", n.Type)

		// Unary
	case OPLUS,
		OMINUS,
		OADDR,
		OCOM,
		OIND,
		ONOT,
		ORECV:
		var f string
		if n.Left.Op == n.Op {
			f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
		} else {
			f += Oconv(int(n.Op), obj.FmtSharp)
		}
		f += exprfmt(n.Left, nprec+1)
		return f

		// Binary
	case OADD,
		OAND,
		OANDAND,
		OANDNOT,
		ODIV,
		OEQ,
		OGE,
		OGT,
		OLE,
		OLT,
		OLSH,
		OMOD,
		OMUL,
		ONE,
		OOR,
		OOROR,
		ORSH,
		OSEND,
		OSUB,
		OXOR:
		var f string
		f += exprfmt(n.Left, nprec)

		f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
		f += exprfmt(n.Right, nprec+1)
		return f

	case OADDSTR:
		var f string
		for l := n.List; l != nil; l = l.Next {
			if l != n.List {
				f += " + "
			}
			f += exprfmt(l.N, nprec)
		}

		return f

	case OCMPSTR, OCMPIFACE:
		var f string
		f += exprfmt(n.Left, nprec)
		// TODO(marvin): Fix Node.EType type union.
		f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
		f += exprfmt(n.Right, nprec+1)
		return f
	}

	return fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
}
Example #8
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copyu returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// 7g never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}
	if p.RegTo2 != obj.REG_NONE {
		// 7g never generates a to2
		fmt.Printf("copyu: RegTo2 (%v) not implemented\n", obj.Rconv(int(p.RegTo2)))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		arm64.ANEG,
		arm64.AFNEGD,
		arm64.AFNEGS,
		arm64.AFSQRTD,
		arm64.AFCVTZSD,
		arm64.AFCVTZSS,
		arm64.AFCVTZSDW,
		arm64.AFCVTZSSW,
		arm64.AFCVTZUD,
		arm64.AFCVTZUS,
		arm64.AFCVTZUDW,
		arm64.AFCVTZUSW,
		arm64.AFCVTSD,
		arm64.AFCVTDS,
		arm64.ASCVTFD,
		arm64.ASCVTFS,
		arm64.ASCVTFWD,
		arm64.ASCVTFWS,
		arm64.AUCVTFD,
		arm64.AUCVTFS,
		arm64.AUCVTFWD,
		arm64.AUCVTFWS,
		arm64.AMOVB,
		arm64.AMOVBU,
		arm64.AMOVH,
		arm64.AMOVHU,
		arm64.AMOVW,
		arm64.AMOVWU,
		arm64.AMOVD,
		arm64.AFMOVS,
		arm64.AFMOVD:
		if p.Scond == 0 {
			if s != nil {
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}

				// Update only indirect uses of v in p->to
				if !copyas(&p.To, v) {
					if copysub(&p.To, v, s, 1) != 0 {
						return 1
					}
				}
				return 0
			}

			if copyas(&p.To, v) {
				// Fix up implicit from
				if p.From.Type == obj.TYPE_NONE {
					p.From = p.To
				}
				if copyau(&p.From, v) {
					return 4
				}
				return 3
			}

			if copyau(&p.From, v) {
				return 1
			}
			if copyau(&p.To, v) {
				// p->to only indirectly uses v
				return 1
			}

			return 0
		}

		/* rar p->from, write p->to or read p->from, rar p->to */
		if p.From.Type == obj.TYPE_MEM {
			if copyas(&p.From, v) {
				// No s!=nil check; need to fail
				// anyway in that case
				return 2
			}

			if s != nil {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyas(&p.To, v) {
				return 3
			}
		} else if p.To.Type == obj.TYPE_MEM {
			if copyas(&p.To, v) {
				return 2
			}
			if s != nil {
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				return 1
			}
		} else {
			fmt.Printf("copyu: bad %v\n", p)
		}

		return 0

	case arm64.AADD, /* read p->from, read p->reg, write p->to */
		arm64.ASUB,
		arm64.AAND,
		arm64.AORR,
		arm64.AEOR,
		arm64.AMUL,
		arm64.ASMULL,
		arm64.AUMULL,
		arm64.ASMULH,
		arm64.AUMULH,
		arm64.ASDIV,
		arm64.AUDIV,
		arm64.ALSL,
		arm64.ALSR,
		arm64.AASR,
		arm64.AFADDD,
		arm64.AFADDS,
		arm64.AFSUBD,
		arm64.AFSUBS,
		arm64.AFMULD,
		arm64.AFMULS,
		arm64.AFDIVD,
		arm64.AFDIVS:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm64.ABEQ,
		arm64.ABNE,
		arm64.ABGE,
		arm64.ABLT,
		arm64.ABGT,
		arm64.ABLE,
		arm64.ABLO,
		arm64.ABLS,
		arm64.ABHI,
		arm64.ABHS:
		return 0

	case obj.ACHECKNIL, /* read p->from */
		arm64.ACMP, /* read p->from, read p->reg */
		arm64.AFCMPD,
		arm64.AFCMPS:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub1(p, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case arm64.AB: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case arm64.ABL: /* funny */
		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R31 is zero, used by DUFFZERO, cannot be substituted.
	// R16 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 31 {
				return 1
			}
			if v.Reg == 16 {
				return 2
			}
		}

		return 0

	// R16, R17 are ptr to src, dst, used and set, cannot be substituted.
	// R27 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 16 || v.Reg == 17 {
				return 2
			}
			if v.Reg == 27 {
				return 3
			}
		}

		return 0

	case arm64.AHINT,
		obj.ATEXT,
		obj.APCDATA,
		obj.AFUNCDATA,
		obj.AVARDEF,
		obj.AVARKILL,
		obj.AVARLIVE,
		obj.AUSEFIELD:
		return 0
	}
}
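The 0..4 codes documented above are what the peephole pass keys its copy-propagation decisions on. The following is only an illustrative sketch, not the real peep.go caller: it shows one plausible way a forward scan could react to each code when deciding whether a substitution can continue. The scanForward helper and its []int input are hypothetical.

package main

import "fmt"

// scanForward walks a hypothetical sequence of copyu-style return codes for the
// instructions after a register copy, deciding how far a substitution can reach.
func scanForward(codes []int) {
	for i, t := range codes {
		switch t {
		case 0:
			// v not touched: keep scanning.
		case 1:
			// v only read here: a substitution pass would rewrite the use and continue.
			fmt.Printf("insn %d: use of v, substitutable\n", i)
		case 2:
			// read-alter-rewrite in one address: cannot substitute, stop.
			fmt.Printf("insn %d: read-alter-rewrite, stop\n", i)
			return
		case 3:
			// v is redefined: the old value is dead past this point, stop.
			fmt.Printf("insn %d: v set, range ends\n", i)
			return
		case 4:
			// set in one address and used in another: the use and the set can be
			// rewritten independently, but the old value still dies here.
			fmt.Printf("insn %d: set and used independently\n", i)
			return
		}
	}
}

func main() {
	// Hypothetical run of copyu results for a short instruction sequence.
	scanForward([]int{0, 1, 1, 3})
}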
Example #9
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := uint64(w % 8) // bytes
	q := uint64(w / 8) // dwords

	if gc.Reginuse(mips.REGRT1) {
		gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], mips.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], mips.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(mips.ASUBV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(mips.AMOVV, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(mips.AMOVV, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := (*obj.Prog)(p)

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		gc.Patch(ginsbranch(mips.ABNE, nil, &dst, &end, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R1 on the last zeroed dword
		boff = 8
		// TODO(dfc): https://golang.org/issue/12108
		// If DUFFZERO is used inside a tail call (see genwrapper) it will
		// overwrite the link register.
	} else if false && q >= 4 {
		p := gins(mips.ASUBV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := (*gc.Node)(gc.Sysfunc("duffzero"))
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_mips64x.s
		p.To.Offset = int64(8 * (128 - q))

		// duffzero leaves R1 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(mips.AMOVV, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(mips.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}
Example #10
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
	}

	w := uint64(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := uint64(w % 8) // bytes
	q := uint64(w / 8) // dwords

	if gc.Reginuse(ppc64.REGRT1) {
		gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
	}

	var r0 gc.Node
	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
	var dst gc.Node
	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
	gc.Regrealloc(&dst)
	gc.Agen(nl, &dst)

	var boff uint64
	if q > 128 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8

		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(ppc64.AMOVD, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q * 8)

		p = gins(ppc64.AMOVDU, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 8
		pl := (*obj.Prog)(p)

		p = gins(ppc64.ACMP, &dst, &end)
		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)

		gc.Regfree(&end)

		// The loop leaves R3 on the last zeroed dword
		boff = 8
	} else if q >= 4 {
		p := gins(ppc64.ASUB, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 8
		f := (*gc.Node)(gc.Sysfunc("duffzero"))
		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
		p.To.Offset = int64(4 * (128 - q))

		// duffzero leaves R3 on the last zeroed dword
		boff = 8
	} else {
		var p *obj.Prog
		for t := uint64(0); t < q; t++ {
			p = gins(ppc64.AMOVD, &r0, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(8 * t)
		}

		boff = 8 * q
	}

	var p *obj.Prog
	for t := uint64(0); t < c; t++ {
		p = gins(ppc64.AMOVB, &r0, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(t + boff)
	}

	gc.Regfree(&dst)
}
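Both clearfat variants split the object into 8-byte dwords (q) and trailing bytes (c), and when duffzero is usable they jump partway into the runtime's unrolled loop: skipping 128-q iterations leaves exactly q stores, scaled by the per-iteration code size named in the "magic constants" comments. A small arithmetic sketch with a hypothetical object width:

package main

import "fmt"

func main() {
	w := uint64(83) // hypothetical object size in bytes

	q := w / 8 // full 8-byte dwords to zero
	c := w % 8 // trailing bytes zeroed one MOVB at a time

	// Jumping into the unrolled duffzero: with 128 iterations total, skipping the
	// first 128-q leaves exactly q stores. The per-iteration code size differs per
	// architecture (8 bytes on mips64, 4 on ppc64 in the examples above).
	offMips64 := 8 * (128 - q)
	offPPC64 := 4 * (128 - q)

	fmt.Printf("q=%d dwords, c=%d bytes, duffzero offsets: mips64=%d ppc64=%d\n",
		q, c, offMips64, offPPC64)
}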
Example #11
func regopt(firstp *obj.Prog) {
	mergetemp(firstp)

	// control flow is more complicated in generated go code
	// than in generated c code.  define pseudo-variables for
	// registers, so we have complete register usage information.
	var nreg int
	regnames := Thearch.Regnames(&nreg)

	nvar = nreg
	for i := 0; i < nreg; i++ {
		vars[i] = Var{}
	}
	for i := 0; i < nreg; i++ {
		if regnodes[i] == nil {
			regnodes[i] = newname(Lookup(regnames[i]))
		}
		vars[i].node = regnodes[i]
	}

	regbits = Thearch.Excludedregs()
	externs = zbits
	params = zbits
	consts = zbits
	addrs = zbits
	ivar = zbits
	ovar = zbits

	// pass 1
	// build aux data structure
	// allocate pcs
	// find use and set of variables
	g := Flowstart(firstp, func() interface{} { return new(Reg) })
	if g == nil {
		for i := 0; i < nvar; i++ {
			vars[i].node.SetOpt(nil)
		}
		return
	}

	firstf := g.Start

	for f := firstf; f != nil; f = f.Link {
		p := f.Prog
		// AVARLIVE must be considered a use, do not skip it.
		// Otherwise the variable will be optimized away,
		// and the whole point of AVARLIVE is to keep it on the stack.
		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}

		// Avoid making variables for direct-called functions.
		if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
			continue
		}

		// from vs to doesn't matter for registers.
		r := f.Data.(*Reg)
		r.use1.b[0] |= p.Info.Reguse | p.Info.Regindex
		r.set.b[0] |= p.Info.Regset

		bit := mkvar(f, &p.From)
		if bany(&bit) {
			if p.Info.Flags&LeftAddr != 0 {
				setaddrs(bit)
			}
			if p.Info.Flags&LeftRead != 0 {
				for z := 0; z < BITS; z++ {
					r.use1.b[z] |= bit.b[z]
				}
			}
			if p.Info.Flags&LeftWrite != 0 {
				for z := 0; z < BITS; z++ {
					r.set.b[z] |= bit.b[z]
				}
			}
		}

		// Compute used register for reg
		if p.Info.Flags&RegRead != 0 {
			r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
		}

		// Currently we never generate three register forms.
		// If we do, this will need to change.
		if p.From3Type() != obj.TYPE_NONE {
			Fatalf("regopt not implemented for from3")
		}

		bit = mkvar(f, &p.To)
		if bany(&bit) {
			if p.Info.Flags&RightAddr != 0 {
				setaddrs(bit)
			}
			if p.Info.Flags&RightRead != 0 {
				for z := 0; z < BITS; z++ {
					r.use2.b[z] |= bit.b[z]
				}
			}
			if p.Info.Flags&RightWrite != 0 {
				for z := 0; z < BITS; z++ {
					r.set.b[z] |= bit.b[z]
				}
			}
		}
	}

	for i := 0; i < nvar; i++ {
		v := &vars[i]
		if v.addr != 0 {
			bit := blsh(uint(i))
			for z := 0; z < BITS; z++ {
				addrs.b[z] |= bit.b[z]
			}
		}

		if Debug['R'] != 0 && Debug['v'] != 0 {
			fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset)
		}
	}

	if Debug['R'] != 0 && Debug['v'] != 0 {
		Dumpit("pass1", firstf, 1)
	}

	// pass 2
	// find looping structure
	flowrpo(g)

	if Debug['R'] != 0 && Debug['v'] != 0 {
		Dumpit("pass2", firstf, 1)
	}

	// pass 2.5
	// iterate propagating fat vardef covering forward
	// r->act records vars with a VARDEF since the last CALL.
	// (r->act will be reused in pass 5 for something else,
	// but we'll be done with it by then.)
	active := 0

	for f := firstf; f != nil; f = f.Link {
		f.Active = 0
		r := f.Data.(*Reg)
		r.act = zbits
	}

	for f := firstf; f != nil; f = f.Link {
		p := f.Prog
		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt() != nil {
			active++
			walkvardef(p.To.Node.(*Node), f, active)
		}
	}

	// pass 3
	// iterate propagating usage
	// 	back until flow graph is complete
	var f1 *Flow
	var i int
	var f *Flow
loop1:
	change = 0

	for f = firstf; f != nil; f = f.Link {
		f.Active = 0
	}
	for f = firstf; f != nil; f = f.Link {
		if f.Prog.As == obj.ARET {
			prop(f, zbits, zbits)
		}
	}

	// pick up unreachable code
loop11:
	i = 0

	for f = firstf; f != nil; f = f1 {
		f1 = f.Link
		if f1 != nil && f1.Active != 0 && f.Active == 0 {
			prop(f, zbits, zbits)
			i = 1
		}
	}

	if i != 0 {
		goto loop11
	}
	if change != 0 {
		goto loop1
	}

	if Debug['R'] != 0 && Debug['v'] != 0 {
		Dumpit("pass3", firstf, 1)
	}

	// pass 4
	// iterate propagating register/variable synchrony
	// 	forward until graph is complete
loop2:
	change = 0

	for f = firstf; f != nil; f = f.Link {
		f.Active = 0
	}
	synch(firstf, zbits)
	if change != 0 {
		goto loop2
	}

	if Debug['R'] != 0 && Debug['v'] != 0 {
		Dumpit("pass4", firstf, 1)
	}

	// pass 4.5
	// move register pseudo-variables into regu.
	mask := uint64((1 << uint(nreg)) - 1)
	for f := firstf; f != nil; f = f.Link {
		r := f.Data.(*Reg)
		r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
		r.set.b[0] &^= mask
		r.use1.b[0] &^= mask
		r.use2.b[0] &^= mask
		r.refbehind.b[0] &^= mask
		r.refahead.b[0] &^= mask
		r.calbehind.b[0] &^= mask
		r.calahead.b[0] &^= mask
		r.regdiff.b[0] &^= mask
		r.act.b[0] &^= mask
	}

	if Debug['R'] != 0 && Debug['v'] != 0 {
		Dumpit("pass4.5", firstf, 1)
	}

	// pass 5
	// isolate regions
	// calculate costs (paint1)
	var bit Bits
	if f := firstf; f != nil {
		r := f.Data.(*Reg)
		for z := 0; z < BITS; z++ {
			bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
		}
		if bany(&bit) && !f.Refset {
			// should never happen - all variables are preset
			if Debug['w'] != 0 {
				fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), &bit)
			}
			f.Refset = true
		}
	}

	for f := firstf; f != nil; f = f.Link {
		(f.Data.(*Reg)).act = zbits
	}
	nregion = 0
	region = region[:0]
	var rgp *Rgn
	for f := firstf; f != nil; f = f.Link {
		r := f.Data.(*Reg)
		for z := 0; z < BITS; z++ {
			bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
		}
		if bany(&bit) && !f.Refset {
			if Debug['w'] != 0 {
				fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), &bit)
			}
			f.Refset = true
			Thearch.Excise(f)
		}

		for z := 0; z < BITS; z++ {
			bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
		}
		for bany(&bit) {
			i = bnum(&bit)
			change = 0
			paint1(f, i)
			biclr(&bit, uint(i))
			if change <= 0 {
				continue
			}
			if nregion >= MaxRgn {
				nregion++
				continue
			}

			region = append(region, Rgn{
				enter: f,
				cost:  int16(change),
				varno: int16(i),
			})
			nregion++
		}
	}

	if false && Debug['v'] != 0 && strings.Contains(Curfn.Func.Nname.Sym.Name, "Parse") {
		Warn("regions: %d\n", nregion)
	}
	if nregion >= MaxRgn {
		if Debug['v'] != 0 {
			Warn("too many regions: %d\n", nregion)
		}
		nregion = MaxRgn
	}

	sort.Sort(rcmp(region[:nregion]))

	if Debug['R'] != 0 && Debug['v'] != 0 {
		Dumpit("pass5", firstf, 1)
	}

	// pass 6
	// determine used registers (paint2)
	// replace code (paint3)
	if Debug['R'] != 0 && Debug['v'] != 0 {
		fmt.Printf("\nregisterizing\n")
	}
	var usedreg uint64
	var vreg uint64
	for i := 0; i < nregion; i++ {
		rgp = &region[i]
		if Debug['R'] != 0 && Debug['v'] != 0 {
			fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
		}
		bit = blsh(uint(rgp.varno))
		usedreg = paint2(rgp.enter, int(rgp.varno), 0)
		vreg = allreg(usedreg, rgp)
		if rgp.regno != 0 {
			if Debug['R'] != 0 && Debug['v'] != 0 {
				v := &vars[rgp.varno]
				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg)
			}

			paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
		}
	}

	// free aux structures. peep allocates new ones.
	for i := 0; i < nvar; i++ {
		vars[i].node.SetOpt(nil)
	}
	Flowend(g)
	firstf = nil

	if Debug['R'] != 0 && Debug['v'] != 0 {
		// Rebuild flow graph, since we inserted instructions
		g := Flowstart(firstp, nil)
		firstf = g.Start
		Dumpit("pass6", firstf, 0)
		Flowend(g)
		firstf = nil
	}

	// pass 7
	// peep-hole on basic block
	if Debug['R'] == 0 || Debug['P'] != 0 {
		Thearch.Peep(firstp)
	}

	// eliminate nops
	for p := firstp; p != nil; p = p.Link {
		for p.Link != nil && p.Link.As == obj.ANOP {
			p.Link = p.Link.Link
		}
		if p.To.Type == obj.TYPE_BRANCH {
			for p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.ANOP {
				p.To.Val = p.To.Val.(*obj.Prog).Link
			}
		}
	}

	if Debug['R'] != 0 {
		if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 || false {
			fmt.Printf("\nstats\n")
		}

		if Ostats.Ncvtreg != 0 {
			fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
		}
		if Ostats.Nspill != 0 {
			fmt.Printf("\t%4d spill\n", Ostats.Nspill)
		}
		if Ostats.Nreload != 0 {
			fmt.Printf("\t%4d reload\n", Ostats.Nreload)
		}
		if Ostats.Ndelmov != 0 {
			fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
		}
		if Ostats.Nvar != 0 {
			fmt.Printf("\t%4d var\n", Ostats.Nvar)
		}
		if Ostats.Naddr != 0 {
			fmt.Printf("\t%4d addr\n", Ostats.Naddr)
		}

		Ostats = OptStats{}
	}
}