Code example #1
File: cgen.go Project: klueska/go-akaros
/*
 * allocate a register (reusing res if possible) and generate
 *  a = n
 * The caller must call regfree(a).
 */
func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("cgenr-n", n)
	}

	if gc.Isfat(n.Type) {
		gc.Fatal("cgenr on fat node")
	}

	if n.Addable != 0 {
		regalloc(a, n.Type, res)
		gmove(n, a)
		return
	}

	switch n.Op {
	case gc.ONAME,
		gc.ODOT,
		gc.ODOTPTR,
		gc.OINDEX,
		gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		var n1 gc.Node
		igen(n, &n1, res)
		regalloc(a, gc.Types[gc.Tptr], &n1)
		gmove(&n1, a)
		regfree(&n1)

	default:
		regalloc(a, n.Type, res)
		cgen(n, a)
	}
}
Code example #2
File: cgen.go Project: klueska/go-akaros
/*
 * branch gen
 *	if(n == true) goto to;
 */
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nbgen", n)
	}

	if n == nil {
		n = gc.Nodbool(true)
	}

	if n.Ninit != nil {
		gc.Genlist(n.Ninit)
	}

	if n.Type == nil {
		gc.Convlit(&n, gc.Types[gc.TBOOL])
		if n.Type == nil {
			return
		}
	}

	et := int(n.Type.Etype)
	if et != gc.TBOOL {
		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
		gc.Patch(gins(obj.AEND, nil, nil), to)
		return
	}

	for n.Op == gc.OCONVNOP {
		n = n.Left
		if n.Ninit != nil {
			gc.Genlist(n.Ninit)
		}
	}

	nl := n.Left
	var nr *gc.Node

	if nl != nil && gc.Isfloat[nl.Type.Etype] {
		bgen_float(n, bool2int(true_), likely, to)
		return
	}

	switch n.Op {
	default:
		goto def

		// need to ask if it is bool?
	case gc.OLITERAL:
		if !true_ == (n.Val.U.Bval == 0) {
			gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
		}
		return

	case gc.ONAME:
		if n.Addable == 0 {
			goto def
		}
		var n1 gc.Node
		gc.Nodconst(&n1, n.Type, 0)
		gins(optoas(gc.OCMP, n.Type), n, &n1)
		a := x86.AJNE
		if !true_ {
			a = x86.AJEQ
		}
		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
		return

	case gc.OANDAND,
		gc.OOROR:
		if (n.Op == gc.OANDAND) == true_ {
			p1 := gc.Gbranch(obj.AJMP, nil, 0)
			p2 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			bgen(n.Left, !true_, -likely, p2)
			bgen(n.Right, !true_, -likely, p2)
			p1 = gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, to)
			gc.Patch(p2, gc.Pc)
		} else {
			bgen(n.Left, true_, likely, to)
			bgen(n.Right, true_, likely, to)
		}

		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		nr = n.Right
		if nr == nil || nr.Type == nil {
			return
		}
		fallthrough

	case gc.ONOT: // unary
		nl = n.Left

		if nl == nil || nl.Type == nil {
			return
		}
	}

	switch n.Op {
	case gc.ONOT:
		bgen(nl, !true_, likely, to)

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		a := int(n.Op)
		if !true_ {
			a = gc.Brcom(a)
			true_ = !true_
		}

		// make simplest on right
		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
			a = gc.Brrev(a)
			r := nl
			nl = nr
			nr = r
		}

		if gc.Isslice(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal slice comparison")
				break
			}

			a = optoas(a, gc.Types[gc.Tptr])
			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Xoffset += int64(gc.Array_array)
			n1.Type = gc.Types[gc.Tptr]
			var tmp gc.Node
			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
			regfree(&n1)
			break
		}

		if gc.Isinter(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal interface comparison")
				break
			}

			a = optoas(a, gc.Types[gc.Tptr])
			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Type = gc.Types[gc.Tptr]
			var tmp gc.Node
			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
			regfree(&n1)
			break
		}

		if gc.Iscomplex[nl.Type.Etype] {
			gc.Complexbool(a, nl, nr, true_, likely, to)
			break
		}

		if gc.Is64(nr.Type) {
			if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
				var n1 gc.Node
				gc.Tempname(&n1, nl.Type)
				cgen(nl, &n1)
				nl = &n1
			}

			if nr.Addable == 0 {
				var n2 gc.Node
				gc.Tempname(&n2, nr.Type)
				cgen(nr, &n2)
				nr = &n2
			}

			cmp64(nl, nr, a, likely, to)
			break
		}

		var n2 gc.Node
		if nr.Ullman >= gc.UINF {
			if nl.Addable == 0 {
				var n1 gc.Node
				gc.Tempname(&n1, nl.Type)
				cgen(nl, &n1)
				nl = &n1
			}

			if nr.Addable == 0 {
				var tmp gc.Node
				gc.Tempname(&tmp, nr.Type)
				cgen(nr, &tmp)
				nr = &tmp
			}

			var n2 gc.Node
			regalloc(&n2, nr.Type, nil)
			cgen(nr, &n2)
			nr = &n2
			goto cmp
		}

		if nl.Addable == 0 {
			var n1 gc.Node
			gc.Tempname(&n1, nl.Type)
			cgen(nl, &n1)
			nl = &n1
		}

		if gc.Smallintconst(nr) {
			gins(optoas(gc.OCMP, nr.Type), nl, nr)
			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
			break
		}

		if nr.Addable == 0 {
			var tmp gc.Node
			gc.Tempname(&tmp, nr.Type)
			cgen(nr, &tmp)
			nr = &tmp
		}

		regalloc(&n2, nr.Type, nil)
		gmove(nr, &n2)
		nr = &n2

	cmp:
		gins(optoas(gc.OCMP, nr.Type), nl, nr)
		gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)

		if nl.Op == gc.OREGISTER {
			regfree(nl)
		}
		regfree(nr)
	}

	return

def:
	var n1 gc.Node
	regalloc(&n1, n.Type, nil)
	cgen(n, &n1)
	var n2 gc.Node
	gc.Nodconst(&n2, n.Type, 0)
	gins(optoas(gc.OCMP, n.Type), &n1, &n2)
	a := x86.AJNE
	if !true_ {
		a = x86.AJEQ
	}
	gc.Patch(gc.Gbranch(a, n.Type, likely), to)
	regfree(&n1)
	return
}
Code example #3
File: ggen.go Project: tidatida/go
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSL.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSL loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)

		gc.Agen(nl, &n1)
		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for {
			tmp14 := q
			q--
			if tmp14 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for {
			tmp15 := c
			c--
			if tmp15 <= 0 {
				break
			}
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var n1 gc.Node
	gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
	gc.Agen(nl, &n1)
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || (q >= 4 && gc.Nacl) {
		gconreg(x86.AMOVL, int64(q), x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
	} else if q >= 4 {
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))

		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
		p.To.Offset = 1 * (128 - int64(q))
	} else {
		for q > 0 {
			gins(x86.ASTOSL, nil, nil) // STOL AL,*(DI)+
			q--
		}
	}

	for c > 0 {
		gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
		c--
	}
}
Code example #4
File: gsubr.go Project: tidatida/go
/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case obj.ACALL:
		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
			pp := gc.Prog(as)
			pp.From = p.From
			pp.To.Type = obj.TYPE_REG
			pp.To.Reg = ppc64.REG_CTR

			p.As = ppc64.AMOVD
			p.From = p.To
			p.To.Type = obj.TYPE_REG
			p.To.Reg = ppc64.REG_CTR

			if gc.Debug['g'] != 0 {
				fmt.Printf("%v\n", p)
				fmt.Printf("%v\n", pp)
			}

			return pp
		}

	// Bad things the front end has done to us. Crash to find call stack.
	case ppc64.AAND, ppc64.AMULLD:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	case ppc64.ACMP, ppc64.ACMPU:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case ppc64.AMOVB,
		ppc64.AMOVBU,
		ppc64.AMOVBZ,
		ppc64.AMOVBZU:
		w = 1

	case ppc64.AMOVH,
		ppc64.AMOVHU,
		ppc64.AMOVHZ,
		ppc64.AMOVHZU:
		w = 2

	case ppc64.AMOVW,
		ppc64.AMOVWU,
		ppc64.AMOVWZ,
		ppc64.AMOVWZU:
		w = 4

	case ppc64.AMOVD,
		ppc64.AMOVDU:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}
Code example #5
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	res = &n;
 * The generated code checks that the result is not nil.
 */
func agen(n *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nagen-res", res)
		gc.Dump("agen-r", n)
	}

	if n == nil || n.Type == nil {
		return
	}

	for n.Op == gc.OCONVNOP {
		n = n.Left
	}

	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
		// Use of a nil interface or nil slice.
		// Create a temporary we can take the address of and read.
		// The generated code is just going to panic, so it need not
		// be terribly efficient. See issue 3670.
		var n1 gc.Node
		gc.Tempname(&n1, n.Type)

		gc.Gvardef(&n1)
		clearfat(&n1)
		var n2 gc.Node
		regalloc(&n2, gc.Types[gc.Tptr], res)
		var n3 gc.Node
		n3.Op = gc.OADDR
		n3.Left = &n1
		gins(ppc64.AMOVD, &n3, &n2)
		gmove(&n2, res)
		regfree(&n2)
		return
	}

	if n.Addable != 0 {
		var n1 gc.Node
		n1.Op = gc.OADDR
		n1.Left = n
		var n2 gc.Node
		regalloc(&n2, gc.Types[gc.Tptr], res)
		gins(ppc64.AMOVD, &n1, &n2)
		gmove(&n2, res)
		regfree(&n2)
		return
	}

	nl := n.Left

	switch n.Op {
	default:
		gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))

		// TODO(minux): 5g has this: Release res so that it is available for cgen_call.
	// Pick it up again after the call for OCALLMETH and OCALLFUNC.
	case gc.OCALLMETH:
		gc.Cgen_callmeth(n, 0)

		cgen_aret(n, res)

	case gc.OCALLINTER:
		cgen_callinter(n, res, 0)
		cgen_aret(n, res)

	case gc.OCALLFUNC:
		cgen_call(n, 0)
		cgen_aret(n, res)

	case gc.OSLICE,
		gc.OSLICEARR,
		gc.OSLICESTR,
		gc.OSLICE3,
		gc.OSLICE3ARR:
		var n1 gc.Node
		gc.Tempname(&n1, n.Type)
		gc.Cgen_slice(n, &n1)
		agen(&n1, res)

	case gc.OEFACE:
		var n1 gc.Node
		gc.Tempname(&n1, n.Type)
		gc.Cgen_eface(n, &n1)
		agen(&n1, res)

	case gc.OINDEX:
		var n1 gc.Node
		agenr(n, &n1, res)
		gmove(&n1, res)
		regfree(&n1)

		// should only get here with names in this func.
	case gc.ONAME:
		if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
			gc.Dump("bad agen", n)
			gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
		}

		// should only get here for heap vars or paramref
		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
			gc.Dump("bad agen", n)
			gc.Fatal("agen: bad ONAME class %#x", n.Class)
		}

		cgen(n.Heapaddr, res)
		if n.Xoffset != 0 {
			ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
		}

	case gc.OIND:
		cgen(nl, res)
		gc.Cgen_checknil(res)

	case gc.ODOT:
		agen(nl, res)
		if n.Xoffset != 0 {
			ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
		}

	case gc.ODOTPTR:
		cgen(nl, res)
		gc.Cgen_checknil(res)
		if n.Xoffset != 0 {
			ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
		}
	}
}
Code example #6
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	newreg = &n;
 *	res = newreg
 *
 * on exit, a has been changed to be *newreg.
 * caller must regfree(a).
 * The generated code checks that the result is not *nil.
 */
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nigen-n", n)
	}

	switch n.Op {
	case gc.ONAME:
		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
			break
		}
		*a = *n
		return

		// Increase the refcount of the register so that igen's caller
	// has to call regfree.
	case gc.OINDREG:
		if n.Val.U.Reg != ppc64.REGSP {
			reg[n.Val.U.Reg]++
		}
		*a = *n
		return

	case gc.ODOT:
		igen(n.Left, a, res)
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		fixlargeoffset(a)
		return

	case gc.ODOTPTR:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)
		a.Op = gc.OINDREG
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		fixlargeoffset(a)
		return

	case gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		switch n.Op {
		case gc.OCALLFUNC:
			cgen_call(n, 0)

		case gc.OCALLMETH:
			gc.Cgen_callmeth(n, 0)

		case gc.OCALLINTER:
			cgen_callinter(n, nil, 0)
		}

		var flist gc.Iter
		fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
		*a = gc.Node{}
		a.Op = gc.OINDREG
		a.Val.U.Reg = ppc64.REGSP
		a.Addable = 1
		a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
		a.Type = n.Type
		return

		// Index of fixed-size array by constant can
	// put the offset in the addressing.
	// Could do the same for slice except that we need
	// to use the real index for the bounds checking.
	case gc.OINDEX:
		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
			if gc.Isconst(n.Right, gc.CTINT) {
				// Compute &a.
				if !gc.Isptr[n.Left.Type.Etype] {
					igen(n.Left, a, res)
				} else {
					var n1 gc.Node
					igen(n.Left, &n1, res)
					gc.Cgen_checknil(&n1)
					regalloc(a, gc.Types[gc.Tptr], res)
					gmove(&n1, a)
					regfree(&n1)
					a.Op = gc.OINDREG
				}

				// Compute &a[i] as &a + i*width.
				a.Type = n.Type

				a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
				fixlargeoffset(a)
				return
			}
		}
	}

	agenr(n, a, res)
	a.Op = gc.OINDREG
	a.Type = n.Type
}
Code example #7
File: cgen.go Project: klueska/go-akaros
/*
 * block copy:
 *	memmove(&ns, &n, w);
 */
func sgen(n *gc.Node, ns *gc.Node, w int64) {
	var res *gc.Node = ns

	if gc.Debug['g'] != 0 {
		fmt.Printf("\nsgen w=%d\n", w)
		gc.Dump("r", n)
		gc.Dump("res", ns)
	}

	if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
		gc.Fatal("sgen UINF")
	}

	if w < 0 {
		gc.Fatal("sgen copy %d", w)
	}

	// If copying .args, that's all the results, so record definition sites
	// for them for the liveness analysis.
	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
			if l.N.Class == gc.PPARAMOUT {
				gc.Gvardef(l.N)
			}
		}
	}

	// Avoid taking the address for simple enough types.
	//if(componentgen(n, ns))
	//	return;
	if w == 0 {
		// evaluate side effects only.
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], nil)

		agen(res, &dst)
		agen(n, &dst)
		regfree(&dst)
		return
	}

	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))

	case 1:
		op = ppc64.AMOVBU

	case 2:
		op = ppc64.AMOVHU

	case 4:
		op = ppc64.AMOVWZU // there is no lwau, only lwaux

	case 8:
		op = ppc64.AMOVDU
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
	}
	c := int32(w / int64(align))

	// offset on the stack
	osrc := int32(stkof(n))

	odst := int32(stkof(res))
	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
		// osrc and odst both on stack, and at least one is in
		// an unknown position.  Could generate code to test
		// for forward/backward copy, but instead just copy
		// to a temporary location first.
		var tmp gc.Node
		gc.Tempname(&tmp, n.Type)

		sgen(n, &tmp, w)
		sgen(&tmp, res, w)
		return
	}

	if osrc%int32(align) != 0 || odst%int32(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		agenr(n, &dst, res) // temporarily use dst
		regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(ppc64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agenr(res, &dst, res)
		agenr(n, &src, nil)
	}

	var tmp gc.Node
	regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(ppc64.AMOVD, &src, &nend)
		}

		p := gins(ppc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(ppc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(ppc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(ppc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(ppc64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)

		p = gins(ppc64.ACMP, &src, &nend)

		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
		regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs.  That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for {
			tmp14 := c
			c--
			if tmp14 <= 0 {
				break
			}

			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
		}
	}

	regfree(&dst)
	regfree(&src)
	regfree(&tmp)
}
Code example #8
File: gsubr.go Project: klueska/go-akaros
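/*
 * report whether any register outside the reserved set
 * is currently allocated.
 */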
func anyregalloc() bool {
	var j int

	for i := int(0); i < len(reg); i++ {
		if reg[i] == 0 {
			goto ok
		}
		for j = 0; j < len(resvd); j++ {
			if resvd[j] == i {
				goto ok
			}
		}
		return true
	ok:
	}

	return false
}

/*
 * allocate register of type t, leave in n.
 * if o != N, o is desired fixed register.
 * caller must regfree(n).
 */
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
	if t == nil {
		gc.Fatal("regalloc: t nil")
	}
	et := int(int(gc.Simtype[t.Etype]))

	if gc.Debug['r'] != 0 {
		fixfree := int(0)
		fltfree := int(0)
		for i := int(ppc64.REG_R0); i < ppc64.REG_F31; i++ {
			if reg[i-ppc64.REG_R0] == 0 {
				if i < ppc64.REG_F0 {
					fixfree++
				} else {
					fltfree++
				}
			}
		}

		fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
	}

	var i int
	switch et {
	case gc.TINT8,
		gc.TUINT8,
		gc.TINT16,
		gc.TUINT16,
		gc.TINT32,
		gc.TUINT32,
		gc.TINT64,
		gc.TUINT64,
		gc.TPTR32,
		gc.TPTR64,
		gc.TBOOL:
		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= ppc64.REGMIN && i <= ppc64.REGMAX {
				goto out
			}
		}

		for i = ppc64.REGMIN; i <= ppc64.REGMAX; i++ {
			if reg[i-ppc64.REG_R0] == 0 {
				regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
				goto out
			}
		}

		gc.Flusherrors()
		for i := int(ppc64.REG_R0); i < ppc64.REG_R0+ppc64.NREG; i++ {
			fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
		}
		gc.Fatal("out of fixed registers")

	case gc.TFLOAT32,
		gc.TFLOAT64:
		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= ppc64.FREGMIN && i <= ppc64.FREGMAX {
				goto out
			}
		}

		for i = ppc64.FREGMIN; i <= ppc64.FREGMAX; i++ {
			if reg[i-ppc64.REG_R0] == 0 {
				regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
				goto out
			}
		}

		gc.Flusherrors()
		for i := int(ppc64.REG_F0); i < ppc64.REG_F0+ppc64.NREG; i++ {
			fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
		}
		gc.Fatal("out of floating registers")

	case gc.TCOMPLEX64,
		gc.TCOMPLEX128:
		gc.Tempname(n, t)
		return
	}

	gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
	return

out:
	reg[i-ppc64.REG_R0]++
	gc.Nodreg(n, t, i)
}

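/*
 * release a register previously obtained from regalloc.
 */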
func regfree(n *gc.Node) {
	if n.Op == gc.ONAME {
		return
	}
	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
		gc.Fatal("regfree: not a register")
	}
	i := int(int(n.Val.U.Reg) - ppc64.REG_R0)
	if i == ppc64.REGSP-ppc64.REG_R0 {
		return
	}
	if i < 0 || i >= len(reg) {
		gc.Fatal("regfree: reg out of range")
	}
	if reg[i] <= 0 {
		gc.Fatal("regfree: reg not allocated")
	}
	reg[i]--
	if reg[i] == 0 {
		regpc[i] = 0
	}
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(ppc64.AMOVD, &n1, &ntmp)
		gins(as, &ntmp, n2)
		regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}

/*
 * generate
 *	as n, $c (CMP/CMPU)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatal("ginscon2")

	case ppc64.ACMP:
		if -ppc64.BIG <= c && c <= ppc64.BIG {
			gins(as, n2, &n1)
			return
		}

	case ppc64.ACMPU:
		if 0 <= c && c <= 2*ppc64.BIG {
			gins(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	gins(ppc64.AMOVD, &n1, &ntmp)
	gins(as, n2, &ntmp)
	regfree(&ntmp)
}

/*
 * set up nodes representing 2^63
 */
var bigi gc.Node

var bigf gc.Node

var bignodes_did int

func bignodes() {
	if bignodes_did != 0 {
		return
	}
	bignodes_did = 1

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
	gc.Mpshiftfix(bigi.Val.U.Xval, 63)

	bigf = bigi
	bigf.Type = gc.Types[gc.TFLOAT64]
	bigf.Val.Ctype = gc.CTFLT
	bigf.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r2 gc.Node
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			gc.Convconst(&con, t.Type, &f.Val)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
			var r1 gc.Node
			regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
			var r1 gc.Node
			regalloc(&r1, con.Type, t)
			gins(ppc64.AMOVD, &con, &r1)
			gmove(&r1, t)
			regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// float constants come from memory.
	//if(isfloat[tt])
	//	goto hard;

	// 64-bit immediates are also from memory.
	//if(isint[tt])
	//	goto hard;
	//// 64-bit immediates are really 32-bit sign-extended
	//// unless moving into a register.
	//if(isint[tt]) {
	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
	//		goto hard;
	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
	//		goto hard;
	//}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		// truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		// truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		// truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = ppc64.AMOVWZ

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = ppc64.AMOVD

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = ppc64.AMOVBZ

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = ppc64.AMOVHZ

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = ppc64.AMOVWZ

		goto rdst

		//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native float64 -> int64 conversion.
	//	otherwise, subtract 2^63, convert, and add it back.
	/*
	* float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64,
		gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8,
		gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32,
		gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		bignodes()

		var r1 gc.Node
		regalloc(&r1, gc.Types[ft], f)
		gmove(f, &r1)
		if tt == gc.TUINT64 {
			regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
			gmove(&bigf, &r2)
			gins(ppc64.AFCMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
			gins(ppc64.AFSUB, &r2, &r1)
			gc.Patch(p1, gc.Pc)
			regfree(&r2)
		}

		regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
		var r3 gc.Node
		regalloc(&r3, gc.Types[gc.TINT64], t)
		gins(ppc64.AFCTIDZ, &r1, &r2)
		p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AMOVD, nil, &r3)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		regfree(&r2)
		regfree(&r1)
		if tt == gc.TUINT64 {
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
			gins(ppc64.AMOVD, &bigi, &r1)
			gins(ppc64.AADD, &r1, &r3)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r3, t)
		regfree(&r3)
		return

		//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
	//return;
	// algorithm is:
	//	if small enough, use native int64 -> uint64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	/*
	 * integer to float
	 */
	case gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64,
		gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		bignodes()

		var r1 gc.Node
		regalloc(&r1, gc.Types[gc.TINT64], nil)
		gmove(f, &r1)
		if ft == gc.TUINT64 {
			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
			gmove(&bigi, &r2)
			gins(ppc64.ACMPU, &r1, &r2)
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
			p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
			p2.From.Type = obj.TYPE_CONST
			p2.From.Offset = 1
			gc.Patch(p1, gc.Pc)
		}

		regalloc(&r2, gc.Types[gc.TFLOAT64], t)
		p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = ppc64.REGSP
		p1.To.Offset = -8
		p1 = gins(ppc64.AFMOVD, nil, &r2)
		p1.From.Type = obj.TYPE_MEM
		p1.From.Reg = ppc64.REGSP
		p1.From.Offset = -8
		gins(ppc64.AFCFID, &r2, &r2)
		regfree(&r1)
		if ft == gc.TUINT64 {
			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
			gins(ppc64.AFMUL, &r1, &r2)
			gc.Patch(p1, gc.Pc)
		}

		gmove(&r2, t)
		regfree(&r2)
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = ppc64.AFRSP
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	var af obj.Addr
	var at obj.Addr
	if f != nil {
		af = gc.Naddr(f)
	}
	if t != nil {
		at = gc.Naddr(t)
	}
	p := (*obj.Prog)(gc.Prog(as))
	if f != nil {
		p.From = af
	}
	if t != nil {
		p.To = at
	}
	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case ppc64.AMOVB,
		ppc64.AMOVBU,
		ppc64.AMOVBZ,
		ppc64.AMOVBZU:
		w = 1

	case ppc64.AMOVH,
		ppc64.AMOVHU,
		ppc64.AMOVHZ,
		ppc64.AMOVHZU:
		w = 2

	case ppc64.AMOVW,
		ppc64.AMOVWU,
		ppc64.AMOVWZ,
		ppc64.AMOVWZU:
		w = 4

	case ppc64.AMOVD,
		ppc64.AMOVDU:
		if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
	}

	return p
}

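/*
 * ensure the offset of an OINDREG node fits in 32 bits;
 * larger offsets are currently a fatal error (see TODO below).
 */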
func fixlargeoffset(n *gc.Node) {
	if n == nil {
		return
	}
	if n.Op != gc.OINDREG {
		return
	}
	if n.Val.U.Reg == ppc64.REGSP { // stack offset cannot be large
		return
	}
	if n.Xoffset != int64(int32(n.Xoffset)) {
		// TODO(minux): offset too large, move into R31 and add to R31 instead.
		// this is used only in test/fixedbugs/issue6036.go.
		gc.Fatal("offset too large: %v", gc.Nconv(n, 0))

		a := gc.Node(*n)
		a.Op = gc.OREGISTER
		a.Type = gc.Types[gc.Tptr]
		a.Xoffset = 0
		gc.Cgen_checknil(&a)
		ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
		n.Xoffset = 0
	}
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatal("optoas: t is nil")
	}

	a := int(obj.AXXX)
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))

	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = ppc64.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = ppc64.ABNE

	case gc.OLT<<16 | gc.TINT8, // ACMP
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64,
		gc.OLT<<16 | gc.TUINT8,
		// ACMPU
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64,
		gc.OLT<<16 | gc.TFLOAT32,
		// AFCMPU
		gc.OLT<<16 | gc.TFLOAT64:
		a = ppc64.ABLT

	case gc.OLE<<16 | gc.TINT8, // ACMP
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64,
		gc.OLE<<16 | gc.TUINT8,
		// ACMPU
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64,
		gc.OLE<<16 | gc.TFLOAT32,
		// AFCMPU
		gc.OLE<<16 | gc.TFLOAT64:
		a = ppc64.ABLE

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = ppc64.ABGT

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64,
		gc.OGE<<16 | gc.TFLOAT32,
		gc.OGE<<16 | gc.TFLOAT64:
		a = ppc64.ABGE

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TPTR32,
		gc.OCMP<<16 | gc.TINT64:
		a = ppc64.ACMP

	case gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TUINT64,
		gc.OCMP<<16 | gc.TPTR64:
		a = ppc64.ACMPU

	case gc.OCMP<<16 | gc.TFLOAT32,
		gc.OCMP<<16 | gc.TFLOAT64:
		a = ppc64.AFCMPU

	case gc.OAS<<16 | gc.TBOOL,
		gc.OAS<<16 | gc.TINT8:
		a = ppc64.AMOVB

	case gc.OAS<<16 | gc.TUINT8:
		a = ppc64.AMOVBZ

	case gc.OAS<<16 | gc.TINT16:
		a = ppc64.AMOVH

	case gc.OAS<<16 | gc.TUINT16:
		a = ppc64.AMOVHZ

	case gc.OAS<<16 | gc.TINT32:
		a = ppc64.AMOVW

	case gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = ppc64.AMOVWZ

	case gc.OAS<<16 | gc.TINT64,
		gc.OAS<<16 | gc.TUINT64,
		gc.OAS<<16 | gc.TPTR64:
		a = ppc64.AMOVD

	case gc.OAS<<16 | gc.TFLOAT32:
		a = ppc64.AFMOVS

	case gc.OAS<<16 | gc.TFLOAT64:
		a = ppc64.AFMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32,
		gc.OADD<<16 | gc.TINT64,
		gc.OADD<<16 | gc.TUINT64,
		gc.OADD<<16 | gc.TPTR64:
		a = ppc64.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = ppc64.AFADDS

	case gc.OADD<<16 | gc.TFLOAT64:
		a = ppc64.AFADD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32,
		gc.OSUB<<16 | gc.TINT64,
		gc.OSUB<<16 | gc.TUINT64,
		gc.OSUB<<16 | gc.TPTR64:
		a = ppc64.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = ppc64.AFSUBS

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = ppc64.AFSUB

	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32,
		gc.OMINUS<<16 | gc.TINT64,
		gc.OMINUS<<16 | gc.TUINT64,
		gc.OMINUS<<16 | gc.TPTR64:
		a = ppc64.ANEG

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32,
		gc.OAND<<16 | gc.TINT64,
		gc.OAND<<16 | gc.TUINT64,
		gc.OAND<<16 | gc.TPTR64:
		a = ppc64.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32,
		gc.OOR<<16 | gc.TINT64,
		gc.OOR<<16 | gc.TUINT64,
		gc.OOR<<16 | gc.TPTR64:
		a = ppc64.AOR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32,
		gc.OXOR<<16 | gc.TINT64,
		gc.OXOR<<16 | gc.TUINT64,
		gc.OXOR<<16 | gc.TPTR64:
		a = ppc64.AXOR

		// TODO(minux): handle rotates
	//case CASE(OLROT, TINT8):
	//case CASE(OLROT, TUINT8):
	//case CASE(OLROT, TINT16):
	//case CASE(OLROT, TUINT16):
	//case CASE(OLROT, TINT32):
	//case CASE(OLROT, TUINT32):
	//case CASE(OLROT, TPTR32):
	//case CASE(OLROT, TINT64):
	//case CASE(OLROT, TUINT64):
	//case CASE(OLROT, TPTR64):
	//	a = 0//???; RLDC?
	//	break;

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32,
		gc.OLSH<<16 | gc.TINT64,
		gc.OLSH<<16 | gc.TUINT64,
		gc.OLSH<<16 | gc.TPTR64:
		a = ppc64.ASLD

	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32,
		gc.ORSH<<16 | gc.TUINT64,
		gc.ORSH<<16 | gc.TPTR64:
		a = ppc64.ASRD

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32,
		gc.ORSH<<16 | gc.TINT64:
		a = ppc64.ASRAD

		// TODO(minux): handle rotates
	//case CASE(ORROTC, TINT8):
	//case CASE(ORROTC, TUINT8):
	//case CASE(ORROTC, TINT16):
	//case CASE(ORROTC, TUINT16):
	//case CASE(ORROTC, TINT32):
	//case CASE(ORROTC, TUINT32):
	//case CASE(ORROTC, TINT64):
	//case CASE(ORROTC, TUINT64):
	//	a = 0//??? RLDC??
	//	break;

	case gc.OHMUL<<16 | gc.TINT64:
		a = ppc64.AMULHD

	case gc.OHMUL<<16 | gc.TUINT64,
		gc.OHMUL<<16 | gc.TPTR64:
		a = ppc64.AMULHDU

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32,
		gc.OMUL<<16 | gc.TINT64:
		a = ppc64.AMULLD

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32,
		// don't use word multiply, the high 32-bit are undefined.
		// fallthrough
		gc.OMUL<<16 | gc.TUINT64,
		gc.OMUL<<16 | gc.TPTR64:
		a = ppc64.AMULLD
		// for 64-bit multiplies, signedness doesn't matter.

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = ppc64.AFMULS

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = ppc64.AFMUL

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32,
		gc.ODIV<<16 | gc.TINT64:
		a = ppc64.ADIVD

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32,
		gc.ODIV<<16 | gc.TUINT64,
		gc.ODIV<<16 | gc.TPTR64:
		a = ppc64.ADIVDU

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = ppc64.AFDIVS

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = ppc64.AFDIV
	}

	return a
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return true
}

func sudoclean() {
	return
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	// TODO(minux)

	*a = obj.Addr{}
	return false
}
Code example #9
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	if(n == true) goto to;
 */
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nbgen", n)
	}

	if n == nil {
		n = gc.Nodbool(true)
	}

	if n.Ninit != nil {
		gc.Genlist(n.Ninit)
	}

	if n.Type == nil {
		gc.Convlit(&n, gc.Types[gc.TBOOL])
		if n.Type == nil {
			return
		}
	}

	et := int(n.Type.Etype)
	if et != gc.TBOOL {
		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
		gc.Patch(gins(obj.AEND, nil, nil), to)
		return
	}

	for n.Op == gc.OCONVNOP {
		n = n.Left
		if n.Ninit != nil {
			gc.Genlist(n.Ninit)
		}
	}

	var nl *gc.Node
	var nr *gc.Node
	switch n.Op {
	default:
		goto def

		// need to ask if it is bool?
	case gc.OLITERAL:
		if !true_ == (n.Val.U.Bval == 0) {
			gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
		}
		return

	case gc.ONAME:
		if n.Addable == 0 {
			goto def
		}
		var n1 gc.Node
		gc.Nodconst(&n1, n.Type, 0)
		gins(optoas(gc.OCMP, n.Type), n, &n1)
		a := x86.AJNE
		if !true_ {
			a = x86.AJEQ
		}
		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
		return

	case gc.OANDAND,
		gc.OOROR:
		if (n.Op == gc.OANDAND) == true_ {
			p1 := gc.Gbranch(obj.AJMP, nil, 0)
			p2 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			bgen(n.Left, !true_, -likely, p2)
			bgen(n.Right, !true_, -likely, p2)
			p1 = gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, to)
			gc.Patch(p2, gc.Pc)
		} else {
			bgen(n.Left, true_, likely, to)
			bgen(n.Right, true_, likely, to)
		}

		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		nr = n.Right
		if nr == nil || nr.Type == nil {
			return
		}
		fallthrough

	case gc.ONOT: // unary
		nl = n.Left

		if nl == nil || nl.Type == nil {
			return
		}
	}

	switch n.Op {
	case gc.ONOT:
		bgen(nl, !true_, likely, to)
		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		a := int(n.Op)
		if !true_ {
			if gc.Isfloat[nr.Type.Etype] {
				// brcom is not valid on floats when NaN is involved.
				p1 := gc.Gbranch(obj.AJMP, nil, 0)

				p2 := gc.Gbranch(obj.AJMP, nil, 0)
				gc.Patch(p1, gc.Pc)
				ll := n.Ninit // avoid re-genning ninit
				n.Ninit = nil
				bgen(n, true, -likely, p2)
				n.Ninit = ll
				gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
				gc.Patch(p2, gc.Pc)
				return
			}

			a = gc.Brcom(a)
			true_ = !true_
		}

		// make simplest on right
		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
			a = gc.Brrev(a)
			r := nl
			nl = nr
			nr = r
		}

		if gc.Isslice(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal slice comparison")
				break
			}

			a = optoas(a, gc.Types[gc.Tptr])
			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Xoffset += int64(gc.Array_array)
			n1.Type = gc.Types[gc.Tptr]
			var tmp gc.Node
			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
			regfree(&n1)
			break
		}

		if gc.Isinter(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal interface comparison")
				break
			}

			a = optoas(a, gc.Types[gc.Tptr])
			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Type = gc.Types[gc.Tptr]
			var tmp gc.Node
			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
			regfree(&n1)
			break
		}

		if gc.Iscomplex[nl.Type.Etype] {
			gc.Complexbool(a, nl, nr, true_, likely, to)
			break
		}

		var n2 gc.Node
		var n1 gc.Node
		if nr.Ullman >= gc.UINF {
			regalloc(&n1, nl.Type, nil)
			cgen(nl, &n1)

			var tmp gc.Node
			gc.Tempname(&tmp, nl.Type)
			gmove(&n1, &tmp)
			regfree(&n1)

			regalloc(&n2, nr.Type, nil)
			cgen(nr, &n2)

			regalloc(&n1, nl.Type, nil)
			cgen(&tmp, &n1)

			goto cmp
		}

		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)

		if gc.Smallintconst(nr) {
			gins(optoas(gc.OCMP, nr.Type), &n1, nr)
			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
			regfree(&n1)
			break
		}

		regalloc(&n2, nr.Type, nil)
		cgen(nr, &n2)

		// only < and <= work right with NaN; reverse if needed
	cmp:
		l := &n1

		r := &n2
		if gc.Isfloat[nl.Type.Etype] && (a == gc.OGT || a == gc.OGE) {
			l = &n2
			r = &n1
			a = gc.Brrev(a)
		}

		gins(optoas(gc.OCMP, nr.Type), l, r)

		if gc.Isfloat[nr.Type.Etype] && (n.Op == gc.OEQ || n.Op == gc.ONE) {
			if n.Op == gc.OEQ {
				// neither NE nor P
				p1 := gc.Gbranch(x86.AJNE, nil, -likely)

				p2 := gc.Gbranch(x86.AJPS, nil, -likely)
				gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
				gc.Patch(p1, gc.Pc)
				gc.Patch(p2, gc.Pc)
			} else {
				// either NE or P
				gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)

				gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
			}
		} else {
			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
		}
		regfree(&n1)
		regfree(&n2)
	}

	return

def:
	var n1 gc.Node
	regalloc(&n1, n.Type, nil)
	cgen(n, &n1)
	var n2 gc.Node
	gc.Nodconst(&n2, n.Type, 0)
	gins(optoas(gc.OCMP, n.Type), &n1, &n2)
	a := x86.AJNE
	if !true_ {
		a = x86.AJEQ
	}
	gc.Patch(gc.Gbranch(a, n.Type, likely), to)
	regfree(&n1)
	return
}
Code example #10
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	newreg = &n;
 *	res = newreg
 *
 * on exit, a has been changed to be *newreg.
 * caller must regfree(a).
 * The generated code checks that the result is not *nil.
 */
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nigen-n", n)
	}

	switch n.Op {
	case gc.ONAME:
		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
			break
		}
		*a = *n
		return

		// Increase the refcount of the register so that igen's caller
	// has to call regfree.
	case gc.OINDREG:
		if n.Val.U.Reg != arm.REGSP {
			reg[n.Val.U.Reg]++
		}
		*a = *n
		return

	case gc.ODOT:
		igen(n.Left, a, res)
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		return

	case gc.ODOTPTR:
		if n.Left.Addable != 0 || n.Left.Op == gc.OCALLFUNC || n.Left.Op == gc.OCALLMETH || n.Left.Op == gc.OCALLINTER {
			// igen-able nodes.
			var n1 gc.Node
			igen(n.Left, &n1, res)

			regalloc(a, gc.Types[gc.Tptr], &n1)
			gmove(&n1, a)
			regfree(&n1)
		} else {
			regalloc(a, gc.Types[gc.Tptr], res)
			cgen(n.Left, a)
		}

		gc.Cgen_checknil(a)
		a.Op = gc.OINDREG
		a.Xoffset = n.Xoffset
		a.Type = n.Type
		return

		// Release res so that it is available for cgen_call.
	// Pick it up again after the call.
	case gc.OCALLMETH,
		gc.OCALLFUNC,
		gc.OCALLINTER:
		r := -1

		if n.Ullman >= gc.UINF {
			if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
				r = int(res.Val.U.Reg)
				reg[r]--
			}
		}

		switch n.Op {
		case gc.OCALLMETH:
			gc.Cgen_callmeth(n, 0)

		case gc.OCALLFUNC:
			cgen_call(n, 0)

		case gc.OCALLINTER:
			cgen_callinter(n, nil, 0)
		}

		if r >= 0 {
			reg[r]++
		}
		regalloc(a, gc.Types[gc.Tptr], res)
		cgen_aret(n, a)
		a.Op = gc.OINDREG
		a.Type = n.Type
		return
	}

	agenr(n, a, res)
	a.Op = gc.OINDREG
	a.Type = n.Type
}
Code example #11
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	newreg = &n;
 *
 * caller must regfree(a).
 * The generated code checks that the result is not nil.
 */
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("agenr-n", n)
	}

	nl := n.Left
	nr := n.Right

	switch n.Op {
	case gc.ODOT,
		gc.ODOTPTR,
		gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		var n1 gc.Node
		igen(n, &n1, res)
		regalloc(a, gc.Types[gc.Tptr], &n1)
		agen(&n1, a)
		regfree(&n1)

	case gc.OIND:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)

	case gc.OINDEX:
		var p2 *obj.Prog // to be patched to panicindex.
		w := uint32(n.Type.Width)
		bounded := gc.Debug['B'] != 0 || n.Bounded
		var n1 gc.Node
		var n3 gc.Node
		if nr.Addable != 0 {
			var tmp gc.Node
			if !gc.Isconst(nr, gc.CTINT) {
				gc.Tempname(&tmp, gc.Types[gc.TINT32])
			}
			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
			if !gc.Isconst(nr, gc.CTINT) {
				p2 = cgenindex(nr, &tmp, bounded)
				regalloc(&n1, tmp.Type, nil)
				gmove(&tmp, &n1)
			}
		} else if nl.Addable != 0 {
			if !gc.Isconst(nr, gc.CTINT) {
				var tmp gc.Node
				gc.Tempname(&tmp, gc.Types[gc.TINT32])
				p2 = cgenindex(nr, &tmp, bounded)
				regalloc(&n1, tmp.Type, nil)
				gmove(&tmp, &n1)
			}

			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
		} else {
			var tmp gc.Node
			gc.Tempname(&tmp, gc.Types[gc.TINT32])
			p2 = cgenindex(nr, &tmp, bounded)
			nr = &tmp
			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
			regalloc(&n1, tmp.Type, nil)
			gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
		}

		// &a is in &n3 (allocated in res)
		// i is in &n1 (if not constant)
		// w is width

		// constant index
		if gc.Isconst(nr, gc.CTINT) {
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Fatal("constant string constant index")
			}
			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
			var n2 gc.Node
			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				if gc.Debug['B'] == 0 && !n.Bounded {
					n1 = n3
					n1.Op = gc.OINDREG
					n1.Type = gc.Types[gc.Tptr]
					n1.Xoffset = int64(gc.Array_nel)
					var n4 gc.Node
					regalloc(&n4, n1.Type, nil)
					gmove(&n1, &n4)
					gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
					gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
					regfree(&n4)
					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
					ginscall(gc.Panicindex, 0)
					gc.Patch(p1, gc.Pc)
				}

				n1 = n3
				n1.Op = gc.OINDREG
				n1.Type = gc.Types[gc.Tptr]
				n1.Xoffset = int64(gc.Array_array)
				gmove(&n1, &n3)
			}

			gc.Nodconst(&n2, gc.Types[gc.Tptr], int64(v*uint64(w)))
			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
			*a = n3
			break
		}

		var n2 gc.Node
		regalloc(&n2, gc.Types[gc.TINT32], &n1) // i
		gmove(&n1, &n2)
		regfree(&n1)

		var n4 gc.Node
		if gc.Debug['B'] == 0 && !n.Bounded {
			// check bounds
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval)))
			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				n1 = n3
				n1.Op = gc.OINDREG
				n1.Type = gc.Types[gc.Tptr]
				n1.Xoffset = int64(gc.Array_nel)
				regalloc(&n4, gc.Types[gc.TUINT32], nil)
				gmove(&n1, &n4)
			} else {
				gc.Nodconst(&n4, gc.Types[gc.TUINT32], nl.Type.Bound)
			}

			gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n2, &n4)
			if n4.Op == gc.OREGISTER {
				regfree(&n4)
			}
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
			if p2 != nil {
				gc.Patch(p2, gc.Pc)
			}
			ginscall(gc.Panicindex, 0)
			gc.Patch(p1, gc.Pc)
		}

		if gc.Isconst(nl, gc.CTSTR) {
			regalloc(&n3, gc.Types[gc.Tptr], res)
			p1 := gins(arm.AMOVW, nil, &n3)
			gc.Datastring(nl.Val.U.Sval, &p1.From)
			p1.From.Type = obj.TYPE_ADDR
		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
			n1 = n3
			n1.Op = gc.OINDREG
			n1.Type = gc.Types[gc.Tptr]
			n1.Xoffset = int64(gc.Array_array)
			gmove(&n1, &n3)
		}

		if w == 0 {
			// nothing to do
		} else if w == 1 || w == 2 || w == 4 || w == 8 {
			n4 = gc.Node{}
			n4.Op = gc.OADDR
			n4.Left = &n2
			cgen(&n4, &n3)
			if w == 1 {
				gins(arm.AADD, &n2, &n3)
			} else if w == 2 {
				gshift(arm.AADD, &n2, arm.SHIFT_LL, 1, &n3)
			} else if w == 4 {
				gshift(arm.AADD, &n2, arm.SHIFT_LL, 2, &n3)
			} else if w == 8 {
				gshift(arm.AADD, &n2, arm.SHIFT_LL, 3, &n3)
			}
		} else {
			regalloc(&n4, gc.Types[gc.TUINT32], nil)
			gc.Nodconst(&n1, gc.Types[gc.TUINT32], int64(w))
			gmove(&n1, &n4)
			gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &n4, &n2)
			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
			regfree(&n4)
		}

		*a = n3
		regfree(&n2)

	default:
		regalloc(a, gc.Types[gc.Tptr], res)
		agen(n, a)
	}
}
Code example #12
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	res = n;
 * simplifies and calls gmove.
 */
func cgen(n *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\ncgen-n", n)
		gc.Dump("cgen-res", res)
	}

	if n == nil || n.Type == nil {
		return
	}

	if res == nil || res.Type == nil {
		gc.Fatal("cgen: res nil")
	}

	switch n.Op {
	case gc.OSLICE,
		gc.OSLICEARR,
		gc.OSLICESTR,
		gc.OSLICE3,
		gc.OSLICE3ARR:
		if res.Op != gc.ONAME || res.Addable == 0 {
			var n1 gc.Node
			gc.Tempname(&n1, n.Type)
			gc.Cgen_slice(n, &n1)
			cgen(&n1, res)
		} else {
			gc.Cgen_slice(n, res)
		}
		return

	case gc.OEFACE:
		if res.Op != gc.ONAME || res.Addable == 0 {
			var n1 gc.Node
			gc.Tempname(&n1, n.Type)
			gc.Cgen_eface(n, &n1)
			cgen(&n1, res)
		} else {
			gc.Cgen_eface(n, res)
		}
		return
	}

	for n.Op == gc.OCONVNOP {
		n = n.Left
	}

	if n.Ullman >= gc.UINF {
		if n.Op == gc.OINDREG {
			gc.Fatal("cgen: this is going to misscompile")
		}
		if res.Ullman >= gc.UINF {
			var n1 gc.Node
			gc.Tempname(&n1, n.Type)
			cgen(n, &n1)
			cgen(&n1, res)
			return
		}
	}

	if gc.Isfat(n.Type) {
		if n.Type.Width < 0 {
			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
		}
		sgen(n, res, n.Type.Width)
		return
	}

	// update addressability for string, slice
	// can't do in walk because n->left->addable
	// changes if n->left is an escaping local variable.
	switch n.Op {
	case gc.OSPTR,
		gc.OLEN:
		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
			n.Addable = n.Left.Addable
		}

	case gc.OCAP:
		if gc.Isslice(n.Left.Type) {
			n.Addable = n.Left.Addable
		}

	case gc.OITAB:
		n.Addable = n.Left.Addable
	}

	// if both are addressable, move
	if n.Addable != 0 && res.Addable != 0 {
		if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] || gc.Iscomplex[res.Type.Etype] {
			gmove(n, res)
		} else {
			var n1 gc.Node
			regalloc(&n1, n.Type, nil)
			gmove(n, &n1)
			cgen(&n1, res)
			regfree(&n1)
		}

		return
	}

	// if both are not addressable, use a temporary.
	if n.Addable == 0 && res.Addable == 0 {
		// could use regalloc here sometimes,
		// but have to check for ullman >= UINF.
		var n1 gc.Node
		gc.Tempname(&n1, n.Type)

		cgen(n, &n1)
		cgen(&n1, res)
		return
	}

	// if result is not addressable directly but n is,
	// compute its address and then store via the address.
	if res.Addable == 0 {
		var n1 gc.Node
		igen(res, &n1, nil)
		cgen(n, &n1)
		regfree(&n1)
		return
	}

	if gc.Complexop(n, res) {
		gc.Complexgen(n, res)
		return
	}

	// if n is sudoaddable generate addr and move
	if !gc.Is64(n.Type) && !gc.Is64(res.Type) && !gc.Iscomplex[n.Type.Etype] && !gc.Iscomplex[res.Type.Etype] {
		a := optoas(gc.OAS, n.Type)
		var w int
		var addr obj.Addr
		if sudoaddable(a, n, &addr, &w) {
			if res.Op != gc.OREGISTER {
				var n2 gc.Node
				regalloc(&n2, res.Type, nil)
				p1 := gins(a, nil, &n2)
				p1.From = addr
				if gc.Debug['g'] != 0 {
					fmt.Printf("%v [ignore previous line]\n", p1)
				}
				gmove(&n2, res)
				regfree(&n2)
			} else {
				p1 := gins(a, nil, res)
				p1.From = addr
				if gc.Debug['g'] != 0 {
					fmt.Printf("%v [ignore previous line]\n", p1)
				}
			}

			sudoclean()
			return
		}
	}

	// otherwise, the result is addressable but n is not.
	// let's do some computation.

	nl := n.Left

	nr := n.Right

	if nl != nil && nl.Ullman >= gc.UINF {
		if nr != nil && nr.Ullman >= gc.UINF {
			var n1 gc.Node
			gc.Tempname(&n1, nl.Type)
			cgen(nl, &n1)
			n2 := *n
			n2.Left = &n1
			cgen(&n2, res)
			return
		}
	}

	// 64-bit ops are hard on 32-bit machine.
	if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
		switch n.Op {
		// math goes to cgen64.
		case gc.OMINUS,
			gc.OCOM,
			gc.OADD,
			gc.OSUB,
			gc.OMUL,
			gc.OLROT,
			gc.OLSH,
			gc.ORSH,
			gc.OAND,
			gc.OOR,
			gc.OXOR:
			cgen64(n, res)

			return
		}
	}

	var a int
	var f0 gc.Node
	var n1 gc.Node
	var n2 gc.Node
	if nl != nil && gc.Isfloat[n.Type.Etype] && gc.Isfloat[nl.Type.Etype] {
		// floating-point.
		regalloc(&f0, nl.Type, res)

		if nr != nil {
			goto flt2
		}

		if n.Op == gc.OMINUS {
			nr = gc.Nodintconst(-1)
			gc.Convlit(&nr, n.Type)
			n.Op = gc.OMUL
			goto flt2
		}

		// unary
		cgen(nl, &f0)

		if n.Op != gc.OCONV && n.Op != gc.OPLUS {
			gins(optoas(int(n.Op), n.Type), &f0, &f0)
		}
		gmove(&f0, res)
		regfree(&f0)
		return
	}
	switch n.Op {
	default:
		gc.Dump("cgen", n)
		gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))

	case gc.OREAL,
		gc.OIMAG,
		gc.OCOMPLEX:
		gc.Fatal("unexpected complex")

		// these call bgen to get a bool value
	case gc.OOROR,
		gc.OANDAND,
		gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE,
		gc.OGT,
		gc.ONOT:
		p1 := gc.Gbranch(arm.AB, nil, 0)

		p2 := gc.Pc
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(arm.AB, nil, 0)
		gc.Patch(p1, gc.Pc)
		bgen(n, true, 0, p2)
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		cgen(nl, res)
		return

		// unary
	case gc.OCOM:
		a := optoas(gc.OXOR, nl.Type)

		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)
		gc.Nodconst(&n2, nl.Type, -1)
		gins(a, &n2, &n1)
		goto norm

	case gc.OMINUS:
		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)
		gc.Nodconst(&n2, nl.Type, 0)
		gins(optoas(gc.OMINUS, nl.Type), &n2, &n1)
		goto norm

		// symmetric binary
	case gc.OAND,
		gc.OOR,
		gc.OXOR,
		gc.OADD,
		gc.OMUL:
		a = optoas(int(n.Op), nl.Type)

		// symmetric binary
		if nl.Ullman < nr.Ullman {
			r := nl
			nl = nr
			nr = r
		}
		goto abop

		// asymmetric binary
	case gc.OSUB:
		a = optoas(int(n.Op), nl.Type)

		goto abop

	case gc.OHMUL:
		cgen_hmul(nl, nr, res)

	case gc.OLROT,
		gc.OLSH,
		gc.ORSH:
		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)

	case gc.OCONV:
		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
			cgen(nl, res)
			break
		}

		var n1 gc.Node
		if nl.Addable != 0 && !gc.Is64(nl.Type) {
			regalloc(&n1, nl.Type, res)
			gmove(nl, &n1)
		} else {
			if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] {
				gc.Tempname(&n1, nl.Type)
			} else {
				regalloc(&n1, nl.Type, res)
			}
			cgen(nl, &n1)
		}

		var n2 gc.Node
		if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] {
			gc.Tempname(&n2, n.Type)
		} else {
			regalloc(&n2, n.Type, nil)
		}
		gmove(&n1, &n2)
		gmove(&n2, res)
		if n1.Op == gc.OREGISTER {
			regfree(&n1)
		}
		if n2.Op == gc.OREGISTER {
			regfree(&n2)
		}

	case gc.ODOT,
		gc.ODOTPTR,
		gc.OINDEX,
		gc.OIND,
		gc.ONAME: // PHEAP or PPARAMREF var
		var n1 gc.Node
		igen(n, &n1, res)

		gmove(&n1, res)
		regfree(&n1)

		// interface table is first word of interface value
	case gc.OITAB:
		var n1 gc.Node
		igen(nl, &n1, res)

		n1.Type = n.Type
		gmove(&n1, res)
		regfree(&n1)

		// pointer is the first word of string or slice.
	case gc.OSPTR:
		if gc.Isconst(nl, gc.CTSTR) {
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)
			p1 := gins(arm.AMOVW, nil, &n1)
			gc.Datastring(nl.Val.U.Sval, &p1.From)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		var n1 gc.Node
		igen(nl, &n1, res)
		n1.Type = n.Type
		gmove(&n1, res)
		regfree(&n1)

	case gc.OLEN:
		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
			// map has len in the first 32-bit word.
			// a zero pointer means zero length
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)

			cgen(nl, &n1)

			var n2 gc.Node
			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
			gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)

			n2 = n1
			n2.Op = gc.OINDREG
			n2.Type = gc.Types[gc.TINT32]
			gmove(&n2, &n1)

			gc.Patch(p1, gc.Pc)

			gmove(&n1, res)
			regfree(&n1)
			break
		}

		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
			// both slice and string have len one pointer into the struct.
			var n1 gc.Node
			igen(nl, &n1, res)

			n1.Type = gc.Types[gc.TUINT32]
			n1.Xoffset += int64(gc.Array_nel)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))

	case gc.OCAP:
		if gc.Istype(nl.Type, gc.TCHAN) {
			// chan has cap in the second 32-bit word.
			// a zero pointer means zero length
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)

			cgen(nl, &n1)

			var n2 gc.Node
			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
			gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)

			n2 = n1
			n2.Op = gc.OINDREG
			n2.Xoffset = 4
			n2.Type = gc.Types[gc.TINT32]
			gmove(&n2, &n1)

			gc.Patch(p1, gc.Pc)

			gmove(&n1, res)
			regfree(&n1)
			break
		}

		if gc.Isslice(nl.Type) {
			var n1 gc.Node
			igen(nl, &n1, res)
			n1.Type = gc.Types[gc.TUINT32]
			n1.Xoffset += int64(gc.Array_cap)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))

	case gc.OADDR:
		agen(nl, res)

		// Release res so that it is available for cgen_call.
	// Pick it up again after the call.
	case gc.OCALLMETH,
		gc.OCALLFUNC:
		rg := -1

		if n.Ullman >= gc.UINF {
			if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
				rg = int(res.Val.U.Reg)
				reg[rg]--
			}
		}

		if n.Op == gc.OCALLMETH {
			gc.Cgen_callmeth(n, 0)
		} else {
			cgen_call(n, 0)
		}
		if rg >= 0 {
			reg[rg]++
		}
		cgen_callret(n, res)

	case gc.OCALLINTER:
		cgen_callinter(n, res, 0)
		cgen_callret(n, res)

	case gc.OMOD,
		gc.ODIV:
		a = optoas(int(n.Op), nl.Type)
		goto abop
	}

	return

	// TODO(kaib): use fewer registers here.
abop: // asymmetric binary
	if nl.Ullman >= nr.Ullman {
		regalloc(&n1, nl.Type, res)
		cgen(nl, &n1)
		switch n.Op {
		case gc.OADD,
			gc.OSUB,
			gc.OAND,
			gc.OOR,
			gc.OXOR:
			if gc.Smallintconst(nr) {
				n2 = *nr
				break
			}
			fallthrough

		default:
			regalloc(&n2, nr.Type, nil)
			cgen(nr, &n2)
		}
	} else {
		switch n.Op {
		case gc.OADD,
			gc.OSUB,
			gc.OAND,
			gc.OOR,
			gc.OXOR:
			if gc.Smallintconst(nr) {
				n2 = *nr
				break
			}
			fallthrough

		default:
			regalloc(&n2, nr.Type, res)
			cgen(nr, &n2)
		}

		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)
	}

	gins(a, &n2, &n1)

	// Normalize result for types smaller than word.
norm:
	if n.Type.Width < int64(gc.Widthptr) {
		switch n.Op {
		case gc.OADD,
			gc.OSUB,
			gc.OMUL,
			gc.OCOM,
			gc.OMINUS:
			gins(optoas(gc.OAS, n.Type), &n1, &n1)
		}
	}

	gmove(&n1, res)
	regfree(&n1)
	if n2.Op != gc.OLITERAL {
		regfree(&n2)
	}
	return

flt2: // binary
	var f1 gc.Node
	if nl.Ullman >= nr.Ullman {
		cgen(nl, &f0)
		regalloc(&f1, n.Type, nil)
		gmove(&f0, &f1)
		cgen(nr, &f0)
		gins(optoas(int(n.Op), n.Type), &f0, &f1)
	} else {
		cgen(nr, &f0)
		regalloc(&f1, n.Type, nil)
		cgen(nl, &f1)
		gins(optoas(int(n.Op), n.Type), &f0, &f1)
	}

	gmove(&f1, res)
	regfree(&f0)
	regfree(&f1)
	return
}
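The evaluation-order logic in cgen above (and in its abop path) keys off Ullman numbers: the operand that needs more registers is generated first, so its value can sit in a single register while the cheaper operand is computed. The sketch below only illustrates that register estimate; the Expr type and ullman function are hypothetical and are not part of the gc package.

package main

import "fmt"

type Expr struct {
	Left, Right *Expr
	Name        string
}

// ullman returns the classic Sethi-Ullman register estimate for a binary tree:
// a leaf needs one register; an interior node needs the larger of its children's
// estimates, or one more than that if the two estimates are equal.
func ullman(e *Expr) int {
	if e == nil {
		return 0
	}
	if e.Left == nil && e.Right == nil {
		return 1
	}
	l, r := ullman(e.Left), ullman(e.Right)
	if l == r {
		return l + 1
	}
	if l > r {
		return l
	}
	return r
}

func main() {
	// (a+b) * c : the left subtree is more expensive, so it is generated first.
	e := &Expr{
		Name:  "*",
		Left:  &Expr{Name: "+", Left: &Expr{Name: "a"}, Right: &Expr{Name: "b"}},
		Right: &Expr{Name: "c"},
	}
	fmt.Println("ullman(left) =", ullman(e.Left), "ullman(right) =", ullman(e.Right))
}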
Code Example #13
File: cgen.go Project: klueska/go-akaros
/*
 * block copy:
 *	memmove(&res, &n, w);
 * NB: character copy assumes a little-endian architecture
 */
func sgen(n *gc.Node, res *gc.Node, w int64) {
	if gc.Debug['g'] != 0 {
		fmt.Printf("\nsgen w=%d\n", w)
		gc.Dump("r", n)
		gc.Dump("res", res)
	}

	if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
		gc.Fatal("sgen UINF")
	}

	if w < 0 || int64(int32(w)) != w {
		gc.Fatal("sgen copy %d", w)
	}

	if n.Type == nil {
		gc.Fatal("sgen: missing type")
	}

	if w == 0 {
		// evaluate side effects only.
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], nil)

		agen(res, &dst)
		agen(n, &dst)
		regfree(&dst)
		return
	}

	// If copying .args, that's all the results, so record definition sites
	// for them for the liveness analysis.
	if res.Op == gc.ONAME && res.Sym.Name == ".args" {
		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
			if l.N.Class == gc.PPARAMOUT {
				gc.Gvardef(l.N)
			}
		}
	}

	// Avoid taking the address for simple enough types.
	if componentgen(n, res) {
		return
	}

	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
	}
	c := int32(w / int64(align))

	// offset on the stack
	osrc := stkof(n)

	odst := stkof(res)
	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
		// osrc and odst both on stack, and at least one is in
		// an unknown position.  Could generate code to test
		// for forward/backward copy, but instead just copy
		// to a temporary location first.
		var tmp gc.Node
		gc.Tempname(&tmp, n.Type)

		sgen(n, &tmp, w)
		sgen(&tmp, res, w)
		return
	}

	if osrc%int32(align) != 0 || odst%int32(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Val.U.Reg = REGALLOC_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Val.U.Reg = REGALLOC_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Val.U.Reg = REGALLOC_R0 + 2

		var src gc.Node
		regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			agen(res, &dst)
			agen(n, &src)
		}

		var tmp gc.Node
		regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		regfree(&tmp)
		regfree(&src)
		regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		agenr(n, &dst, res) // temporarily use dst
		regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agenr(res, &dst, res)
		agenr(n, &src, nil)
	}

	var tmp gc.Node
	regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		regfree(&nend)
	} else {
		var p *obj.Prog
		for {
			tmp14 := c
			c--
			if tmp14 <= 0 {
				break
			}
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	regfree(&dst)
	regfree(&src)
	regfree(&tmp)
}
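The duffcopy branch in sgen above jumps into the middle of the runtime's copy routine; per the comment in the code, each copy unit is assumed to occupy 8 bytes of instructions and there are 128 units, so the entry offset is 8*(128-c). A minimal sketch of that arithmetic, with the constants taken from that comment rather than from the runtime itself:

package main

import "fmt"

const (
	unitSize  = 8   // assumed bytes of machine code per copy unit (see the sgen comment)
	unitCount = 128 // assumed total copy units in the runtime's duffcopy
)

// duffOffset returns the code offset to branch to so that exactly c word copies run.
func duffOffset(c int64) int64 {
	return unitSize * (unitCount - c)
}

func main() {
	for _, c := range []int64{4, 32, 128} {
		fmt.Printf("c=%3d -> ADUFFCOPY offset %d\n", c, duffOffset(c))
	}
}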
Code Example #14
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	if(n == true) goto to;
 */
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nbgen", n)
	}

	if n == nil {
		n = gc.Nodbool(true)
	}

	if n.Ninit != nil {
		gc.Genlist(n.Ninit)
	}

	if n.Type == nil {
		gc.Convlit(&n, gc.Types[gc.TBOOL])
		if n.Type == nil {
			return
		}
	}

	et := int(n.Type.Etype)
	if et != gc.TBOOL {
		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
		gc.Patch(gins(obj.AEND, nil, nil), to)
		return
	}

	var nr *gc.Node

	var nl *gc.Node
	switch n.Op {
	default:
		a := gc.ONE
		if !true_ {
			a = gc.OEQ
		}
		gencmp0(n, n.Type, a, likely, to)
		return

		// need to ask if it is bool?
	case gc.OLITERAL:
		if !true_ == (n.Val.U.Bval == 0) {
			gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
		}
		return

	case gc.OANDAND,
		gc.OOROR:
		if (n.Op == gc.OANDAND) == true_ {
			p1 := gc.Gbranch(obj.AJMP, nil, 0)
			p2 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			bgen(n.Left, !true_, -likely, p2)
			bgen(n.Right, !true_, -likely, p2)
			p1 = gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, to)
			gc.Patch(p2, gc.Pc)
		} else {
			bgen(n.Left, true_, likely, to)
			bgen(n.Right, true_, likely, to)
		}

		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		nr = n.Right
		if nr == nil || nr.Type == nil {
			return
		}
		fallthrough

	case gc.ONOT: // unary
		nl = n.Left

		if nl == nil || nl.Type == nil {
			return
		}
	}

	switch n.Op {
	case gc.ONOT:
		bgen(nl, !true_, likely, to)
		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		a := int(n.Op)
		if !true_ {
			if gc.Isfloat[nl.Type.Etype] {
				// brcom is not valid on floats when NaN is involved.
				p1 := gc.Gbranch(arm.AB, nil, 0)

				p2 := gc.Gbranch(arm.AB, nil, 0)
				gc.Patch(p1, gc.Pc)
				ll := n.Ninit
				n.Ninit = nil
				bgen(n, true, -likely, p2)
				n.Ninit = ll
				gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
				gc.Patch(p2, gc.Pc)
				return
			}

			a = gc.Brcom(a)
			true_ = !true_
		}

		// make simplest on right
		if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) {
			a = gc.Brrev(a)
			r := nl
			nl = nr
			nr = r
		}

		if gc.Isslice(nl.Type) {
			// only valid to cmp darray to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal array comparison")
				break
			}

			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Xoffset += int64(gc.Array_array)
			n1.Type = gc.Types[gc.Tptr]
			gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
			regfree(&n1)
			break
		}

		if gc.Isinter(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal interface comparison")
				break
			}

			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Type = gc.Types[gc.Tptr]
			n1.Xoffset += 0
			gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
			regfree(&n1)
			break
		}

		if gc.Iscomplex[nl.Type.Etype] {
			gc.Complexbool(a, nl, nr, true_, likely, to)
			break
		}

		if gc.Is64(nr.Type) {
			if nl.Addable == 0 {
				var n1 gc.Node
				gc.Tempname(&n1, nl.Type)
				cgen(nl, &n1)
				nl = &n1
			}

			if nr.Addable == 0 {
				var n2 gc.Node
				gc.Tempname(&n2, nr.Type)
				cgen(nr, &n2)
				nr = &n2
			}

			cmp64(nl, nr, a, likely, to)
			break
		}

		if nr.Op == gc.OLITERAL {
			if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
				gencmp0(nl, nl.Type, a, likely, to)
				break
			}

			if nr.Val.Ctype == gc.CTNIL {
				gencmp0(nl, nl.Type, a, likely, to)
				break
			}
		}

		a = optoas(a, nr.Type)

		if nr.Ullman >= gc.UINF {
			var n1 gc.Node
			regalloc(&n1, nl.Type, nil)
			cgen(nl, &n1)

			var tmp gc.Node
			gc.Tempname(&tmp, nl.Type)
			gmove(&n1, &tmp)
			regfree(&n1)

			var n2 gc.Node
			regalloc(&n2, nr.Type, nil)
			cgen(nr, &n2)

			regalloc(&n1, nl.Type, nil)
			cgen(&tmp, &n1)

			gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
			gc.Patch(gc.Gbranch(a, nr.Type, likely), to)

			regfree(&n1)
			regfree(&n2)
			break
		}

		var n3 gc.Node
		gc.Tempname(&n3, nl.Type)
		cgen(nl, &n3)

		var tmp gc.Node
		gc.Tempname(&tmp, nr.Type)
		cgen(nr, &tmp)

		var n1 gc.Node
		regalloc(&n1, nl.Type, nil)
		gmove(&n3, &n1)

		var n2 gc.Node
		regalloc(&n2, nr.Type, nil)
		gmove(&tmp, &n2)

		gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
		if gc.Isfloat[nl.Type.Etype] {
			if n.Op == gc.ONE {
				p1 := gc.Gbranch(arm.ABVS, nr.Type, likely)
				gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
				gc.Patch(p1, to)
			} else {
				p1 := gc.Gbranch(arm.ABVS, nr.Type, -likely)
				gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
				gc.Patch(p1, gc.Pc)
			}
		} else {
			gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
		}

		regfree(&n1)
		regfree(&n2)
	}

	return
}
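The float path in bgen above refuses to complement the condition with Brcom because of NaN: every ordered comparison involving NaN is false, so branching on the complemented operator is not the same as branching on the negated result. A short, self-contained Go illustration, independent of the compiler code:

package main

import (
	"fmt"
	"math"
)

func main() {
	x, y := math.NaN(), 1.0
	fmt.Println("x < y    =", x < y)    // false
	fmt.Println("x >= y   =", x >= y)   // false as well, so >= is not the complement of <
	fmt.Println("!(x < y) =", !(x < y)) // true, which is what branch-on-false must mean
}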
Code Example #15
File: gsubr.go Project: klueska/go-akaros
func anyregalloc() bool {
	var j int

	for i := int(0); i < len(reg); i++ {
		if reg[i] == 0 {
			goto ok
		}
		for j = 0; j < len(resvd); j++ {
			if resvd[j] == i {
				goto ok
			}
		}
		return true
	ok:
	}

	return false
}

/*
 * allocate register of type t, leave in n.
 * if o != N, o is desired fixed register.
 * caller must regfree(n).
 */
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
	if t == nil {
		gc.Fatal("regalloc: t nil")
	}
	et := int(gc.Simtype[t.Etype])

	if gc.Debug['r'] != 0 {
		fixfree := int(0)
		fltfree := int(0)
		for i := int(arm64.REG_R0); i < arm64.REG_F31; i++ {
			if reg[i-arm64.REG_R0] == 0 {
				if i < arm64.REG_F0 {
					fixfree++
				} else {
					fltfree++
				}
			}
		}

		fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
	}

	var i int
	switch et {
	case gc.TINT8,
		gc.TUINT8,
		gc.TINT16,
		gc.TUINT16,
		gc.TINT32,
		gc.TUINT32,
		gc.TINT64,
		gc.TUINT64,
		gc.TPTR32,
		gc.TPTR64,
		gc.TBOOL:
		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= arm64.REGMIN && i <= arm64.REGMAX {
				goto out
			}
		}

		for i = arm64.REGMIN; i <= arm64.REGMAX; i++ {
			if reg[i-arm64.REG_R0] == 0 {
				regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n))
				goto out
			}
		}

		gc.Flusherrors()
		for i := int(arm64.REG_R0); i < arm64.REG_R0+arm64.NREG; i++ {
			fmt.Printf("R%d %p\n", i, regpc[i-arm64.REG_R0])
		}
		gc.Fatal("out of fixed registers")

	case gc.TFLOAT32,
		gc.TFLOAT64:
		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= arm64.FREGMIN && i <= arm64.FREGMAX {
				goto out
			}
		}

		for i = arm64.FREGMIN; i <= arm64.FREGMAX; i++ {
			if reg[i-arm64.REG_R0] == 0 {
				regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n))
				goto out
			}
		}

		gc.Flusherrors()
		for i := int(arm64.REG_F0); i < arm64.REG_F0+arm64.NREG; i++ {
			fmt.Printf("F%d %p\n", i, regpc[i-arm64.REG_R0])
		}
		gc.Fatal("out of floating registers")

	case gc.TCOMPLEX64,
		gc.TCOMPLEX128:
		gc.Tempname(n, t)
		return
	}

	gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
	return

out:
	reg[i-arm64.REG_R0]++
	gc.Nodreg(n, t, i)
}

func regfree(n *gc.Node) {
	if n.Op == gc.ONAME {
		return
	}
	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
		gc.Fatal("regfree: not a register")
	}
	i := int(int(n.Val.U.Reg) - arm64.REG_R0)
	if i == arm64.REGSP-arm64.REG_R0 {
		return
	}
	if i < 0 || i >= len(reg) {
		gc.Fatal("regfree: reg out of range")
	}
	if reg[i] <= 0 {
		gc.Fatal("regfree: reg not allocated")
	}
	reg[i]--
	if reg[i] == 0 {
		regpc[i] = 0
	}
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) {
		// cannot have more than 16-bit of immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(arm64.AMOVD, &n1, &ntmp)
		gins(as, &ntmp, n2)
		regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}

/*
 * generate
 *	as n, $c (CMP)
 */
func ginscon2(as int, n2 *gc.Node, c int64) {
	var n1 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)

	switch as {
	default:
		gc.Fatal("ginscon2")

	case arm64.ACMP:
		if -arm64.BIG <= c && c <= arm64.BIG {
			gcmp(as, n2, &n1)
			return
		}
	}

	// MOV n1 into register first
	var ntmp gc.Node
	regalloc(&ntmp, gc.Types[gc.TINT64], nil)

	gins(arm64.AMOVD, &n1, &ntmp)
	gcmp(as, n2, &ntmp)
	regfree(&ntmp)
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := int(gc.Simsimtype(f.Type))
	tt := int(gc.Simsimtype(t.Type))
	cvt := (*gc.Type)(t.Type)

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var r1 gc.Node
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		switch tt {
		default:
			gc.Convconst(&con, t.Type, &f.Val)

		case gc.TINT32,
			gc.TINT16,
			gc.TINT8:
			var con gc.Node
			gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
			var r1 gc.Node
			regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			regfree(&r1)
			return

		case gc.TUINT32,
			gc.TUINT16,
			gc.TUINT8:
			var con gc.Node
			gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
			var r1 gc.Node
			regalloc(&r1, con.Type, t)
			gins(arm64.AMOVD, &con, &r1)
			gmove(&r1, t)
			regfree(&r1)
			return
		}

		f = &con
		ft = tt // so big switch will choose a simple mov

		// constants can't move directly to memory.
		if gc.Ismem(t) {
			goto hard
		}
	}

	// value -> value copy, first operand in memory.
	// any floating point operand requires register
	// src, so goto hard to copy to register first.
	if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
		cvt = gc.Types[ft]
		goto hard
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TUINT8<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.TINT8<<16 | gc.TUINT8, // same size
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TUINT8,
		// truncate
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TUINT16<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.TINT16<<16 | gc.TUINT16, // same size
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TUINT16,
		// truncate
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TUINT32<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TINT32,
		// truncate
		gc.TUINT64<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.TINT32<<16 | gc.TUINT32, // same size
		gc.TUINT32<<16 | gc.TUINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = arm64.AMOVWU

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = arm64.AMOVD

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16,
		gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32,
		gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = arm64.AMOVB

		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16,
		gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32,
		gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = arm64.AMOVBU

		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32,
		gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = arm64.AMOVH

		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32,
		gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = arm64.AMOVHU

		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = arm64.AMOVW

		goto rdst

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = arm64.AMOVWU

		goto rdst

	/*
	* float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = arm64.AFCVTZSSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = arm64.AFCVTZSDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = arm64.AFCVTZSS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = arm64.AFCVTZSD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT32:
		a = arm64.AFCVTZUSW
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT32:
		a = arm64.AFCVTZUDW
		goto rdst

	case gc.TFLOAT32<<16 | gc.TUINT64:
		a = arm64.AFCVTZUS
		goto rdst

	case gc.TFLOAT64<<16 | gc.TUINT64:
		a = arm64.AFCVTZUD
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	/*
	 * integer to float
	 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFWS

		goto rdst

	case gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFWD

		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = arm64.ASCVTFS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = arm64.ASCVTFD
		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFWS

		goto rdst

	case gc.TUINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT32<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFWD

		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT32:
		a = arm64.AUCVTFS
		goto rdst

	case gc.TUINT64<<16 | gc.TFLOAT64:
		a = arm64.AUCVTFD
		goto rdst

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = arm64.AFCVTSD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = arm64.AFCVTDS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	af := obj.Addr{}
	at := obj.Addr{}
	if f != nil {
		af = gc.Naddr(f)
	}
	if t != nil {
		at = gc.Naddr(t)
	}
	p := (*obj.Prog)(gc.Prog(as))
	if f != nil {
		p.From = af
	}
	if t != nil {
		p.To = at
	}
	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case arm64.AMOVB,
		arm64.AMOVBU:
		w = 1

	case arm64.AMOVH,
		arm64.AMOVHU:
		w = 2

	case arm64.AMOVW,
		arm64.AMOVWU:
		w = 4

	case arm64.AMOVD:
		if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
	}

	return p
}

func fixlargeoffset(n *gc.Node) {
	if n == nil {
		return
	}
	if n.Op != gc.OINDREG {
		return
	}
	if -4096 <= n.Xoffset && n.Xoffset < 4096 {
		return
	}
	a := gc.Node(*n)
	a.Op = gc.OREGISTER
	a.Type = gc.Types[gc.Tptr]
	a.Xoffset = 0
	gc.Cgen_checknil(&a)
	ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
	n.Xoffset = 0
}

/*
 * insert n into reg slot of p
 */
func raddr(n *gc.Node, p *obj.Prog) {
	var a obj.Addr

	a = gc.Naddr(n)
	if a.Type != obj.TYPE_REG {
		if n != nil {
			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
		} else {
			gc.Fatal("bad in raddr: <null>")
		}
		p.Reg = 0
	} else {
		p.Reg = a.Reg
	}
}

func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
	if lhs.Op != gc.OREGISTER {
		gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
	}

	p := gins(as, rhs, nil)
	raddr(lhs, p)
	return p
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op int, t *gc.Type) int {
	if t == nil {
		gc.Fatal("optoas: t is nil")
	}

	a := int(obj.AXXX)
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))

	case gc.OEQ<<16 | gc.TBOOL,
		gc.OEQ<<16 | gc.TINT8,
		gc.OEQ<<16 | gc.TUINT8,
		gc.OEQ<<16 | gc.TINT16,
		gc.OEQ<<16 | gc.TUINT16,
		gc.OEQ<<16 | gc.TINT32,
		gc.OEQ<<16 | gc.TUINT32,
		gc.OEQ<<16 | gc.TINT64,
		gc.OEQ<<16 | gc.TUINT64,
		gc.OEQ<<16 | gc.TPTR32,
		gc.OEQ<<16 | gc.TPTR64,
		gc.OEQ<<16 | gc.TFLOAT32,
		gc.OEQ<<16 | gc.TFLOAT64:
		a = arm64.ABEQ

	case gc.ONE<<16 | gc.TBOOL,
		gc.ONE<<16 | gc.TINT8,
		gc.ONE<<16 | gc.TUINT8,
		gc.ONE<<16 | gc.TINT16,
		gc.ONE<<16 | gc.TUINT16,
		gc.ONE<<16 | gc.TINT32,
		gc.ONE<<16 | gc.TUINT32,
		gc.ONE<<16 | gc.TINT64,
		gc.ONE<<16 | gc.TUINT64,
		gc.ONE<<16 | gc.TPTR32,
		gc.ONE<<16 | gc.TPTR64,
		gc.ONE<<16 | gc.TFLOAT32,
		gc.ONE<<16 | gc.TFLOAT64:
		a = arm64.ABNE

	case gc.OLT<<16 | gc.TINT8,
		gc.OLT<<16 | gc.TINT16,
		gc.OLT<<16 | gc.TINT32,
		gc.OLT<<16 | gc.TINT64:
		a = arm64.ABLT

	case gc.OLT<<16 | gc.TUINT8,
		gc.OLT<<16 | gc.TUINT16,
		gc.OLT<<16 | gc.TUINT32,
		gc.OLT<<16 | gc.TUINT64,
		gc.OLT<<16 | gc.TFLOAT32,
		gc.OLT<<16 | gc.TFLOAT64:
		a = arm64.ABLO

	case gc.OLE<<16 | gc.TINT8,
		gc.OLE<<16 | gc.TINT16,
		gc.OLE<<16 | gc.TINT32,
		gc.OLE<<16 | gc.TINT64:
		a = arm64.ABLE

	case gc.OLE<<16 | gc.TUINT8,
		gc.OLE<<16 | gc.TUINT16,
		gc.OLE<<16 | gc.TUINT32,
		gc.OLE<<16 | gc.TUINT64,
		gc.OLE<<16 | gc.TFLOAT32,
		gc.OLE<<16 | gc.TFLOAT64:
		a = arm64.ABLS

	case gc.OGT<<16 | gc.TINT8,
		gc.OGT<<16 | gc.TINT16,
		gc.OGT<<16 | gc.TINT32,
		gc.OGT<<16 | gc.TINT64,
		gc.OGT<<16 | gc.TFLOAT32,
		gc.OGT<<16 | gc.TFLOAT64:
		a = arm64.ABGT

	case gc.OGT<<16 | gc.TUINT8,
		gc.OGT<<16 | gc.TUINT16,
		gc.OGT<<16 | gc.TUINT32,
		gc.OGT<<16 | gc.TUINT64:
		a = arm64.ABHI

	case gc.OGE<<16 | gc.TINT8,
		gc.OGE<<16 | gc.TINT16,
		gc.OGE<<16 | gc.TINT32,
		gc.OGE<<16 | gc.TINT64,
		gc.OGE<<16 | gc.TFLOAT32,
		gc.OGE<<16 | gc.TFLOAT64:
		a = arm64.ABGE

	case gc.OGE<<16 | gc.TUINT8,
		gc.OGE<<16 | gc.TUINT16,
		gc.OGE<<16 | gc.TUINT32,
		gc.OGE<<16 | gc.TUINT64:
		a = arm64.ABHS

	case gc.OCMP<<16 | gc.TBOOL,
		gc.OCMP<<16 | gc.TINT8,
		gc.OCMP<<16 | gc.TINT16,
		gc.OCMP<<16 | gc.TINT32,
		gc.OCMP<<16 | gc.TPTR32,
		gc.OCMP<<16 | gc.TINT64,
		gc.OCMP<<16 | gc.TUINT8,
		gc.OCMP<<16 | gc.TUINT16,
		gc.OCMP<<16 | gc.TUINT32,
		gc.OCMP<<16 | gc.TUINT64,
		gc.OCMP<<16 | gc.TPTR64:
		a = arm64.ACMP

	case gc.OCMP<<16 | gc.TFLOAT32:
		a = arm64.AFCMPS

	case gc.OCMP<<16 | gc.TFLOAT64:
		a = arm64.AFCMPD

	case gc.OAS<<16 | gc.TBOOL,
		gc.OAS<<16 | gc.TINT8:
		a = arm64.AMOVB

	case gc.OAS<<16 | gc.TUINT8:
		a = arm64.AMOVBU

	case gc.OAS<<16 | gc.TINT16:
		a = arm64.AMOVH

	case gc.OAS<<16 | gc.TUINT16:
		a = arm64.AMOVHU

	case gc.OAS<<16 | gc.TINT32:
		a = arm64.AMOVW

	case gc.OAS<<16 | gc.TUINT32,
		gc.OAS<<16 | gc.TPTR32:
		a = arm64.AMOVWU

	case gc.OAS<<16 | gc.TINT64,
		gc.OAS<<16 | gc.TUINT64,
		gc.OAS<<16 | gc.TPTR64:
		a = arm64.AMOVD

	case gc.OAS<<16 | gc.TFLOAT32:
		a = arm64.AFMOVS

	case gc.OAS<<16 | gc.TFLOAT64:
		a = arm64.AFMOVD

	case gc.OADD<<16 | gc.TINT8,
		gc.OADD<<16 | gc.TUINT8,
		gc.OADD<<16 | gc.TINT16,
		gc.OADD<<16 | gc.TUINT16,
		gc.OADD<<16 | gc.TINT32,
		gc.OADD<<16 | gc.TUINT32,
		gc.OADD<<16 | gc.TPTR32,
		gc.OADD<<16 | gc.TINT64,
		gc.OADD<<16 | gc.TUINT64,
		gc.OADD<<16 | gc.TPTR64:
		a = arm64.AADD

	case gc.OADD<<16 | gc.TFLOAT32:
		a = arm64.AFADDS

	case gc.OADD<<16 | gc.TFLOAT64:
		a = arm64.AFADDD

	case gc.OSUB<<16 | gc.TINT8,
		gc.OSUB<<16 | gc.TUINT8,
		gc.OSUB<<16 | gc.TINT16,
		gc.OSUB<<16 | gc.TUINT16,
		gc.OSUB<<16 | gc.TINT32,
		gc.OSUB<<16 | gc.TUINT32,
		gc.OSUB<<16 | gc.TPTR32,
		gc.OSUB<<16 | gc.TINT64,
		gc.OSUB<<16 | gc.TUINT64,
		gc.OSUB<<16 | gc.TPTR64:
		a = arm64.ASUB

	case gc.OSUB<<16 | gc.TFLOAT32:
		a = arm64.AFSUBS

	case gc.OSUB<<16 | gc.TFLOAT64:
		a = arm64.AFSUBD

	case gc.OMINUS<<16 | gc.TINT8,
		gc.OMINUS<<16 | gc.TUINT8,
		gc.OMINUS<<16 | gc.TINT16,
		gc.OMINUS<<16 | gc.TUINT16,
		gc.OMINUS<<16 | gc.TINT32,
		gc.OMINUS<<16 | gc.TUINT32,
		gc.OMINUS<<16 | gc.TPTR32,
		gc.OMINUS<<16 | gc.TINT64,
		gc.OMINUS<<16 | gc.TUINT64,
		gc.OMINUS<<16 | gc.TPTR64:
		a = arm64.ANEG

	case gc.OMINUS<<16 | gc.TFLOAT32:
		a = arm64.AFNEGS

	case gc.OMINUS<<16 | gc.TFLOAT64:
		a = arm64.AFNEGD

	case gc.OAND<<16 | gc.TINT8,
		gc.OAND<<16 | gc.TUINT8,
		gc.OAND<<16 | gc.TINT16,
		gc.OAND<<16 | gc.TUINT16,
		gc.OAND<<16 | gc.TINT32,
		gc.OAND<<16 | gc.TUINT32,
		gc.OAND<<16 | gc.TPTR32,
		gc.OAND<<16 | gc.TINT64,
		gc.OAND<<16 | gc.TUINT64,
		gc.OAND<<16 | gc.TPTR64:
		a = arm64.AAND

	case gc.OOR<<16 | gc.TINT8,
		gc.OOR<<16 | gc.TUINT8,
		gc.OOR<<16 | gc.TINT16,
		gc.OOR<<16 | gc.TUINT16,
		gc.OOR<<16 | gc.TINT32,
		gc.OOR<<16 | gc.TUINT32,
		gc.OOR<<16 | gc.TPTR32,
		gc.OOR<<16 | gc.TINT64,
		gc.OOR<<16 | gc.TUINT64,
		gc.OOR<<16 | gc.TPTR64:
		a = arm64.AORR

	case gc.OXOR<<16 | gc.TINT8,
		gc.OXOR<<16 | gc.TUINT8,
		gc.OXOR<<16 | gc.TINT16,
		gc.OXOR<<16 | gc.TUINT16,
		gc.OXOR<<16 | gc.TINT32,
		gc.OXOR<<16 | gc.TUINT32,
		gc.OXOR<<16 | gc.TPTR32,
		gc.OXOR<<16 | gc.TINT64,
		gc.OXOR<<16 | gc.TUINT64,
		gc.OXOR<<16 | gc.TPTR64:
		a = arm64.AEOR

		// TODO(minux): handle rotates
	//case CASE(OLROT, TINT8):
	//case CASE(OLROT, TUINT8):
	//case CASE(OLROT, TINT16):
	//case CASE(OLROT, TUINT16):
	//case CASE(OLROT, TINT32):
	//case CASE(OLROT, TUINT32):
	//case CASE(OLROT, TPTR32):
	//case CASE(OLROT, TINT64):
	//case CASE(OLROT, TUINT64):
	//case CASE(OLROT, TPTR64):
	//	a = 0//???; RLDC?
	//	break;

	case gc.OLSH<<16 | gc.TINT8,
		gc.OLSH<<16 | gc.TUINT8,
		gc.OLSH<<16 | gc.TINT16,
		gc.OLSH<<16 | gc.TUINT16,
		gc.OLSH<<16 | gc.TINT32,
		gc.OLSH<<16 | gc.TUINT32,
		gc.OLSH<<16 | gc.TPTR32,
		gc.OLSH<<16 | gc.TINT64,
		gc.OLSH<<16 | gc.TUINT64,
		gc.OLSH<<16 | gc.TPTR64:
		a = arm64.ALSL

	case gc.ORSH<<16 | gc.TUINT8,
		gc.ORSH<<16 | gc.TUINT16,
		gc.ORSH<<16 | gc.TUINT32,
		gc.ORSH<<16 | gc.TPTR32,
		gc.ORSH<<16 | gc.TUINT64,
		gc.ORSH<<16 | gc.TPTR64:
		a = arm64.ALSR

	case gc.ORSH<<16 | gc.TINT8,
		gc.ORSH<<16 | gc.TINT16,
		gc.ORSH<<16 | gc.TINT32,
		gc.ORSH<<16 | gc.TINT64:
		a = arm64.AASR

		// TODO(minux): handle rotates
	//case CASE(ORROTC, TINT8):
	//case CASE(ORROTC, TUINT8):
	//case CASE(ORROTC, TINT16):
	//case CASE(ORROTC, TUINT16):
	//case CASE(ORROTC, TINT32):
	//case CASE(ORROTC, TUINT32):
	//case CASE(ORROTC, TINT64):
	//case CASE(ORROTC, TUINT64):
	//	a = 0//??? RLDC??
	//	break;

	case gc.OHMUL<<16 | gc.TINT64:
		a = arm64.ASMULH

	case gc.OHMUL<<16 | gc.TUINT64,
		gc.OHMUL<<16 | gc.TPTR64:
		a = arm64.AUMULH

	case gc.OMUL<<16 | gc.TINT8,
		gc.OMUL<<16 | gc.TINT16,
		gc.OMUL<<16 | gc.TINT32:
		a = arm64.ASMULL

	case gc.OMUL<<16 | gc.TINT64:
		a = arm64.AMUL

	case gc.OMUL<<16 | gc.TUINT8,
		gc.OMUL<<16 | gc.TUINT16,
		gc.OMUL<<16 | gc.TUINT32,
		gc.OMUL<<16 | gc.TPTR32:
		// don't use word multiply, the high 32 bits are undefined.
		a = arm64.AUMULL

	case gc.OMUL<<16 | gc.TUINT64,
		gc.OMUL<<16 | gc.TPTR64:
		a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.

	case gc.OMUL<<16 | gc.TFLOAT32:
		a = arm64.AFMULS

	case gc.OMUL<<16 | gc.TFLOAT64:
		a = arm64.AFMULD

	case gc.ODIV<<16 | gc.TINT8,
		gc.ODIV<<16 | gc.TINT16,
		gc.ODIV<<16 | gc.TINT32,
		gc.ODIV<<16 | gc.TINT64:
		a = arm64.ASDIV

	case gc.ODIV<<16 | gc.TUINT8,
		gc.ODIV<<16 | gc.TUINT16,
		gc.ODIV<<16 | gc.TUINT32,
		gc.ODIV<<16 | gc.TPTR32,
		gc.ODIV<<16 | gc.TUINT64,
		gc.ODIV<<16 | gc.TPTR64:
		a = arm64.AUDIV

	case gc.ODIV<<16 | gc.TFLOAT32:
		a = arm64.AFDIVS

	case gc.ODIV<<16 | gc.TFLOAT64:
		a = arm64.AFDIVD
	}

	return a
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

func xgen(n *gc.Node, a *gc.Node, o int) bool {
	// TODO(minux)

	return -1 != 0 /*TypeKind(100016)*/
}

func sudoclean() {
	return
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	// TODO(minux)

	*a = obj.Addr{}
	return false
}
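Both gmove and optoas above collapse an (operator, type) pair into a single switch key with uint32(op)<<16 | uint32(tt). A minimal sketch of the same key-packing idea; the Op and Ty constants and the instrFor function are hypothetical stand-ins, not the gc package's tables:

package main

import "fmt"

type Op uint32
type Ty uint32

const (
	OADD Op = iota + 1
	OSUB
)

const (
	TINT32 Ty = iota + 1
	TFLOAT64
)

// key packs an operator and a type into one switch key, mirroring op<<16 | type above.
func key(op Op, t Ty) uint32 { return uint32(op)<<16 | uint32(t) }

func instrFor(op Op, t Ty) string {
	switch key(op, t) {
	case key(OADD, TINT32):
		return "ADD"
	case key(OADD, TFLOAT64):
		return "FADDD"
	case key(OSUB, TINT32):
		return "SUB"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(instrFor(OADD, TFLOAT64)) // FADDD
}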
Code Example #16
File: cgen.go Project: klueska/go-akaros
/*
 * block copy:
 *	memmove(&ns, &n, w);
 */
func sgen(n *gc.Node, ns *gc.Node, w int64) {
	if gc.Debug['g'] != 0 {
		fmt.Printf("\nsgen w=%d\n", w)
		gc.Dump("r", n)
		gc.Dump("res", ns)
	}

	if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
		gc.Fatal("sgen UINF")
	}

	if w < 0 {
		gc.Fatal("sgen copy %d", w)
	}

	// If copying .args, that's all the results, so record definition sites
	// for them for the liveness analysis.
	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
			if l.N.Class == gc.PPARAMOUT {
				gc.Gvardef(l.N)
			}
		}
	}

	// Avoid taking the address for simple enough types.
	if componentgen(n, ns) {
		return
	}

	if w == 0 {
		// evaluate side effects only
		var nodr gc.Node
		regalloc(&nodr, gc.Types[gc.Tptr], nil)

		agen(ns, &nodr)
		agen(n, &nodr)
		regfree(&nodr)
		return
	}

	// offset on the stack
	osrc := stkof(n)

	odst := stkof(ns)

	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
		// osrc and odst both on stack, and at least one is in
		// an unknown position.  Could generate code to test
		// for forward/backward copy, but instead just copy
		// to a temporary location first.
		var tmp gc.Node
		gc.Tempname(&tmp, n.Type)

		sgen(n, &tmp, w)
		sgen(&tmp, ns, w)
		return
	}

	var noddi gc.Node
	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
	var nodsi gc.Node
	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)

	var nodl gc.Node
	var nodr gc.Node
	if n.Ullman >= ns.Ullman {
		agenr(n, &nodr, &nodsi)
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		agenr(ns, &nodl, &noddi)
	} else {
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		agenr(ns, &nodl, &noddi)
		agenr(n, &nodr, &nodsi)
	}

	if nodl.Val.U.Reg != x86.REG_DI {
		gmove(&nodl, &noddi)
	}
	if nodr.Val.U.Reg != x86.REG_SI {
		gmove(&nodr, &nodsi)
	}
	regfree(&nodl)
	regfree(&nodr)

	c := w % 8 // bytes
	q := w / 8 // quads

	var oldcx gc.Node
	var cx gc.Node
	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)

			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}

			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
			p.To.Offset = 14 * (128 - q)
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
			nodsi.Op = gc.OINDREG

			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
				nodsi.Xoffset += 8
				noddi.Xoffset += 8
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
				q--
			}
		}

		// copy the remaining c bytes
		if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
			for c > 0 {
				gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
				c--
			}
		} else if w < 8 || c <= 4 {
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			cx.Type = gc.Types[gc.TINT32]
			nodsi.Type = gc.Types[gc.TINT32]
			noddi.Type = gc.Types[gc.TINT32]
			if c > 4 {
				nodsi.Xoffset = 0
				noddi.Xoffset = 0
				gmove(&nodsi, &cx)
				gmove(&cx, &noddi)
			}

			nodsi.Xoffset = c - 4
			noddi.Xoffset = c - 4
			gmove(&nodsi, &cx)
			gmove(&cx, &noddi)
		} else {
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			cx.Type = gc.Types[gc.TINT64]
			nodsi.Type = gc.Types[gc.TINT64]
			noddi.Type = gc.Types[gc.TINT64]
			nodsi.Xoffset = c - 8
			noddi.Xoffset = c - 8
			gmove(&nodsi, &cx)
			gmove(&cx, &noddi)
		}
	}

	restx(&cx, &oldcx)
}
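The amd64 sgen above splits the copy into q 8-byte quads plus c trailing bytes and then chooses between REP MOVSQ, duffcopy, and an unrolled MOVQ sequence based on q. A small sketch of that size split and strategy choice, assuming the thresholds used in the code above; the strategy strings are just labels, and the real code further distinguishes whether trailing bytes force the MOVSQ form:

package main

import "fmt"

// plan splits a copy of w bytes into 8-byte quads and leftover bytes and
// picks a forward-copy strategy the way sgen does.
func plan(w int64, nacl bool) (q, c int64, strategy string) {
	c = w % 8 // trailing bytes
	q = w / 8 // 8-byte quads
	switch {
	case q > 128 || (nacl && q >= 4):
		strategy = "REP MOVSQ"
	case q >= 4:
		strategy = "duffcopy"
	default:
		strategy = "unrolled MOVQs"
	}
	return
}

func main() {
	for _, w := range []int64{24, 100, 2048} {
		q, c, s := plan(w, false)
		fmt.Printf("w=%4d quads=%3d bytes=%d -> %s\n", w, q, c, s)
	}
}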
Code Example #17
File: gsubr.go Project: tidatida/go
/*
 * generate one instruction:
 *	as f, t
 */
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// TODO(austin): Add self-move test like in 6g (but be careful
	// of truncation moves)

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	switch as {
	case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
		if t != nil {
			if f.Op != gc.OREGISTER {
				gc.Fatal("bad operands to gcmp")
			}
			p.From = p.To
			p.To = obj.Addr{}
			raddr(f, p)
		}
	}

	// Bad things the front end has done to us. Crash to find call stack.
	switch as {
	case arm64.AAND, arm64.AMUL:
		if p.From.Type == obj.TYPE_CONST {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	case arm64.ACMP:
		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
			gc.Debug['h'] = 1
			gc.Fatal("bad inst: %v", p)
		}
	}

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case arm64.AMOVB,
		arm64.AMOVBU:
		w = 1

	case arm64.AMOVH,
		arm64.AMOVHU:
		w = 2

	case arm64.AMOVW,
		arm64.AMOVWU:
		w = 4

	case arm64.AMOVD:
		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
			break
		}
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	return p
}
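rawgins above ends with a width sanity check: each move opcode implies an operand width, and the instruction is rejected if the source is narrower than that width or a non-register destination is wider. A rough sketch of the same check, assuming a hypothetical opcode-to-width table rather than the real switch:

package main

import "fmt"

// moveWidth is an illustrative table; the real code uses a switch on the opcode.
var moveWidth = map[string]int64{
	"MOVB": 1,
	"MOVH": 2,
	"MOVW": 4,
	"MOVD": 8,
}

// badWidth reports whether a move would trip the rawgins check, assuming the
// destination is memory (register destinations are exempt in the real code).
func badWidth(op string, srcWidth, dstWidth int64) bool {
	w, ok := moveWidth[op]
	if !ok {
		return false
	}
	return srcWidth < w || dstWidth > w
}

func main() {
	fmt.Println(badWidth("MOVW", 4, 4)) // false: widths match the opcode
	fmt.Println(badWidth("MOVW", 2, 4)) // true: source narrower than the opcode
}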
Code Example #18
File: cgen.go Project: klueska/go-akaros
/*
 * allocate a register (reusing res if possible) and generate
 * a = &n
 * The caller must call regfree(a).
 * The generated code checks that the result is not nil.
 */
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nagenr-n", n)
	}

	nl := n.Left
	nr := n.Right

	switch n.Op {
	case gc.ODOT,
		gc.ODOTPTR,
		gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		var n1 gc.Node
		igen(n, &n1, res)
		regalloc(a, gc.Types[gc.Tptr], &n1)
		agen(&n1, a)
		regfree(&n1)

	case gc.OIND:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)

	case gc.OINDEX:
		freelen := 0
		w := uint64(n.Type.Width)

		// Generate the non-addressable child first.
		var n3 gc.Node
		var nlen gc.Node
		var tmp gc.Node
		var n1 gc.Node
		if nr.Addable != 0 {
			goto irad
		}
		if nl.Addable != 0 {
			cgenr(nr, &n1, nil)
			if !gc.Isconst(nl, gc.CTSTR) {
				if gc.Isfixedarray(nl.Type) {
					agenr(nl, &n3, res)
				} else {
					igen(nl, &nlen, res)
					freelen = 1
					nlen.Type = gc.Types[gc.Tptr]
					nlen.Xoffset += int64(gc.Array_array)
					regalloc(&n3, gc.Types[gc.Tptr], res)
					gmove(&nlen, &n3)
					nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
					nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
				}
			}

			goto index
		}

		gc.Tempname(&tmp, nr.Type)
		cgen(nr, &tmp)
		nr = &tmp

	irad:
		if !gc.Isconst(nl, gc.CTSTR) {
			if gc.Isfixedarray(nl.Type) {
				agenr(nl, &n3, res)
			} else {
				if nl.Addable == 0 {
					// igen will need an addressable node.
					var tmp2 gc.Node
					gc.Tempname(&tmp2, nl.Type)

					cgen(nl, &tmp2)
					nl = &tmp2
				}

				igen(nl, &nlen, res)
				freelen = 1
				nlen.Type = gc.Types[gc.Tptr]
				nlen.Xoffset += int64(gc.Array_array)
				regalloc(&n3, gc.Types[gc.Tptr], res)
				gmove(&nlen, &n3)
				nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
				nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
			}
		}

		if !gc.Isconst(nr, gc.CTINT) {
			cgenr(nr, &n1, nil)
		}

		goto index

		// &a is in &n3 (allocated in res)
		// i is in &n1 (if not constant)
		// len(a) is in nlen (if needed)
		// w is width

		// constant index
	index:
		if gc.Isconst(nr, gc.CTINT) {
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Fatal("constant string constant index") // front end should handle
			}
			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				if gc.Debug['B'] == 0 && !n.Bounded {
					var n2 gc.Node
					gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
					if gc.Smallintconst(nr) {
						gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
					} else {
						regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
						gmove(&n2, &tmp)
						gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &tmp)
						regfree(&tmp)
					}

					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
					ginscall(gc.Panicindex, -1)
					gc.Patch(p1, gc.Pc)
				}

				regfree(&nlen)
			}

			if v*w != 0 {
				ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*w), &n3)
			}
			*a = n3
			break
		}

		// type of the index
		t := gc.Types[gc.TUINT64]

		if gc.Issigned[n1.Type.Etype] {
			t = gc.Types[gc.TINT64]
		}

		var n2 gc.Node
		regalloc(&n2, t, &n1) // i
		gmove(&n1, &n2)
		regfree(&n1)

		if gc.Debug['B'] == 0 && !n.Bounded {
			// check bounds
			t = gc.Types[gc.Simtype[gc.TUINT]]

			if gc.Is64(nr.Type) {
				t = gc.Types[gc.TUINT64]
			}
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval)))
			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				if gc.Is64(nr.Type) {
					var n5 gc.Node
					regalloc(&n5, t, nil)
					gmove(&nlen, &n5)
					regfree(&nlen)
					nlen = n5
				}
			} else {
				gc.Nodconst(&nlen, t, nl.Type.Bound)
				if !gc.Smallintconst(&nlen) {
					var n5 gc.Node
					regalloc(&n5, t, nil)
					gmove(&nlen, &n5)
					nlen = n5
					freelen = 1
				}
			}

			gins(optoas(gc.OCMP, t), &n2, &nlen)
			p1 := gc.Gbranch(optoas(gc.OLT, t), nil, +1)
			ginscall(gc.Panicindex, -1)
			gc.Patch(p1, gc.Pc)
		}

		if gc.Isconst(nl, gc.CTSTR) {
			regalloc(&n3, gc.Types[gc.Tptr], res)
			p1 := gins(x86.ALEAQ, nil, &n3)
			gc.Datastring(nl.Val.U.Sval, &p1.From)
			gins(x86.AADDQ, &n2, &n3)
			goto indexdone
		}

		if w == 0 {
			// nothing to do
		} else if w == 1 || w == 2 || w == 4 || w == 8 {
			p1 := gins(x86.ALEAQ, &n2, &n3)
			p1.From.Type = obj.TYPE_MEM
			p1.From.Scale = int16(w)
			p1.From.Index = p1.From.Reg
			p1.From.Reg = p1.To.Reg
		} else {
			ginscon(optoas(gc.OMUL, t), int64(w), &n2)
			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
		}

	indexdone:
		*a = n3
		regfree(&n2)
		if freelen != 0 {
			regfree(&nlen)
		}

	default:
		regalloc(a, gc.Types[gc.Tptr], res)
		agen(n, a)
	}
}
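For a non-constant index, agenr above forms the element address either with a single scaled LEA (when the element width is 1, 2, 4, or 8) or with an explicit multiply followed by an add. A small model of that address arithmetic, purely illustrative and not compiler code:

package main

import "fmt"

// elemAddr models the address computation: base + index*width, reporting
// whether a single scaled LEA could have produced it.
func elemAddr(base, index, width uintptr) (addr uintptr, oneLEA bool) {
	switch width {
	case 1, 2, 4, 8:
		return base + index*width, true // LEAQ (base)(index*width)
	default:
		return base + index*width, false // multiply the index, then add the base
	}
}

func main() {
	a, lea := elemAddr(0x1000, 3, 8)
	fmt.Printf("addr=%#x single LEA=%v\n", a, lea)
	a, lea = elemAddr(0x1000, 3, 12)
	fmt.Printf("addr=%#x single LEA=%v\n", a, lea)
}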
Code Example #19
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	if(n == true) goto to;
 */
func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nbgen", n)
	}

	if n == nil {
		n = gc.Nodbool(true)
	}

	if n.Ninit != nil {
		gc.Genlist(n.Ninit)
	}

	if n.Type == nil {
		gc.Convlit(&n, gc.Types[gc.TBOOL])
		if n.Type == nil {
			return
		}
	}

	et := int(n.Type.Etype)
	if et != gc.TBOOL {
		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
		gc.Patch(gins(obj.AEND, nil, nil), to)
		return
	}

	var nr *gc.Node

	for n.Op == gc.OCONVNOP {
		n = n.Left
		if n.Ninit != nil {
			gc.Genlist(n.Ninit)
		}
	}

	var nl *gc.Node
	switch n.Op {
	default:
		var n1 gc.Node
		regalloc(&n1, n.Type, nil)
		cgen(n, &n1)
		var n2 gc.Node
		gc.Nodconst(&n2, n.Type, 0)
		gins(optoas(gc.OCMP, n.Type), &n1, &n2)
		a := ppc64.ABNE
		if !true_ {
			a = ppc64.ABEQ
		}
		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
		regfree(&n1)
		return

		// need to ask if it is bool?
	case gc.OLITERAL:
		if !true_ == (n.Val.U.Bval == 0) {
			gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
		}
		return

	case gc.OANDAND,
		gc.OOROR:
		if (n.Op == gc.OANDAND) == true_ {
			p1 := gc.Gbranch(obj.AJMP, nil, 0)
			p2 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			bgen(n.Left, !true_, -likely, p2)
			bgen(n.Right, !true_, -likely, p2)
			p1 = gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, to)
			gc.Patch(p2, gc.Pc)
		} else {
			bgen(n.Left, true_, likely, to)
			bgen(n.Right, true_, likely, to)
		}

		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		nr = n.Right
		if nr == nil || nr.Type == nil {
			return
		}
		fallthrough

	case gc.ONOT: // unary
		nl = n.Left

		if nl == nil || nl.Type == nil {
			return
		}
	}

	switch n.Op {
	case gc.ONOT:
		bgen(nl, !true_, likely, to)
		return

	case gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OGT,
		gc.OLE,
		gc.OGE:
		a := int(n.Op)
		if !true_ {
			if gc.Isfloat[nr.Type.Etype] {
				// brcom is not valid on floats when NaN is involved.
				p1 := gc.Gbranch(ppc64.ABR, nil, 0)

				p2 := gc.Gbranch(ppc64.ABR, nil, 0)
				gc.Patch(p1, gc.Pc)
				ll := n.Ninit // avoid re-genning ninit
				n.Ninit = nil
				bgen(n, true, -likely, p2)
				n.Ninit = ll
				gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
				gc.Patch(p2, gc.Pc)
				return
			}

			a = gc.Brcom(a)
			true_ = !true_
		}

		// make simplest on right
		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
			a = gc.Brrev(a)
			r := nl
			nl = nr
			nr = r
		}

		if gc.Isslice(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal slice comparison")
				break
			}

			a = optoas(a, gc.Types[gc.Tptr])
			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Xoffset += int64(gc.Array_array)
			n1.Type = gc.Types[gc.Tptr]
			var tmp gc.Node
			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
			var n2 gc.Node
			regalloc(&n2, gc.Types[gc.Tptr], &n1)
			gmove(&n1, &n2)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
			regfree(&n2)
			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
			regfree(&n1)
			break
		}

		if gc.Isinter(nl.Type) {
			// front end should only leave cmp to literal nil
			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
				gc.Yyerror("illegal interface comparison")
				break
			}

			a = optoas(a, gc.Types[gc.Tptr])
			var n1 gc.Node
			igen(nl, &n1, nil)
			n1.Type = gc.Types[gc.Tptr]
			var tmp gc.Node
			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
			var n2 gc.Node
			regalloc(&n2, gc.Types[gc.Tptr], &n1)
			gmove(&n1, &n2)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
			regfree(&n2)
			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
			regfree(&n1)
			break
		}

		if gc.Iscomplex[nl.Type.Etype] {
			gc.Complexbool(a, nl, nr, true_, likely, to)
			break
		}

		var n1 gc.Node
		var n2 gc.Node
		if nr.Ullman >= gc.UINF {
			regalloc(&n1, nl.Type, nil)
			cgen(nl, &n1)

			var tmp gc.Node
			gc.Tempname(&tmp, nl.Type)
			gmove(&n1, &tmp)
			regfree(&n1)

			regalloc(&n2, nr.Type, nil)
			cgen(nr, &n2)

			regalloc(&n1, nl.Type, nil)
			cgen(&tmp, &n1)

			goto cmp
		}

		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)

		// TODO(minux): cmpi does accept 16-bit signed immediate as p->to.
		// and cmpli accepts 16-bit unsigned immediate.
		//if(smallintconst(nr)) {
		//	gins(optoas(OCMP, nr->type), &n1, nr);
		//	patch(gbranch(optoas(a, nr->type), nr->type, likely), to);
		//	regfree(&n1);
		//	break;
		//}

		regalloc(&n2, nr.Type, nil)

		cgen(nr, &n2)

	cmp:
		l := &n1
		r := &n2
		gins(optoas(gc.OCMP, nr.Type), l, r)
		if gc.Isfloat[nr.Type.Etype] && (a == gc.OLE || a == gc.OGE) {
			// To get NaN right, must rewrite x <= y into separate x < y or x = y.
			switch a {
			case gc.OLE:
				a = gc.OLT

			case gc.OGE:
				a = gc.OGT
			}

			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
			gc.Patch(gc.Gbranch(optoas(gc.OEQ, nr.Type), nr.Type, likely), to)
		} else {
			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
		}

		regfree(&n1)
		regfree(&n2)
	}

	return
}
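bgen above relies on two rewrites that are easy to conflate: Brrev reverses a comparison when the operands are swapped (to put the simplest operand on the right), while Brcom complements it when branching on the false case, which is only safe for integer types because of NaN. A small illustrative sketch of the distinction, using hypothetical lookup tables rather than the gc package's:

package main

import "fmt"

// brrev reverses an operator for operand swapping: a < b becomes b > a.
var brrev = map[string]string{"<": ">", ">": "<", "<=": ">=", ">=": "<=", "==": "==", "!=": "!="}

// brcom complements an operator for branch-on-false: !(a < b) becomes a >= b.
var brcom = map[string]string{"<": ">=", ">": "<=", "<=": ">", ">=": "<", "==": "!=", "!=": "=="}

func main() {
	// "Make simplest on right": rewriting  lit < x  as  x > lit  uses brrev.
	fmt.Println("reverse of <   :", brrev["<"])
	// Branch-on-false for integers uses brcom; floats take the NaN-safe path instead.
	fmt.Println("complement of <:", brcom["<"])
}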
Code Example #20
File: gsubr.go Project: klueska/go-akaros
func anyregalloc() bool {
	var j int

	for i := x86.REG_AX; i <= x86.REG_DI; i++ {
		if reg[i] == 0 {
			goto ok
		}
		for j = 0; j < len(resvd); j++ {
			if resvd[j] == i {
				goto ok
			}
		}
		return true
	ok:
	}

	for i := x86.REG_X0; i <= x86.REG_X7; i++ {
		if reg[i] != 0 {
			return true
		}
	}
	return false
}

/*
 * allocate register of type t, leave in n.
 * if o != N, o is desired fixed register.
 * caller must regfree(n).
 */
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
	if t == nil {
		gc.Fatal("regalloc: t nil")
	}
	et := int(gc.Simtype[t.Etype])

	var i int
	switch et {
	case gc.TINT64,
		gc.TUINT64:
		gc.Fatal("regalloc64")

	case gc.TINT8,
		gc.TUINT8,
		gc.TINT16,
		gc.TUINT16,
		gc.TINT32,
		gc.TUINT32,
		gc.TPTR32,
		gc.TPTR64,
		gc.TBOOL:
		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= x86.REG_AX && i <= x86.REG_DI {
				goto out
			}
		}

		for i = x86.REG_AX; i <= x86.REG_DI; i++ {
			if reg[i] == 0 {
				goto out
			}
		}

		fmt.Printf("registers allocated at\n")
		for i := x86.REG_AX; i <= x86.REG_DI; i++ {
			fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
		}
		gc.Fatal("out of fixed registers")
		goto err

	case gc.TFLOAT32,
		gc.TFLOAT64:
		if gc.Use_sse == 0 {
			i = x86.REG_F0
			goto out
		}

		if o != nil && o.Op == gc.OREGISTER {
			i = int(o.Val.U.Reg)
			if i >= x86.REG_X0 && i <= x86.REG_X7 {
				goto out
			}
		}

		for i = x86.REG_X0; i <= x86.REG_X7; i++ {
			if reg[i] == 0 {
				goto out
			}
		}
		fmt.Printf("registers allocated at\n")
		for i := x86.REG_X0; i <= x86.REG_X7; i++ {
			fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
		}
		gc.Fatal("out of floating registers")
	}

	gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))

err:
	gc.Nodreg(n, t, 0)
	return

out:
	if i == x86.REG_SP {
		fmt.Printf("alloc SP\n")
	}
	if reg[i] == 0 {
		regpc[i] = uint32(obj.Getcallerpc(&n))
		if i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP {
			gc.Dump("regalloc-o", o)
			gc.Fatal("regalloc %v", obj.Rconv(i))
		}
	}

	reg[i]++
	gc.Nodreg(n, t, i)
}

func regfree(n *gc.Node) {
	if n.Op == gc.ONAME {
		return
	}
	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
		gc.Fatal("regfree: not a register")
	}
	i := int(n.Val.U.Reg)
	if i == x86.REG_SP {
		return
	}
	if i < 0 || i >= len(reg) {
		gc.Fatal("regfree: reg out of range")
	}
	if reg[i] <= 0 {
		gc.Fatal("regfree: reg not allocated")
	}
	reg[i]--
	if reg[i] == 0 && (i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP) {
		gc.Fatal("regfree %v", obj.Rconv(i))
	}
}
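// Hedged sketch (illustrative only, not from the project): the intended
// calling pattern for regalloc/regfree above. Each allocation must be paired
// with exactly one free; the reference count in reg[i] is what turns a
// missing or doubled free into a Fatal. loadViaRegister is a hypothetical
// helper; n, t and res would come from the surrounding code generator.
func loadViaRegister(n *gc.Node, t *gc.Type, res *gc.Node) {
	var r gc.Node
	regalloc(&r, t, res) // prefer reusing res if it is already a register
	gmove(n, &r)         // use r as a scratch value
	gmove(&r, res)
	regfree(&r) // release it; reg[i] drops back to zero
}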

/*
 * generate
 *	as $c, reg
 */
func gconreg(as int, c int64, reg int) {
	var n1 gc.Node
	var n2 gc.Node

	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	gc.Nodreg(&n2, gc.Types[gc.TINT64], reg)
	gins(as, &n1, &n2)
}

/*
 * swap node contents
 */
func nswap(a *gc.Node, b *gc.Node) {
	t := *a
	*a = *b
	*b = t
}

/*
 * return constant i node.
 * overwritten by next call, but useful in calls to gins.
 */

var ncon_n gc.Node

func ncon(i uint32) *gc.Node {
	if ncon_n.Type == nil {
		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
	}
	gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
	return &ncon_n
}
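// Hedged note (assumption, not in the original source): ncon reuses the
// single package-level node ncon_n, so the pointer from one call is
// overwritten by the next. The safe pattern consumes each result inside one
// gins call, as in this hypothetical zero-filling helper.
func zeroPair(lo *gc.Node, hi *gc.Node) {
	gins(x86.AMOVL, ncon(0), lo) // first constant, consumed immediately
	gins(x86.AMOVL, ncon(0), hi) // second call reuses the same node
}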

var sclean [10]gc.Node

var nsclean int

/*
 * n is a 64-bit value.  fill in lo and hi to refer to its 32-bit halves.
 */
func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
	if !gc.Is64(n.Type) {
		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
	}

	if nsclean >= len(sclean) {
		gc.Fatal("split64 clean")
	}
	sclean[nsclean].Op = gc.OEMPTY
	nsclean++
	switch n.Op {
	default:
		switch n.Op {
		default:
			var n1 gc.Node
			if !dotaddable(n, &n1) {
				igen(n, &n1, nil)
				sclean[nsclean-1] = n1
			}

			n = &n1

		case gc.ONAME:
			if n.Class == gc.PPARAMREF {
				var n1 gc.Node
				cgen(n.Heapaddr, &n1)
				sclean[nsclean-1] = n1
				n = &n1
			}

			// nothing
		case gc.OINDREG:
			break
		}

		*lo = *n
		*hi = *n
		lo.Type = gc.Types[gc.TUINT32]
		if n.Type.Etype == gc.TINT64 {
			hi.Type = gc.Types[gc.TINT32]
		} else {
			hi.Type = gc.Types[gc.TUINT32]
		}
		hi.Xoffset += 4

	case gc.OLITERAL:
		var n1 gc.Node
		gc.Convconst(&n1, n.Type, &n.Val)
		i := gc.Mpgetfix(n1.Val.U.Xval)
		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
		i >>= 32
		if n.Type.Etype == gc.TINT64 {
			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
		} else {
			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
		}
	}
}
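// Hedged sketch (illustrative): on this 32-bit little-endian target, split64
// exposes a 64-bit operand as two 32-bit views, the low word at the original
// offset and the high word at offset+4. The value-level arithmetic it models
// is simply:
func splitValue64(v uint64) (lo uint32, hi uint32) {
	lo = uint32(v)       // low 32 bits, always treated as unsigned
	hi = uint32(v >> 32) // high 32 bits; signed only if the source is int64
	return lo, hi
}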

func splitclean() {
	if nsclean <= 0 {
		gc.Fatal("splitclean")
	}
	nsclean--
	if sclean[nsclean].Op != gc.OEMPTY {
		regfree(&sclean[nsclean])
	}
}

/*
 * set up nodes representing fp constants
 */
var zerof gc.Node

var two64f gc.Node

var two63f gc.Node

var bignodes_did int

func bignodes() {
	if bignodes_did != 0 {
		return
	}
	bignodes_did = 1

	two64f = *ncon(0)
	two64f.Type = gc.Types[gc.TFLOAT64]
	two64f.Val.Ctype = gc.CTFLT
	two64f.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.)

	two63f = two64f
	two63f.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.)

	zerof = two64f
	zerof.Val.U.Fval = new(gc.Mpflt)
	gc.Mpmovecflt(zerof.Val.U.Fval, 0)
}

func memname(n *gc.Node, t *gc.Type) {
	gc.Tempname(n, t)
	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
	n.Orig.Sym = n.Sym
}

func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	if gc.Isfloat[ft] || gc.Isfloat[tt] {
		floatmove(f, t)
		return
	}

	// cannot have two integer memory operands;
	// except 64-bit, which always copies via registers anyway.
	var r1 gc.Node
	var a int
	if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		gc.Convconst(&con, t.Type, &f.Val)
		f = &con
		ft = gc.Simsimtype(con.Type)
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		// should not happen
		gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
		return

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT8, // truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8:
		a = x86.AMOVB

		goto rsrc

	case gc.TINT64<<16 | gc.TINT8, // truncate low word
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVB, &r1, t)
		splitclean()
		return

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT16, // truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16:
		a = x86.AMOVW

		goto rsrc

	case gc.TINT64<<16 | gc.TINT16, // truncate low word
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		var flo gc.Node
		var fhi gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVW, &r1, t)
		splitclean()
		return

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var r1 gc.Node
		gc.Nodreg(&r1, t.Type, x86.REG_AX)
		gmove(&flo, &r1)
		gins(x86.AMOVL, &r1, t)
		splitclean()
		return

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		var fhi gc.Node
		var flo gc.Node
		split64(f, &flo, &fhi)

		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)
		if f.Op == gc.OLITERAL {
			gins(x86.AMOVL, &flo, &tlo)
			gins(x86.AMOVL, &fhi, &thi)
		} else {
			var r1 gc.Node
			gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
			var r2 gc.Node
			gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_DX)
			gins(x86.AMOVL, &flo, &r1)
			gins(x86.AMOVL, &fhi, &r2)
			gins(x86.AMOVL, &r1, &tlo)
			gins(x86.AMOVL, &r2, &thi)
		}

		splitclean()
		splitclean()
		return

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX

		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64, // convert via int32
		gc.TINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX

		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
		gc.TUINT8<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX

		goto rdst

	case gc.TINT16<<16 | gc.TINT64, // convert via int32
		gc.TINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
		gc.TUINT16<<16 | gc.TUINT64:
		cvt = gc.Types[gc.TUINT32]

		goto hard

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)

		var flo gc.Node
		gc.Nodreg(&flo, tlo.Type, x86.REG_AX)
		var fhi gc.Node
		gc.Nodreg(&fhi, thi.Type, x86.REG_DX)
		gmove(f, &flo)
		gins(x86.ACDQ, nil, nil)
		gins(x86.AMOVL, &flo, &tlo)
		gins(x86.AMOVL, &fhi, &thi)
		splitclean()
		return

	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		var tlo gc.Node
		var thi gc.Node
		split64(t, &tlo, &thi)

		gmove(f, &tlo)
		gins(x86.AMOVL, ncon(0), &thi)
		splitclean()
		return
	}

	gins(a, f, t)
	return

	// requires register source
rsrc:
	regalloc(&r1, f.Type, t)

	gmove(f, &r1)
	gins(a, &r1, t)
	regfree(&r1)
	return

	// requires register destination
rdst:
	{
		regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return
}
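// Hedged sketch (assumption about intent): the big switch in gmove keys on a
// single integer that packs the source and destination simple types into one
// word, so every (from, to) pair gets its own case and the two directions of
// a conversion never collide. conversionKey is a hypothetical helper that
// makes the encoding explicit.
func conversionKey(ft int, tt int) uint32 {
	// mirrors the uint32(ft)<<16 | uint32(tt) expressions used above
	return uint32(ft)<<16 | uint32(tt)
}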

func floatmove(f *gc.Node, t *gc.Node) {
	var r1 gc.Node

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	// cannot have two floating point memory operands.
	if gc.Isfloat[ft] && gc.Isfloat[tt] && gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		gc.Convconst(&con, t.Type, &f.Val)
		f = &con
		ft = gc.Simsimtype(con.Type)

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		if gc.Use_sse != 0 {
			floatmove_sse(f, t)
		} else {
			floatmove_387(f, t)
		}
		return

		// float to very long integer.
	case gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT64:
		if f.Op == gc.OREGISTER {
			cvt = f.Type
			goto hardmem
		}

		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if ft == gc.TFLOAT32 {
			gins(x86.AFMOVF, f, &r1)
		} else {
			gins(x86.AFMOVD, f, &r1)
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		if !gc.Ismem(f) {
			cvt = f.Type
			goto hardmem
		}

		bignodes()
		var f0 gc.Node
		gc.Nodreg(&f0, gc.Types[ft], x86.REG_F0)
		var f1 gc.Node
		gc.Nodreg(&f1, gc.Types[ft], x86.REG_F0+1)
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT16], x86.REG_AX)

		if ft == gc.TFLOAT32 {
			gins(x86.AFMOVF, f, &f0)
		} else {
			gins(x86.AFMOVD, f, &f0)
		}

		// if 0 > v { answer = 0 }
		gins(x86.AFMOVD, &zerof, &f0)

		gins(x86.AFUCOMIP, &f0, &f1)
		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)

		// if 1<<64 <= v { answer = 0 too }
		gins(x86.AFMOVD, &two64f, &f0)

		gins(x86.AFUCOMIP, &f0, &f1)
		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
		var thi gc.Node
		var tlo gc.Node
		split64(t, &tlo, &thi)
		gins(x86.AMOVL, ncon(0), &tlo)
		gins(x86.AMOVL, ncon(0), &thi)
		splitclean()
		p1 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p2, gc.Pc)

		// in range; algorithm is:
		//	if small enough, use native float64 -> int64 conversion.
		//	otherwise, subtract 2^63, convert, and add it back.

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)

		// actual work
		gins(x86.AFMOVD, &two63f, &f0)

		gins(x86.AFUCOMIP, &f0, &f1)
		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
		gins(x86.AFMOVVP, &f0, t)
		p3 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p2, gc.Pc)
		gins(x86.AFMOVD, &two63f, &f0)
		gins(x86.AFSUBDP, &f0, &f1)
		gins(x86.AFMOVVP, &f0, t)
		split64(t, &tlo, &thi)
		gins(x86.AXORL, ncon(0x80000000), &thi) // + 2^63
		gc.Patch(p3, gc.Pc)
		splitclean()

		// restore rounding mode
		gins(x86.AFLDCW, &t1, nil)

		gc.Patch(p1, gc.Pc)
		return

		/*
		 * integer to float
		 */
	case gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var f0 gc.Node
		gc.Nodreg(&f0, t.Type, x86.REG_F0)
		gins(x86.AFMOVV, f, &f0)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &f0, t)
		} else {
			gins(x86.AFMOVDP, &f0, t)
		}
		return

		// algorithm is:
	//	if small enough, use native int64 -> float64 conversion.
	//	otherwise, halve (rounding to odd?), convert, and double.
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		var ax gc.Node
		gc.Nodreg(&ax, gc.Types[gc.TUINT32], x86.REG_AX)

		var dx gc.Node
		gc.Nodreg(&dx, gc.Types[gc.TUINT32], x86.REG_DX)
		var cx gc.Node
		gc.Nodreg(&cx, gc.Types[gc.TUINT32], x86.REG_CX)
		var t1 gc.Node
		gc.Tempname(&t1, f.Type)
		var tlo gc.Node
		var thi gc.Node
		split64(&t1, &tlo, &thi)
		gmove(f, &t1)
		gins(x86.ACMPL, &thi, ncon(0))
		p1 := gc.Gbranch(x86.AJLT, nil, 0)

		// native
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)

		gins(x86.AFMOVV, &t1, &r1)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		p2 := gc.Gbranch(obj.AJMP, nil, 0)

		// simulated
		gc.Patch(p1, gc.Pc)

		gmove(&tlo, &ax)
		gmove(&thi, &dx)
		p1 = gins(x86.ASHRL, ncon(1), &ax)
		p1.From.Index = x86.REG_DX // double-width shift DX -> AX
		p1.From.Scale = 0
		gins(x86.AMOVL, ncon(0), &cx)
		gins(x86.ASETCC, nil, &cx)
		gins(x86.AORL, &cx, &ax)
		gins(x86.ASHRL, ncon(1), &dx)
		gmove(&dx, &thi)
		gmove(&ax, &tlo)
		gc.Nodreg(&r1, gc.Types[tt], x86.REG_F0)
		var r2 gc.Node
		gc.Nodreg(&r2, gc.Types[tt], x86.REG_F0+1)
		gins(x86.AFMOVV, &t1, &r1)
		gins(x86.AFMOVD, &r1, &r1)
		gins(x86.AFADDDP, &r1, &r2)
		if tt == gc.TFLOAT32 {
			gins(x86.AFMOVFP, &r1, t)
		} else {
			gins(x86.AFMOVDP, &r1, t)
		}
		gc.Patch(p2, gc.Pc)
		splitclean()
		return
	}

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return
}
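// Hedged sketch (illustrative, not from the project): the two "simulated"
// paths above are the classic conversions between uint64 and float64 on
// hardware that only converts signed 64-bit integers. Written as plain Go,
// assuming f is in [0, 2^64) and ignoring NaN:
func float64ToUint64(f float64) uint64 {
	const two63 = 9223372036854775808.0 // 2^63
	if f < two63 {
		return uint64(int64(f)) // fits in int64: native conversion
	}
	// too large: subtract 2^63, convert, then add 2^63 back
	// (the generated code restores it by XORing the top bit).
	return uint64(int64(f-two63)) + 1<<63
}

func uint64ToFloat64(u uint64) float64 {
	if int64(u) >= 0 {
		return float64(int64(u)) // high bit clear: native conversion
	}
	// high bit set: halve with a sticky (round-to-odd) low bit,
	// convert, then double.
	half := u>>1 | u&1
	return 2 * float64(int64(half))
}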

func floatmove_387(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		goto fatal

		/*
		* float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT32,
		gc.TFLOAT32<<16 | gc.TINT64,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT32,
		gc.TFLOAT64<<16 | gc.TINT64:
		if t.Op == gc.OREGISTER {
			goto hardmem
		}
		var r1 gc.Node
		gc.Nodreg(&r1, gc.Types[ft], x86.REG_F0)
		if f.Op != gc.OREGISTER {
			if ft == gc.TFLOAT32 {
				gins(x86.AFMOVF, f, &r1)
			} else {
				gins(x86.AFMOVD, f, &r1)
			}
		}

		// set round to zero mode during conversion
		var t1 gc.Node
		memname(&t1, gc.Types[gc.TUINT16])

		var t2 gc.Node
		memname(&t2, gc.Types[gc.TUINT16])
		gins(x86.AFSTCW, nil, &t1)
		gins(x86.AMOVW, ncon(0xf7f), &t2)
		gins(x86.AFLDCW, &t2, nil)
		if tt == gc.TINT16 {
			gins(x86.AFMOVWP, &r1, t)
		} else if tt == gc.TINT32 {
			gins(x86.AFMOVLP, &r1, t)
		} else {
			gins(x86.AFMOVVP, &r1, t)
		}
		gins(x86.AFLDCW, &t1, nil)
		return

		// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		var t1 gc.Node
		gc.Tempname(&t1, gc.Types[gc.TINT32])

		gmove(f, &t1)
		switch tt {
		default:
			gc.Fatal("gmove %v", gc.Nconv(t, 0))

		case gc.TINT8:
			gins(x86.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
			gins(x86.ACMPL, &t1, ncon(0x7f))
			p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
			p3 := gc.Gbranch(obj.AJMP, nil, 0)
			gc.Patch(p1, gc.Pc)
			gc.Patch(p2, gc.Pc)
			gmove(ncon(-0x80&(1<<32-1)), &t1)
			gc.Patch(p3, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT8:
			gins(x86.ATESTL, ncon(0xffffff00), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)

		case gc.TUINT16:
			gins(x86.ATESTL, ncon(0xffff0000), &t1)
			p1 := gc.Gbranch(x86.AJEQ, nil, +1)
			gins(x86.AMOVL, ncon(0), &t1)
			gc.Patch(p1, gc.Pc)
			gmove(&t1, t)
		}

		return

		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

		/*
		 * integer to float
		 */
	case gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT32<<16 | gc.TFLOAT32,
		gc.TINT32<<16 | gc.TFLOAT64,
		gc.TINT64<<16 | gc.TFLOAT32,
		gc.TINT64<<16 | gc.TFLOAT64:
		if t.Op != gc.OREGISTER {
			goto hard
		}
		if f.Op == gc.OREGISTER {
			cvt = f.Type
			goto hardmem
		}

		switch ft {
		case gc.TINT16:
			a = x86.AFMOVW

		case gc.TINT32:
			a = x86.AFMOVL

		default:
			a = x86.AFMOVV
		}

		// convert via int32 memory
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]

		goto hardmem

		// convert via int64 memory
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

		// The way the code generator uses floating-point
	// registers, a move from F0 to F0 is intended as a no-op.
	// On the x86, it's not: it pushes a second copy of F0
	// on the floating point stack.  So toss it away here.
	// Also, F0 is the *only* register we ever evaluate
	// into, so we should only see register/register as F0/F0.
	/*
	 * float to float
	 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32,
		gc.TFLOAT64<<16 | gc.TFLOAT64:
		if gc.Ismem(f) && gc.Ismem(t) {
			goto hard
		}
		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
			if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 {
				goto fatal
			}
			return
		}

		a = x86.AFMOVF
		if ft == gc.TFLOAT64 {
			a = x86.AFMOVD
		}
		if gc.Ismem(t) {
			if f.Op != gc.OREGISTER || f.Val.U.Reg != x86.REG_F0 {
				gc.Fatal("gmove %v", gc.Nconv(f, 0))
			}
			a = x86.AFMOVFP
			if ft == gc.TFLOAT64 {
				a = x86.AFMOVDP
			}
		}

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		if gc.Ismem(f) && gc.Ismem(t) {
			goto hard
		}
		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
			if f.Val.U.Reg != x86.REG_F0 || t.Val.U.Reg != x86.REG_F0 {
				goto fatal
			}
			return
		}

		if f.Op == gc.OREGISTER {
			gins(x86.AFMOVDP, f, t)
		} else {
			gins(x86.AFMOVF, f, t)
		}
		return

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		if gc.Ismem(f) && gc.Ismem(t) {
			goto hard
		}
		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
			var r1 gc.Node
			gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
			gins(x86.AFMOVFP, f, &r1)
			gins(x86.AFMOVF, &r1, t)
			return
		}

		if f.Op == gc.OREGISTER {
			gins(x86.AFMOVFP, f, t)
		} else {
			gins(x86.AFMOVD, f, t)
		}
		return
	}

	gins(a, f, t)
	return

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return

	// should not happen
fatal:
	gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))

	return
}
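// Hedged sketch (illustrative): the TINT8 branch above converts through an
// int32 temporary and, when the value does not fit, stores -0x80 instead,
// which mirrors the x87 "integer indefinite" result. clampToInt8Indefinite
// is a hypothetical model of that behaviour.
func clampToInt8Indefinite(v int32) int8 {
	if v < -0x80 || v > 0x7f {
		return -0x80 // out of range in either direction
	}
	return int8(v)
}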

func floatmove_sse(f *gc.Node, t *gc.Node) {
	var r1 gc.Node
	var cvt *gc.Type
	var a int

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)

	switch uint32(ft)<<16 | uint32(tt) {
	// should not happen
	default:
		gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))

		return

		// convert via int32.
	/*
	* float to integer
	 */
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

		// convert via int32 memory
	/*
	 * integer to float
	 */
	case gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64 memory
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]

		goto hardmem

	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register intermediate
hard:
	regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return

	// requires memory intermediate
hardmem:
	gc.Tempname(&r1, cvt)

	gmove(f, &r1)
	gmove(&r1, t)
	return

	// requires register destination
rdst:
	regalloc(&r1, t.Type, t)

	gins(a, f, &r1)
	gmove(&r1, t)
	regfree(&r1)
	return
}

func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Val.U.Reg != t.Val.U.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
		gc.Fatal("gins MOVF reg, reg")
	}
	if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
		gc.Fatal("gins CVTSD2SS const")
	}
	if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Val.U.Reg == x86.REG_F0 {
		gc.Fatal("gins MOVSD into F0")
	}

	switch as {
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAL:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
		}
	}

	var af obj.Addr
	var at obj.Addr
	if f != nil {
		af = gc.Naddr(f)
	}
	if t != nil {
		at = gc.Naddr(t)
	}
	p := gc.Prog(as)
	if f != nil {
		p.From = af
	}
	if t != nil {
		p.To = at
	}
	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := 0
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4
	}

	if true && w != 0 && f != nil && (af.Width > int64(w) || at.Width > int64(w)) {
		gc.Dump("bad width from:", f)
		gc.Dump("bad width to:", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatal("bad use of addr: %v", p)
	}

	return p
}

func dotaddable(n *gc.Node, n1 *gc.Node) bool {
	if n.Op != gc.ODOT {
		return false
	}

	var oary [10]int64
	var nn *gc.Node
	o := gc.Dotoffset(n, oary[:], &nn)
	if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
		*n1 = *nn
		n1.Type = n.Type
		n1.Xoffset += oary[0]
		return true
	}

	return false
}

func sudoclean() {
}

func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	*a = obj.Addr{}
	return false
}
Code Example #21
File: cgen.go  Project: klueska/go-akaros
/*
 * generate:
 *	res = n;
 * simplifies and calls gmove.
 */
func cgen(n *gc.Node, res *gc.Node) {
	//print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
	if gc.Debug['g'] != 0 {
		gc.Dump("\ncgen-n", n)
		gc.Dump("cgen-res", res)
	}

	if n == nil || n.Type == nil {
		return
	}

	if res == nil || res.Type == nil {
		gc.Fatal("cgen: res nil")
	}

	for n.Op == gc.OCONVNOP {
		n = n.Left
	}

	switch n.Op {
	case gc.OSLICE,
		gc.OSLICEARR,
		gc.OSLICESTR,
		gc.OSLICE3,
		gc.OSLICE3ARR:
		if res.Op != gc.ONAME || res.Addable == 0 {
			var n1 gc.Node
			gc.Tempname(&n1, n.Type)
			gc.Cgen_slice(n, &n1)
			cgen(&n1, res)
		} else {
			gc.Cgen_slice(n, res)
		}
		return

	case gc.OEFACE:
		if res.Op != gc.ONAME || res.Addable == 0 {
			var n1 gc.Node
			gc.Tempname(&n1, n.Type)
			gc.Cgen_eface(n, &n1)
			cgen(&n1, res)
		} else {
			gc.Cgen_eface(n, res)
		}
		return
	}

	if n.Ullman >= gc.UINF {
		if n.Op == gc.OINDREG {
			gc.Fatal("cgen: this is going to misscompile")
		}
		if res.Ullman >= gc.UINF {
			var n1 gc.Node
			gc.Tempname(&n1, n.Type)
			cgen(n, &n1)
			cgen(&n1, res)
			return
		}
	}

	if gc.Isfat(n.Type) {
		if n.Type.Width < 0 {
			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
		}
		sgen(n, res, n.Type.Width)
		return
	}

	if res.Addable == 0 {
		if n.Ullman > res.Ullman {
			var n1 gc.Node
			regalloc(&n1, n.Type, res)
			cgen(n, &n1)
			if n1.Ullman > res.Ullman {
				gc.Dump("n1", &n1)
				gc.Dump("res", res)
				gc.Fatal("loop in cgen")
			}

			cgen(&n1, res)
			regfree(&n1)
			return
		}

		var f int
		if res.Ullman >= gc.UINF {
			goto gen
		}

		if gc.Complexop(n, res) {
			gc.Complexgen(n, res)
			return
		}

		f = 1 // gen thru register
		switch n.Op {
		case gc.OLITERAL:
			if gc.Smallintconst(n) {
				f = 0
			}

		case gc.OREGISTER:
			f = 0
		}

		if !gc.Iscomplex[n.Type.Etype] {
			a := optoas(gc.OAS, res.Type)
			var addr obj.Addr
			if sudoaddable(a, res, &addr) {
				var p1 *obj.Prog
				if f != 0 {
					var n2 gc.Node
					regalloc(&n2, res.Type, nil)
					cgen(n, &n2)
					p1 = gins(a, &n2, nil)
					regfree(&n2)
				} else {
					p1 = gins(a, n, nil)
				}
				p1.To = addr
				if gc.Debug['g'] != 0 {
					fmt.Printf("%v [ignore previous line]\n", p1)
				}
				sudoclean()
				return
			}
		}

	gen:
		var n1 gc.Node
		igen(res, &n1, nil)
		cgen(n, &n1)
		regfree(&n1)
		return
	}

	// update addressability for string, slice
	// can't do in walk because n->left->addable
	// changes if n->left is an escaping local variable.
	switch n.Op {
	case gc.OSPTR,
		gc.OLEN:
		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
			n.Addable = n.Left.Addable
		}

	case gc.OCAP:
		if gc.Isslice(n.Left.Type) {
			n.Addable = n.Left.Addable
		}

	case gc.OITAB:
		n.Addable = n.Left.Addable
	}

	if gc.Complexop(n, res) {
		gc.Complexgen(n, res)
		return
	}

	// if both are addressable, move
	if n.Addable != 0 {
		if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
			gmove(n, res)
		} else {
			var n1 gc.Node
			regalloc(&n1, n.Type, nil)
			gmove(n, &n1)
			cgen(&n1, res)
			regfree(&n1)
		}

		return
	}

	nl := n.Left
	nr := n.Right

	if nl != nil && nl.Ullman >= gc.UINF {
		if nr != nil && nr.Ullman >= gc.UINF {
			var n1 gc.Node
			gc.Tempname(&n1, nl.Type)
			cgen(nl, &n1)
			n2 := *n
			n2.Left = &n1
			cgen(&n2, res)
			return
		}
	}

	if !gc.Iscomplex[n.Type.Etype] {
		a := optoas(gc.OAS, n.Type)
		var addr obj.Addr
		if sudoaddable(a, n, &addr) {
			if res.Op == gc.OREGISTER {
				p1 := gins(a, nil, res)
				p1.From = addr
			} else {
				var n2 gc.Node
				regalloc(&n2, n.Type, nil)
				p1 := gins(a, nil, &n2)
				p1.From = addr
				gins(a, &n2, res)
				regfree(&n2)
			}

			sudoclean()
			return
		}
	}

	// TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize
	// OGE, OLE, and ONE ourselves.
	// if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;

	var a int
	switch n.Op {
	default:
		gc.Dump("cgen", n)
		gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))

		// these call bgen to get a bool value
	case gc.OOROR,
		gc.OANDAND,
		gc.OEQ,
		gc.ONE,
		gc.OLT,
		gc.OLE,
		gc.OGE,
		gc.OGT,
		gc.ONOT:
		p1 := gc.Gbranch(ppc64.ABR, nil, 0)

		p2 := gc.Pc
		gmove(gc.Nodbool(true), res)
		p3 := gc.Gbranch(ppc64.ABR, nil, 0)
		gc.Patch(p1, gc.Pc)
		bgen(n, true, 0, p2)
		gmove(gc.Nodbool(false), res)
		gc.Patch(p3, gc.Pc)
		return

	case gc.OPLUS:
		cgen(nl, res)
		return

		// unary
	case gc.OCOM:
		a := optoas(gc.OXOR, nl.Type)

		var n1 gc.Node
		regalloc(&n1, nl.Type, nil)
		cgen(nl, &n1)
		var n2 gc.Node
		gc.Nodconst(&n2, nl.Type, -1)
		gins(a, &n2, &n1)
		gmove(&n1, res)
		regfree(&n1)
		return

	case gc.OMINUS:
		if gc.Isfloat[nl.Type.Etype] {
			nr = gc.Nodintconst(-1)
			gc.Convlit(&nr, n.Type)
			a = optoas(gc.OMUL, nl.Type)
			goto sbop
		}

		a := optoas(int(n.Op), nl.Type)
		// unary
		var n1 gc.Node
		regalloc(&n1, nl.Type, res)

		cgen(nl, &n1)
		gins(a, nil, &n1)
		gmove(&n1, res)
		regfree(&n1)
		return

		// symmetric binary
	case gc.OAND,
		gc.OOR,
		gc.OXOR,
		gc.OADD,
		gc.OMUL:
		a = optoas(int(n.Op), nl.Type)

		goto sbop

		// asymmetric binary
	case gc.OSUB:
		a = optoas(int(n.Op), nl.Type)

		goto abop

	case gc.OHMUL:
		cgen_hmul(nl, nr, res)

	case gc.OCONV:
		if n.Type.Width > nl.Type.Width {
			// If loading from memory, do conversion during load,
			// so as to avoid use of 8-bit register in, say, int(*byteptr).
			switch nl.Op {
			case gc.ODOT,
				gc.ODOTPTR,
				gc.OINDEX,
				gc.OIND,
				gc.ONAME:
				var n1 gc.Node
				igen(nl, &n1, res)
				var n2 gc.Node
				regalloc(&n2, n.Type, res)
				gmove(&n1, &n2)
				gmove(&n2, res)
				regfree(&n2)
				regfree(&n1)
				return
			}
		}

		var n1 gc.Node
		regalloc(&n1, nl.Type, res)
		var n2 gc.Node
		regalloc(&n2, n.Type, &n1)
		cgen(nl, &n1)

		// if we do the conversion n1 -> n2 here
		// reusing the register, then gmove won't
		// have to allocate its own register.
		gmove(&n1, &n2)

		gmove(&n2, res)
		regfree(&n2)
		regfree(&n1)

	case gc.ODOT,
		gc.ODOTPTR,
		gc.OINDEX,
		gc.OIND,
		gc.ONAME: // PHEAP or PPARAMREF var
		var n1 gc.Node
		igen(n, &n1, res)

		gmove(&n1, res)
		regfree(&n1)

		// interface table is first word of interface value
	case gc.OITAB:
		var n1 gc.Node
		igen(nl, &n1, res)

		n1.Type = n.Type
		gmove(&n1, res)
		regfree(&n1)

		// pointer is the first word of string or slice.
	case gc.OSPTR:
		if gc.Isconst(nl, gc.CTSTR) {
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)
			p1 := gins(ppc64.AMOVD, nil, &n1)
			gc.Datastring(nl.Val.U.Sval, &p1.From)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		var n1 gc.Node
		igen(nl, &n1, res)
		n1.Type = n.Type
		gmove(&n1, res)
		regfree(&n1)

	case gc.OLEN:
		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
			// map and chan have len in the first int-sized word.
			// a zero pointer means zero length
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)

			cgen(nl, &n1)

			var n2 gc.Node
			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)

			n2 = n1
			n2.Op = gc.OINDREG
			n2.Type = gc.Types[gc.Simtype[gc.TINT]]
			gmove(&n2, &n1)

			gc.Patch(p1, gc.Pc)

			gmove(&n1, res)
			regfree(&n1)
			break
		}

		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
			// both slice and string have len one pointer into the struct.
			// a zero pointer means zero length
			var n1 gc.Node
			igen(nl, &n1, res)

			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
			n1.Xoffset += int64(gc.Array_nel)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))

	case gc.OCAP:
		if gc.Istype(nl.Type, gc.TCHAN) {
			// chan has cap in the second int-sized word.
			// a zero pointer means zero length
			var n1 gc.Node
			regalloc(&n1, gc.Types[gc.Tptr], res)

			cgen(nl, &n1)

			var n2 gc.Node
			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
			p1 := gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)

			n2 = n1
			n2.Op = gc.OINDREG
			n2.Xoffset = int64(gc.Widthint)
			n2.Type = gc.Types[gc.Simtype[gc.TINT]]
			gmove(&n2, &n1)

			gc.Patch(p1, gc.Pc)

			gmove(&n1, res)
			regfree(&n1)
			break
		}

		if gc.Isslice(nl.Type) {
			var n1 gc.Node
			igen(nl, &n1, res)
			n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
			n1.Xoffset += int64(gc.Array_cap)
			gmove(&n1, res)
			regfree(&n1)
			break
		}

		gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))

	case gc.OADDR:
		if n.Bounded { // let race detector avoid nil checks
			gc.Disable_checknil++
		}
		agen(nl, res)
		if n.Bounded {
			gc.Disable_checknil--
		}

	case gc.OCALLMETH:
		gc.Cgen_callmeth(n, 0)
		cgen_callret(n, res)

	case gc.OCALLINTER:
		cgen_callinter(n, res, 0)
		cgen_callret(n, res)

	case gc.OCALLFUNC:
		cgen_call(n, 0)
		cgen_callret(n, res)

	case gc.OMOD,
		gc.ODIV:
		if gc.Isfloat[n.Type.Etype] {
			a = optoas(int(n.Op), nl.Type)
			goto abop
		}

		if nl.Ullman >= nr.Ullman {
			var n1 gc.Node
			regalloc(&n1, nl.Type, res)
			cgen(nl, &n1)
			cgen_div(int(n.Op), &n1, nr, res)
			regfree(&n1)
		} else {
			var n2 gc.Node
			if !gc.Smallintconst(nr) {
				regalloc(&n2, nr.Type, res)
				cgen(nr, &n2)
			} else {
				n2 = *nr
			}

			cgen_div(int(n.Op), nl, &n2, res)
			if n2.Op != gc.OLITERAL {
				regfree(&n2)
			}
		}

	case gc.OLSH,
		gc.ORSH,
		gc.OLROT:
		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
	}

	return

	/*
	 * put simplest on right - we'll generate into left
	 * and then adjust it using the computation of right.
	 * constants and variables have the same ullman
	 * count, so look for constants specially.
	 *
	 * an integer constant we can use as an immediate
	 * is simpler than a variable - we can use the immediate
	 * in the adjustment instruction directly - so it goes
	 * on the right.
	 *
	 * other constants, like big integers or floating point
	 * constants, require a mov into a register, so those
	 * might as well go on the left, so we can reuse that
	 * register for the computation.
	 */
sbop: // symmetric binary
	if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
		r := nl
		nl = nr
		nr = r
	}

abop: // asymmetric binary
	var n1 gc.Node
	var n2 gc.Node
	if nl.Ullman >= nr.Ullman {
		regalloc(&n1, nl.Type, res)
		cgen(nl, &n1)

		/*
			 * This generates smaller code - it avoids a MOV - but it's
			 * easily 10% slower due to not being able to
			 * optimize/manipulate the move.
			 * To see, run: go test -bench . crypto/md5
			 * with and without.
			 *
				if(sudoaddable(a, nr, &addr)) {
					p1 = gins(a, N, &n1);
					p1->from = addr;
					gmove(&n1, res);
					sudoclean();
					regfree(&n1);
					goto ret;
				}
			 *
		*/
		// TODO(minux): enable using constants directly in certain instructions.
		//if(smallintconst(nr))
		//	n2 = *nr;
		//else {
		regalloc(&n2, nr.Type, nil)

		cgen(nr, &n2)
	} else //}
	{
		//if(smallintconst(nr))
		//	n2 = *nr;
		//else {
		regalloc(&n2, nr.Type, res)

		cgen(nr, &n2)

		//}
		regalloc(&n1, nl.Type, nil)

		cgen(nl, &n1)
	}

	gins(a, &n2, &n1)

	// Normalize result for types smaller than word.
	if n.Type.Width < int64(gc.Widthreg) {
		switch n.Op {
		case gc.OADD,
			gc.OSUB,
			gc.OMUL,
			gc.OLSH:
			gins(optoas(gc.OAS, n.Type), &n1, &n1)
		}
	}

	gmove(&n1, res)
	regfree(&n1)
	if n2.Op != gc.OLITERAL {
		regfree(&n2)
	}
	return
}
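// Hedged sketch (illustrative, not from the project): the sbop swap above is
// driven by Ullman numbers, an estimate of how many registers a subtree
// needs. Evaluating the register-hungrier operand first lets the cheaper one
// be folded in without spilling. evalOrder is a hypothetical illustration of
// the same rule on a tiny expression tree.
type exprNode struct {
	left, right *exprNode
	ullman      int // roughly: registers needed to evaluate this subtree
}

func evalOrder(e *exprNode) (first, second *exprNode) {
	if e.left.ullman >= e.right.ullman {
		return e.left, e.right // harder side first, reuse its register
	}
	return e.right, e.left
}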
Code Example #22
File: cgen64.go  Project: tidatida/go
/*
 * attempt to generate 64-bit
 *	res = n
 * return 1 on success, 0 if op not handled.
 */
func cgen64(n *gc.Node, res *gc.Node) {
	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	l := n.Left
	var t1 gc.Node
	if l.Addable == 0 {
		gc.Tempname(&t1, l.Type)
		gc.Cgen(l, &t1)
		l = &t1
	}

	var hi1 gc.Node
	var lo1 gc.Node
	split64(l, &lo1, &hi1)
	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))

	case gc.OMINUS:
		var lo2 gc.Node
		var hi2 gc.Node
		split64(res, &lo2, &hi2)

		gc.Regalloc(&t1, lo1.Type, nil)
		var al gc.Node
		gc.Regalloc(&al, lo1.Type, nil)
		var ah gc.Node
		gc.Regalloc(&ah, hi1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)

		gmove(ncon(0), &t1)
		p1 := gins(arm.ASUB, &al, &t1)
		p1.Scond |= arm.C_SBIT
		gins(arm.AMOVW, &t1, &lo2)

		gmove(ncon(0), &t1)
		gins(arm.ASBC, &ah, &t1)
		gins(arm.AMOVW, &t1, &hi2)

		gc.Regfree(&t1)
		gc.Regfree(&al)
		gc.Regfree(&ah)
		splitclean()
		splitclean()
		return

	case gc.OCOM:
		gc.Regalloc(&t1, lo1.Type, nil)
		gmove(ncon(^uint32(0)), &t1)

		var lo2 gc.Node
		var hi2 gc.Node
		split64(res, &lo2, &hi2)
		var n1 gc.Node
		gc.Regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &lo2)

		gins(arm.AMOVW, &hi1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &hi2)

		gc.Regfree(&t1)
		gc.Regfree(&n1)
		splitclean()
		splitclean()
		return

		// binary operators.
	// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR,
		gc.OLROT:
		break
	}

	// setup for binary operators
	r := n.Right

	if r != nil && r.Addable == 0 {
		var t2 gc.Node
		gc.Tempname(&t2, r.Type)
		gc.Cgen(r, &t2)
		r = &t2
	}

	var hi2 gc.Node
	var lo2 gc.Node
	if gc.Is64(r.Type) {
		split64(r, &lo2, &hi2)
	}

	var al gc.Node
	gc.Regalloc(&al, lo1.Type, nil)
	var ah gc.Node
	gc.Regalloc(&ah, hi1.Type, nil)

	// Do op.  Leave result in ah:al.
	switch n.Op {
	default:
		gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))

		// TODO: Constants
	case gc.OADD:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi2, &bh)
		gins(arm.AMOVW, &lo2, &bl)
		p1 := gins(arm.AADD, &bl, &al)
		p1.Scond |= arm.C_SBIT
		gins(arm.AADC, &bh, &ah)
		gc.Regfree(&bl)
		gc.Regfree(&bh)

		// TODO: Constants.
	case gc.OSUB:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &bl)
		gins(arm.AMOVW, &hi2, &bh)
		p1 := gins(arm.ASUB, &bl, &al)
		p1.Scond |= arm.C_SBIT
		gins(arm.ASBC, &bh, &ah)
		gc.Regfree(&bl)
		gc.Regfree(&bh)

		// TODO(kaib): this can be done with 4 regs and does not need 6
	case gc.OMUL:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		var cl gc.Node
		gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
		var ch gc.Node
		gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)

		// load args into bh:bl and ch:cl.
		gins(arm.AMOVW, &hi1, &bh)

		gins(arm.AMOVW, &lo1, &bl)
		gins(arm.AMOVW, &hi2, &ch)
		gins(arm.AMOVW, &lo2, &cl)

		// bl * cl -> ah al
		p1 := gins(arm.AMULLU, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Val.U.Reg
		p1.Reg = cl.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(al.Val.U.Reg)

		//print("%P\n", p1);

		// bl * ch + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Val.U.Reg
		p1.Reg = ch.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(ah.Val.U.Reg)

		//print("%P\n", p1);

		// bh * cl + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bh.Val.U.Reg
		p1.Reg = cl.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(ah.Val.U.Reg)

		//print("%P\n", p1);

		gc.Regfree(&bh)

		gc.Regfree(&bl)
		gc.Regfree(&ch)
		gc.Regfree(&cl)

		// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v := uint64(gc.Mpgetfix(r.Val.U.Xval))

		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32

			gins(arm.AMOVW, &hi1, &bl)
			gins(arm.AMOVW, &lo1, &bh)
		} else {
			gins(arm.AMOVW, &hi1, &bh)
			gins(arm.AMOVW, &lo1, &bl)
		}

		if v == 0 {
			gins(arm.AMOVW, &bh, &ah)
			gins(arm.AMOVW, &bl, &al)
		} else {
			// rotate by 1 <= v <= 31
			//	MOVW	bl<<v, al
			//	MOVW	bh<<v, ah
			//	OR		bl>>(32-v), ah
			//	OR		bh>>(32-v), al
			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
		}

		gc.Regfree(&bl)
		gc.Regfree(&bh)

	case gc.OLSH:
		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		var p6 *obj.Prog
		var s gc.Node
		var n1 gc.Node
		var creg gc.Node
		var p1 *obj.Prog
		var p2 *obj.Prog
		var p3 *obj.Prog
		var p4 *obj.Prog
		var p5 *obj.Prog
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
				// here and below (verify it optimizes to EOR)
				gins(arm.AEOR, &al, &al)

				gins(arm.AEOR, &ah, &ah)
			} else if v > 32 {
				gins(arm.AEOR, &al, &al)

				//	MOVW	bl<<(v-32), ah
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
			} else if v == 32 {
				gins(arm.AEOR, &al, &al)
				gins(arm.AMOVW, &bl, &ah)
			} else if v > 0 {
				//	MOVW	bl<<v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

				//	MOVW	bh<<v, ah
				gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)

				//	OR		bl>>(32-v), ah
				gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto olsh_break
		}

		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) {
			// shift is >= 1<<32
			var cl gc.Node
			var ch gc.Node
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		bl<<s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW.LO		bh<<s, ah
		p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		s, creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	OR.LO		bl>>creg, ah
		p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)

		p1.Scond = arm.C_SCOND_LO

		//	BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bl, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	EOR.LO	al, al
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW.LO		creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW	bl<<s, ah
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO

		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		gins(arm.AEOR, &al, &al)
		gins(arm.AEOR, &ah, &ah)

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		gc.Regfree(&s)
		gc.Regfree(&creg)

	olsh_break:
		gc.Regfree(&bl)
		gc.Regfree(&bh)

	case gc.ORSH:
		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		var p4 *obj.Prog
		var p5 *obj.Prog
		var n1 gc.Node
		var p6 *obj.Prog
		var s gc.Node
		var p1 *obj.Prog
		var p2 *obj.Prog
		var creg gc.Node
		var p3 *obj.Prog
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->31, al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)

					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &al, &al)
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 32 {
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)

					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					//	MOVW	bh>>(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)

					gins(arm.AEOR, &ah, &ah)
				}
			} else if v == 32 {
				gins(arm.AMOVW, &bh, &al)
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 0 {
				//	MOVW	bl>>v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)

				//	OR		bh<<(32-v), al
				gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)

				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
				} else {
					//	MOVW	bh>>v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
				}
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto orsh_break
		}

		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) {
			// shift is >= 1<<32
			var ch gc.Node
			var cl gc.Node
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			var p1 *obj.Prog
			if bh.Type.Etype == gc.TINT32 {
				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
			} else {
				p1 = gins(arm.AEOR, &ah, &ah)
			}
			p1.Scond = arm.C_SCOND_NE
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		bl>>s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		s,creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	OR.LO		bh<<(32-s), al
		p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
		} else {
			//	MOVW	bh>>s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
		}

		p1.Scond = arm.C_SCOND_LO

		//	BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AMOVW, &bh, &al)

		p1.Scond = arm.C_SCOND_EQ
		if bh.Type.Etype == gc.TINT32 {
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
		} else {
			gins(arm.AEOR, &ah, &ah)
		}
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->(s-32), al
			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		} else {
			//	MOVW	bh>>(s-32), al
			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		}

		//	BLO	end
		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// s >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->31, al
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
		} else {
			gins(arm.AEOR, &al, &al)
		}

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		gc.Regfree(&s)
		gc.Regfree(&creg)

	orsh_break:
		gc.Regfree(&bl)
		gc.Regfree(&bh)

		// TODO(kaib): literal optimizations
	// make constant the right side (it usually is anyway).
	//		if(lo1.op == OLITERAL) {
	//			nswap(&lo1, &lo2);
	//			nswap(&hi1, &hi2);
	//		}
	//		if(lo2.op == OLITERAL) {
	//			// special cases for constants.
	//			lv = mpgetfix(lo2.val.u.xval);
	//			hv = mpgetfix(hi2.val.u.xval);
	//			splitclean();	// right side
	//			split64(res, &lo2, &hi2);
	//			switch(n->op) {
	//			case OXOR:
	//				gmove(&lo1, &lo2);
	//				gmove(&hi1, &hi2);
	//				switch(lv) {
	//				case 0:
	//					break;
	//				case 0xffffffffu:
	//					gins(ANOTL, N, &lo2);
	//					break;
	//				default:
	//					gins(AXORL, ncon(lv), &lo2);
	//					break;
	//				}
	//				switch(hv) {
	//				case 0:
	//					break;
	//				case 0xffffffffu:
	//					gins(ANOTL, N, &hi2);
	//					break;
	//				default:
	//					gins(AXORL, ncon(hv), &hi2);
	//					break;
	//				}
	//				break;

	//			case OAND:
	//				switch(lv) {
	//				case 0:
	//					gins(AMOVL, ncon(0), &lo2);
	//					break;
	//				default:
	//					gmove(&lo1, &lo2);
	//					if(lv != 0xffffffffu)
	//						gins(AANDL, ncon(lv), &lo2);
	//					break;
	//				}
	//				switch(hv) {
	//				case 0:
	//					gins(AMOVL, ncon(0), &hi2);
	//					break;
	//				default:
	//					gmove(&hi1, &hi2);
	//					if(hv != 0xffffffffu)
	//						gins(AANDL, ncon(hv), &hi2);
	//					break;
	//				}
	//				break;

	//			case OOR:
	//				switch(lv) {
	//				case 0:
	//					gmove(&lo1, &lo2);
	//					break;
	//				case 0xffffffffu:
	//					gins(AMOVL, ncon(0xffffffffu), &lo2);
	//					break;
	//				default:
	//					gmove(&lo1, &lo2);
	//					gins(AORL, ncon(lv), &lo2);
	//					break;
	//				}
	//				switch(hv) {
	//				case 0:
	//					gmove(&hi1, &hi2);
	//					break;
	//				case 0xffffffffu:
	//					gins(AMOVL, ncon(0xffffffffu), &hi2);
	//					break;
	//				default:
	//					gmove(&hi1, &hi2);
	//					gins(AORL, ncon(hv), &hi2);
	//					break;
	//				}
	//				break;
	//			}
	//			splitclean();
	//			splitclean();
	//			goto out;
	//		}
	case gc.OXOR,
		gc.OAND,
		gc.OOR:
		var n1 gc.Node
		gc.Regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &al)
		gins(arm.AMOVW, &hi2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
		gc.Regfree(&n1)
	}

	if gc.Is64(r.Type) {
		splitclean()
	}
	splitclean()

	split64(res, &lo1, &hi1)
	gins(arm.AMOVW, &al, &lo1)
	gins(arm.AMOVW, &ah, &hi1)
	splitclean()

	//out:
	gc.Regfree(&al)

	gc.Regfree(&ah)
}
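// Hedged sketch (illustrative): the OLROT case above rotates a 64-bit value
// held as two 32-bit halves by a constant c in [0,64). In plain Go the same
// steps are a half swap for c >= 32 followed by a pairwise shift/or:
func rotl64Halves(hi, lo uint32, c uint) (uint32, uint32) {
	if c >= 32 {
		hi, lo = lo, hi // the first 32 bits of rotation are a swap
		c -= 32
	}
	if c == 0 {
		return hi, lo
	}
	newHi := hi<<c | lo>>(32-c)
	newLo := lo<<c | hi>>(32-c)
	return newHi, newLo
}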
Code Example #23
File: cgen.go  Project: klueska/go-akaros
/*
 * allocate a register (reusing res if possible) and generate
 * a = &n
 * The caller must call regfree(a).
 * The generated code checks that the result is not nil.
 */
func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("agenr-n", n)
	}

	nl := n.Left
	nr := n.Right

	switch n.Op {
	case gc.ODOT,
		gc.ODOTPTR,
		gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		var n1 gc.Node
		igen(n, &n1, res)
		regalloc(a, gc.Types[gc.Tptr], &n1)
		agen(&n1, a)
		regfree(&n1)

	case gc.OIND:
		cgenr(n.Left, a, res)
		gc.Cgen_checknil(a)

	case gc.OINDEX:
		var p2 *obj.Prog // to be patched to panicindex.
		w := uint32(n.Type.Width)

		//bounded = debug['B'] || n->bounded;
		var n3 gc.Node
		var n1 gc.Node
		if nr.Addable != 0 {
			var tmp gc.Node
			if !gc.Isconst(nr, gc.CTINT) {
				gc.Tempname(&tmp, gc.Types[gc.TINT64])
			}
			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
			if !gc.Isconst(nr, gc.CTINT) {
				cgen(nr, &tmp)
				regalloc(&n1, tmp.Type, nil)
				gmove(&tmp, &n1)
			}
		} else if nl.Addable != 0 {
			if !gc.Isconst(nr, gc.CTINT) {
				var tmp gc.Node
				gc.Tempname(&tmp, gc.Types[gc.TINT64])
				cgen(nr, &tmp)
				regalloc(&n1, tmp.Type, nil)
				gmove(&tmp, &n1)
			}

			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
		} else {
			var tmp gc.Node
			gc.Tempname(&tmp, gc.Types[gc.TINT64])
			cgen(nr, &tmp)
			nr = &tmp
			if !gc.Isconst(nl, gc.CTSTR) {
				agenr(nl, &n3, res)
			}
			regalloc(&n1, tmp.Type, nil)
			gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
		}

		// &a is in &n3 (allocated in res)
		// i is in &n1 (if not constant)
		// w is width

		// constant index
		if gc.Isconst(nr, gc.CTINT) {
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Fatal("constant string constant index")
			}
			v := uint64(gc.Mpgetfix(nr.Val.U.Xval))
			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				if gc.Debug['B'] == 0 && !n.Bounded {
					n1 = n3
					n1.Op = gc.OINDREG
					n1.Type = gc.Types[gc.Tptr]
					n1.Xoffset = int64(gc.Array_nel)
					var n4 gc.Node
					regalloc(&n4, n1.Type, nil)
					gmove(&n1, &n4)
					ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
					regfree(&n4)
					p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
					ginscall(gc.Panicindex, 0)
					gc.Patch(p1, gc.Pc)
				}

				n1 = n3
				n1.Op = gc.OINDREG
				n1.Type = gc.Types[gc.Tptr]
				n1.Xoffset = int64(gc.Array_array)
				gmove(&n1, &n3)
			}

			if v*uint64(w) != 0 {
				ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3)
			}

			*a = n3
			break
		}

		var n2 gc.Node
		regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
		gmove(&n1, &n2)
		regfree(&n1)

		var n4 gc.Node
		if gc.Debug['B'] == 0 && !n.Bounded {
			// check bounds
			if gc.Isconst(nl, gc.CTSTR) {
				gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval)))
			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
				n1 = n3
				n1.Op = gc.OINDREG
				n1.Type = gc.Types[gc.Tptr]
				n1.Xoffset = int64(gc.Array_nel)
				regalloc(&n4, gc.Types[gc.TUINT64], nil)
				gmove(&n1, &n4)
			} else {
				if nl.Type.Bound < (1<<15)-1 {
					gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
				} else {
					regalloc(&n4, gc.Types[gc.TUINT64], nil)
					p1 := gins(ppc64.AMOVD, nil, &n4)
					p1.From.Type = obj.TYPE_CONST
					p1.From.Offset = nl.Type.Bound
				}
			}

			gins(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4)
			if n4.Op == gc.OREGISTER {
				regfree(&n4)
			}
			p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
			if p2 != nil {
				gc.Patch(p2, gc.Pc)
			}
			ginscall(gc.Panicindex, 0)
			gc.Patch(p1, gc.Pc)
		}

		if gc.Isconst(nl, gc.CTSTR) {
			regalloc(&n3, gc.Types[gc.Tptr], res)
			p1 := gins(ppc64.AMOVD, nil, &n3)
			gc.Datastring(nl.Val.U.Sval, &p1.From)
			p1.From.Type = obj.TYPE_ADDR
		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
			n1 = n3
			n1.Op = gc.OINDREG
			n1.Type = gc.Types[gc.Tptr]
			n1.Xoffset = int64(gc.Array_array)
			gmove(&n1, &n3)
		}

		if w == 0 {
			// nothing to do
		} else if w == 1 {
			// w already scaled
			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
			// TODO(minux): scale using a shift when w == 2, 4 or 8
		} else {
			regalloc(&n4, gc.Types[gc.TUINT64], nil)
			gc.Nodconst(&n1, gc.Types[gc.TUINT64], int64(w))
			gmove(&n1, &n4)
			gins(optoas(gc.OMUL, gc.Types[gc.TUINT64]), &n4, &n2)
			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
			regfree(&n4)
		}

		*a = n3
		regfree(&n2)

	default:
		regalloc(a, gc.Types[gc.Tptr], res)
		agen(n, a)
	}
}
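// Hedged sketch (illustrative, not from the project): the OINDEX case above
// reduces &a[i] to a bounds check followed by base + i*elemsize, which is
// what the generated CMP/MOVD/MUL/ADD sequence computes. indexAddr is a
// hypothetical model of that arithmetic; the real code calls panicindex
// instead of panicking directly.
func indexAddr(base uintptr, i, length, elemSize uint64) uintptr {
	if i >= length {
		panic("index out of range") // stands in for ginscall(gc.Panicindex, 0)
	}
	return base + uintptr(i*elemSize)
}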
Code Example #24
File: gsubr.go  Project: bibbyflyaway/go
/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;

	//	if(f != N && f->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}
	//	if(t != N && t->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}

	if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
		// Turn MOVL $xxx into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
		if as == x86.AMOVL {
			as = x86.ALEAL
		} else {
			as = x86.ALEAQ
		}
		f = f.Left
	}

	switch as {
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL,
		x86.AMOVQ,
		x86.AMOVSS,
		x86.AMOVSD:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAQ:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatal("gins LEAQ nil %v", f.Type)
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4

	case x86.AMOVQ:
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatal("bad use of addr: %v", p)
	}

	return p
}
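A hedged usage sketch of gins (assuming it sits inside this same back end, where the gc, x86, and obj packages and the helpers above are in scope; emitMoveAXtoCX is a hypothetical name): build two register operands with gc.Nodreg and emit a single MOVQ between them.

// Sketch only, under the assumptions stated above.
func emitMoveAXtoCX() *obj.Prog {
	var ax, cx gc.Node
	gc.Nodreg(&ax, gc.Types[gc.TINT64], x86.REG_AX)
	gc.Nodreg(&cx, gc.Types[gc.TINT64], x86.REG_CX)
	return gins(x86.AMOVQ, &ax, &cx) // MOVQ AX, CX; the returned Prog can still be patched
}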
Code example #25
0
File: gsubr.go Project: xslonepiece/goios
/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
		gc.Fatal("gins MOVF reg, reg")
	}
	if as == x86.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
		gc.Fatal("gins CVTSD2SS const")
	}
	if as == x86.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Reg == x86.REG_F0 {
		gc.Fatal("gins MOVSD into F0")
	}

	if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
		// Turn MOVL $xxx(FP/SP) into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
		as = x86.ALEAL
		f = f.Left
	}

	switch as {
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAL:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := 0
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4
	}

	if w != 0 && f != nil && (p.From.Width > int64(w) || p.To.Width > int64(w)) {
		gc.Dump("bad width from:", f)
		gc.Dump("bad width to:", t)
		gc.Fatal("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatal("bad use of addr: %v", p)
	}

	return p
}
Code example #26
0
File: cgen64.go Project: xslonepiece/goios
/*
 * generate 64-bit
 *	res = n
 * using 32-bit halves; ops that are not handled are fatal errors.
 */
func cgen64(n *gc.Node, res *gc.Node) {
	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))

	case gc.OMINUS:
		gc.Cgen(n.Left, res)
		var hi1 gc.Node
		var lo1 gc.Node
		split64(res, &lo1, &hi1)
		gins(x86.ANEGL, nil, &lo1)
		gins(x86.AADCL, ncon(0), &hi1)
		gins(x86.ANEGL, nil, &hi1)
		splitclean()
		return

	case gc.OCOM:
		gc.Cgen(n.Left, res)
		var lo1 gc.Node
		var hi1 gc.Node
		split64(res, &lo1, &hi1)
		gins(x86.ANOTL, nil, &lo1)
		gins(x86.ANOTL, nil, &hi1)
		splitclean()
		return

		// binary operators.
	// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLROT,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR:
		break
	}

	l := n.Left
	r := n.Right
	if !l.Addable {
		var t1 gc.Node
		gc.Tempname(&t1, l.Type)
		gc.Cgen(l, &t1)
		l = &t1
	}

	if r != nil && !r.Addable {
		var t2 gc.Node
		gc.Tempname(&t2, r.Type)
		gc.Cgen(r, &t2)
		r = &t2
	}

	var ax gc.Node
	gc.Nodreg(&ax, gc.Types[gc.TINT32], x86.REG_AX)
	var cx gc.Node
	gc.Nodreg(&cx, gc.Types[gc.TINT32], x86.REG_CX)
	var dx gc.Node
	gc.Nodreg(&dx, gc.Types[gc.TINT32], x86.REG_DX)

	// Setup for binary operation.
	var hi1 gc.Node
	var lo1 gc.Node
	split64(l, &lo1, &hi1)

	var lo2 gc.Node
	var hi2 gc.Node
	if gc.Is64(r.Type) {
		split64(r, &lo2, &hi2)
	}

	// Do op.  Leave result in DX:AX.
	switch n.Op {
	// TODO: Constants
	case gc.OADD:
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.AADDL, &lo2, &ax)
		gins(x86.AADCL, &hi2, &dx)

		// TODO: Constants.
	case gc.OSUB:
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.ASUBL, &lo2, &ax)
		gins(x86.ASBBL, &hi2, &dx)

		// let's call the next two EX and FX.
	case gc.OMUL:
		var ex gc.Node
		gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)

		var fx gc.Node
		gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)

		// load args into DX:AX and EX:CX.
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)
		gins(x86.AMOVL, &lo2, &cx)
		gins(x86.AMOVL, &hi2, &ex)

		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
		gins(x86.AMOVL, &dx, &fx)

		gins(x86.AORL, &ex, &fx)
		p1 := gc.Gbranch(x86.AJNE, nil, 0)
		gins(x86.AMULL, &cx, nil) // implicit &ax
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// full 64x64 -> 64, from 32x32 -> 64.
		gins(x86.AIMULL, &cx, &dx)

		gins(x86.AMOVL, &ax, &fx)
		gins(x86.AIMULL, &ex, &fx)
		gins(x86.AADDL, &dx, &fx)
		gins(x86.AMOVL, &cx, &dx)
		gins(x86.AMULL, &dx, nil) // implicit &ax
		gins(x86.AADDL, &fx, &dx)
		gc.Patch(p2, gc.Pc)

		gc.Regfree(&ex)
		gc.Regfree(&fx)

		// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v := uint64(gc.Mpgetfix(r.Val.U.Xval))

		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32

			gins(x86.AMOVL, &lo1, &dx)
			gins(x86.AMOVL, &hi1, &ax)
		} else {
			gins(x86.AMOVL, &lo1, &ax)
			gins(x86.AMOVL, &hi1, &dx)
		}

		if v != 0 { // v == 0: rotate already complete
			gins(x86.AMOVL, &dx, &cx)
			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
			p1.From.Index = x86.REG_AX // double-width shift
			p1.From.Scale = 0
			p1 = gins(x86.ASHLL, ncon(uint32(v)), &ax)
			p1.From.Index = x86.REG_CX // double-width shift
			p1.From.Scale = 0
		}

	case gc.OLSH:
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				splitclean()
				split64(res, &lo2, &hi2)
				gins(x86.AMOVL, ncon(0), &lo2)
				gins(x86.AMOVL, ncon(0), &hi2)
				splitclean()
				return
			}

			if v >= 32 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				split64(res, &lo2, &hi2)
				gmove(&lo1, &hi2)
				if v > 32 {
					gins(x86.ASHLL, ncon(uint32(v-32)), &hi2)
				}

				gins(x86.AMOVL, ncon(0), &lo2)
				splitclean()
				splitclean()
				return
			}

			// general shift
			gins(x86.AMOVL, &lo1, &ax)

			gins(x86.AMOVL, &hi1, &dx)
			p1 := gins(x86.ASHLL, ncon(uint32(v)), &dx)
			p1.From.Index = x86.REG_AX // double-width shift
			p1.From.Scale = 0
			gins(x86.ASHLL, ncon(uint32(v)), &ax)
			break
		}

		// load value into DX:AX.
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)

		// load shift value into register.
		// if high bits are set, zero value.
		var p1 *obj.Prog

		if gc.Is64(r.Type) {
			gins(x86.ACMPL, &hi2, ncon(0))
			p1 = gc.Gbranch(x86.AJNE, nil, +1)
			gins(x86.AMOVL, &lo2, &cx)
		} else {
			cx.Type = gc.Types[gc.TUINT32]
			gmove(r, &cx)
		}

		// if shift count is >=64, zero value
		gins(x86.ACMPL, &cx, ncon(64))

		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		if p1 != nil {
			gc.Patch(p1, gc.Pc)
		}
		gins(x86.AXORL, &dx, &dx)
		gins(x86.AXORL, &ax, &ax)
		gc.Patch(p2, gc.Pc)

		// if shift count is >= 32, zero low.
		gins(x86.ACMPL, &cx, ncon(32))

		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		gins(x86.AMOVL, &ax, &dx)
		gins(x86.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
		gins(x86.AXORL, &ax, &ax)
		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// general shift
		p1 = gins(x86.ASHLL, &cx, &dx)

		p1.From.Index = x86.REG_AX // double-width shift
		p1.From.Scale = 0
		gins(x86.ASHLL, &cx, &ax)
		gc.Patch(p2, gc.Pc)

	case gc.ORSH:
		if r.Op == gc.OLITERAL {
			v := uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				splitclean()
				split64(res, &lo2, &hi2)
				if hi1.Type.Etype == gc.TINT32 {
					gmove(&hi1, &lo2)
					gins(x86.ASARL, ncon(31), &lo2)
					gmove(&hi1, &hi2)
					gins(x86.ASARL, ncon(31), &hi2)
				} else {
					gins(x86.AMOVL, ncon(0), &lo2)
					gins(x86.AMOVL, ncon(0), &hi2)
				}

				splitclean()
				return
			}

			if v >= 32 {
				if gc.Is64(r.Type) {
					splitclean()
				}
				split64(res, &lo2, &hi2)
				gmove(&hi1, &lo2)
				if v > 32 {
					gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
				}
				if hi1.Type.Etype == gc.TINT32 {
					gmove(&hi1, &hi2)
					gins(x86.ASARL, ncon(31), &hi2)
				} else {
					gins(x86.AMOVL, ncon(0), &hi2)
				}
				splitclean()
				splitclean()
				return
			}

			// general shift
			gins(x86.AMOVL, &lo1, &ax)

			gins(x86.AMOVL, &hi1, &dx)
			p1 := gins(x86.ASHRL, ncon(uint32(v)), &ax)
			p1.From.Index = x86.REG_DX // double-width shift
			p1.From.Scale = 0
			gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
			break
		}

		// load value into DX:AX.
		gins(x86.AMOVL, &lo1, &ax)

		gins(x86.AMOVL, &hi1, &dx)

		// load shift value into register.
		// if high bits are set, zero value.
		var p1 *obj.Prog

		if gc.Is64(r.Type) {
			gins(x86.ACMPL, &hi2, ncon(0))
			p1 = gc.Gbranch(x86.AJNE, nil, +1)
			gins(x86.AMOVL, &lo2, &cx)
		} else {
			cx.Type = gc.Types[gc.TUINT32]
			gmove(r, &cx)
		}

		// if shift count is >=64, zero or sign-extend value
		gins(x86.ACMPL, &cx, ncon(64))

		p2 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		if p1 != nil {
			gc.Patch(p1, gc.Pc)
		}
		if hi1.Type.Etype == gc.TINT32 {
			gins(x86.ASARL, ncon(31), &dx)
			gins(x86.AMOVL, &dx, &ax)
		} else {
			gins(x86.AXORL, &dx, &dx)
			gins(x86.AXORL, &ax, &ax)
		}

		gc.Patch(p2, gc.Pc)

		// if shift count is >= 32, sign-extend hi.
		gins(x86.ACMPL, &cx, ncon(32))

		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
		gins(x86.AMOVL, &dx, &ax)
		if hi1.Type.Etype == gc.TINT32 {
			gins(x86.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
			gins(x86.ASARL, ncon(31), &dx)
		} else {
			gins(x86.ASHRL, &cx, &ax)
			gins(x86.AXORL, &dx, &dx)
		}

		p2 = gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)

		// general shift
		p1 = gins(x86.ASHRL, &cx, &ax)

		p1.From.Index = x86.REG_DX // double-width shift
		p1.From.Scale = 0
		gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
		gc.Patch(p2, gc.Pc)

		// make constant the right side (it usually is anyway).
	case gc.OXOR,
		gc.OAND,
		gc.OOR:
		if lo1.Op == gc.OLITERAL {
			nswap(&lo1, &lo2)
			nswap(&hi1, &hi2)
		}

		if lo2.Op == gc.OLITERAL {
			// special cases for constants.
			lv := uint32(gc.Mpgetfix(lo2.Val.U.Xval))

			hv := uint32(gc.Mpgetfix(hi2.Val.U.Xval))
			splitclean() // right side
			split64(res, &lo2, &hi2)
			switch n.Op {
			case gc.OXOR:
				gmove(&lo1, &lo2)
				gmove(&hi1, &hi2)
				switch lv {
				case 0:
					break

				case 0xffffffff:
					gins(x86.ANOTL, nil, &lo2)

				default:
					gins(x86.AXORL, ncon(lv), &lo2)
				}

				switch hv {
				case 0:
					break

				case 0xffffffff:
					gins(x86.ANOTL, nil, &hi2)

				default:
					gins(x86.AXORL, ncon(hv), &hi2)
				}

			case gc.OAND:
				switch lv {
				case 0:
					gins(x86.AMOVL, ncon(0), &lo2)

				default:
					gmove(&lo1, &lo2)
					if lv != 0xffffffff {
						gins(x86.AANDL, ncon(lv), &lo2)
					}
				}

				switch hv {
				case 0:
					gins(x86.AMOVL, ncon(0), &hi2)

				default:
					gmove(&hi1, &hi2)
					if hv != 0xffffffff {
						gins(x86.AANDL, ncon(hv), &hi2)
					}
				}

			case gc.OOR:
				switch lv {
				case 0:
					gmove(&lo1, &lo2)

				case 0xffffffff:
					gins(x86.AMOVL, ncon(0xffffffff), &lo2)

				default:
					gmove(&lo1, &lo2)
					gins(x86.AORL, ncon(lv), &lo2)
				}

				switch hv {
				case 0:
					gmove(&hi1, &hi2)

				case 0xffffffff:
					gins(x86.AMOVL, ncon(0xffffffff), &hi2)

				default:
					gmove(&hi1, &hi2)
					gins(x86.AORL, ncon(hv), &hi2)
				}
			}

			splitclean()
			splitclean()
			return
		}

		gins(x86.AMOVL, &lo1, &ax)
		gins(x86.AMOVL, &hi1, &dx)
		gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
		gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
	}

	if gc.Is64(r.Type) {
		splitclean()
	}
	splitclean()

	split64(res, &lo1, &hi1)
	gins(x86.AMOVL, &ax, &lo1)
	gins(x86.AMOVL, &dx, &hi1)
	splitclean()
}
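On 386 every 64-bit value is kept as two 32-bit halves, so the OADD case becomes an ADDL/ADCL pair and a constant shift of 32 or more simply moves the low half into the high half. The same arithmetic in ordinary Go, as a self-contained illustration (not compiler code):

package main

import "fmt"

// add64 mirrors ADDL lo / ADCL hi: add the low words, then add the high
// words plus the carry out of the low addition.
func add64(hi1, lo1, hi2, lo2 uint32) (hi, lo uint32) {
	lo = lo1 + lo2
	carry := uint32(0)
	if lo < lo1 { // unsigned overflow of the low word
		carry = 1
	}
	hi = hi1 + hi2 + carry
	return
}

// shl64 mirrors the constant-shift cases: 64 or more clears both halves,
// 32 or more moves lo into hi, smaller counts use a double-width shift.
func shl64(hi, lo uint32, s uint) (uint32, uint32) {
	switch {
	case s >= 64:
		return 0, 0
	case s >= 32:
		return lo << (s - 32), 0
	case s == 0:
		return hi, lo
	default:
		return hi<<s | lo>>(32-s), lo << s
	}
}

func main() {
	hi, lo := add64(0x00000001, 0xffffffff, 0, 1)
	fmt.Printf("%08x%08x\n", hi, lo) // 0000000200000000
	hi, lo = shl64(0, 0x80000001, 33)
	fmt.Printf("%08x%08x\n", hi, lo) // 0000000200000000
}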
Code example #27
0
File: ggen.go Project: xslonepiece/goios
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := nl.Type.Width

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 8 // bytes
	q := w / 8 // quads

	if q < 4 {
		// Write sequence of MOV 0, off(base) instead of using STOSQ.
		// The hope is that although the code will be slightly longer,
		// the MOVs will have no dependencies and pipeline better
		// than the unrolled STOSQ loop.
		// NOTE: Must use agen, not igen, so that optimizer sees address
		// being taken. We are not writing on field boundaries.
		var n1 gc.Node
		gc.Agenr(nl, &n1, nil)

		n1.Op = gc.OINDREG
		var z gc.Node
		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
		for ; q > 0; q-- {
			n1.Type = z.Type
			gins(x86.AMOVQ, &z, &n1)
			n1.Xoffset += 8
		}

		if c >= 4 {
			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
			n1.Type = z.Type
			gins(x86.AMOVL, &z, &n1)
			n1.Xoffset += 4
			c -= 4
		}

		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
		for ; c > 0; c-- {
			n1.Type = z.Type
			gins(x86.AMOVB, &z, &n1)
			n1.Xoffset++
		}

		gc.Regfree(&n1)
		return
	}

	var oldn1 gc.Node
	var n1 gc.Node
	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
	gc.Agen(nl, &n1)

	var ax gc.Node
	var oldax gc.Node
	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
	gconreg(x86.AMOVL, 0, x86.REG_AX)

	if q > 128 || gc.Nacl {
		gconreg(movptr, q, x86.REG_CX)
		gins(x86.AREP, nil, nil)   // repeat
		gins(x86.ASTOSQ, nil, nil) // STOSQ AX,*(DI)+
	} else {
		if di := dzDI(q); di != 0 {
			gconreg(addptr, di, x86.REG_DI)
		}
		p := gins(obj.ADUFFZERO, nil, nil)
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
		p.To.Offset = dzOff(q)
	}

	z := ax
	di := n1
	if w >= 8 && c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT64]
		di.Type = z.Type
		p := gins(x86.AMOVQ, &z, &di)
		p.To.Scale = 1
		p.To.Offset = c - 8
	} else if c >= 4 {
		di.Op = gc.OINDREG
		z.Type = gc.Types[gc.TINT32]
		di.Type = z.Type
		gins(x86.AMOVL, &z, &di)
		if c > 4 {
			p := gins(x86.AMOVL, &z, &di)
			p.To.Scale = 1
			p.To.Offset = c - 4
		}
	} else {
		for c > 0 {
			gins(x86.ASTOSB, nil, nil) // STOSB AL,*(DI)+
			c--
		}
	}

	restx(&n1, &oldn1)
	restx(&ax, &oldax)
}
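The small-size path above writes zero in 8-byte stores, at most one 4-byte store, and then single bytes. The same strategy in ordinary Go, as a stand-alone illustration (the compiler emits MOVQ/MOVL/MOVB directly rather than calling into a library):

package main

import (
	"encoding/binary"
	"fmt"
)

// clearSmall zeroes b the way the unrolled clearfat path does:
// 8-byte stores, one optional 4-byte store, then byte stores.
func clearSmall(b []byte) {
	for len(b) >= 8 {
		binary.LittleEndian.PutUint64(b, 0)
		b = b[8:]
	}
	if len(b) >= 4 {
		binary.LittleEndian.PutUint32(b, 0)
		b = b[4:]
	}
	for i := range b {
		b[i] = 0
	}
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
	clearSmall(b)
	fmt.Println(b) // all zeros
}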
Code example #28
0
File: ggen.go Project: bibbyflyaway/go
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // 4-byte words

	var r0 gc.Node
	r0.Op = gc.OREGISTER

	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %P\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %P\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
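The DUFFZERO entry offset is plain arithmetic on the two magic constants: assuming ARM duffzero is a straight run of 128 four-byte word-store instructions, entering 4*(128-q) bytes in leaves exactly q stores to execute. A quick check of that formula (stand-alone sketch, with the layout assumption stated):

package main

import "fmt"

// duffzeroOffset gives the byte offset to jump into ARM duffzero so that
// exactly q word-stores run, assuming 128 entries of 4 bytes each.
func duffzeroOffset(q int64) int64 {
	return 4 * (128 - q)
}

func main() {
	fmt.Println(duffzeroOffset(4))   // 496: only the last 4 stores run
	fmt.Println(duffzeroOffset(128)) // 0: enter at the top, clear 512 bytes
}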
Code example #29
0
File: ggen.go Project: tidatida/go
func cgen_floatsse(n *gc.Node, res *gc.Node) {
	var a int

	nl := n.Left
	nr := n.Right
	switch n.Op {
	default:
		gc.Dump("cgen_floatsse", n)
		gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
		return

	case gc.OMINUS,
		gc.OCOM:
		nr = gc.Nodintconst(-1)
		gc.Convlit(&nr, n.Type)
		a = foptoas(gc.OMUL, nl.Type, 0)
		goto sbop

		// symmetric binary
	case gc.OADD,
		gc.OMUL:
		a = foptoas(int(n.Op), nl.Type, 0)

		goto sbop

		// asymmetric binary
	case gc.OSUB,
		gc.OMOD,
		gc.ODIV:
		a = foptoas(int(n.Op), nl.Type, 0)

		goto abop
	}

sbop: // symmetric binary
	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
		r := nl
		nl = nr
		nr = r
	}

abop: // asymmetric binary
	if nl.Ullman >= nr.Ullman {
		var nt gc.Node
		gc.Tempname(&nt, nl.Type)
		gc.Cgen(nl, &nt)
		var n2 gc.Node
		gc.Mgen(nr, &n2, nil)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gmove(&nt, &n1)
		gins(a, &n2, &n1)
		gmove(&n1, res)
		gc.Regfree(&n1)
		gc.Mfree(&n2)
	} else {
		var n2 gc.Node
		gc.Regalloc(&n2, nr.Type, res)
		gc.Cgen(nr, &n2)
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, nil)
		gc.Cgen(nl, &n1)
		gins(a, &n2, &n1)
		gc.Regfree(&n2)
		gmove(&n1, res)
		gc.Regfree(&n1)
	}

	return
}
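The swap in the sbop case is driven by Ullman numbers, an estimate of how many registers a subtree needs; generating the heavier operand first lets its result wait in a single register while the lighter side is computed. A stand-alone sketch of the classic Sethi-Ullman estimate (toy Expr type, not the compiler's Node):

package main

import "fmt"

// Expr is a toy binary expression tree; leaves have no children.
type Expr struct {
	Left, Right *Expr
}

// ullman returns the classic Sethi-Ullman register need: a leaf needs one
// register, an inner node needs the larger child count, plus one when the
// two children need the same amount.
func ullman(e *Expr) int {
	if e == nil || (e.Left == nil && e.Right == nil) {
		return 1
	}
	l, r := ullman(e.Left), ullman(e.Right)
	if l == r {
		return l + 1
	}
	if l > r {
		return l
	}
	return r
}

func main() {
	leaf := &Expr{}
	sum := &Expr{Left: leaf, Right: leaf}
	fmt.Println(ullman(&Expr{Left: sum, Right: sum}))  // 3: (a+b)*(c+d)
	fmt.Println(ullman(&Expr{Left: leaf, Right: sum})) // 2: a*(b+c), heavy side first
}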
Code example #30
0
File: cgen.go Project: klueska/go-akaros
/*
 * generate:
 *	newreg = &n;
 *	res = newreg
 *
 * on exit, a has been changed to be *newreg.
 * caller must regfree(a).
 * The generated code checks that the result is not *nil.
 */
func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
	if gc.Debug['g'] != 0 {
		gc.Dump("\nigen-n", n)
	}

	switch n.Op {
	case gc.ONAME:
		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
			break
		}
		*a = *n
		return

		// Increase the refcount of the register so that igen's caller
	// has to call regfree.
	case gc.OINDREG:
		if n.Val.U.Reg != x86.REG_SP {
			reg[n.Val.U.Reg]++
		}
		*a = *n
		return

	case gc.ODOT:
		igen(n.Left, a, res)
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		return

	case gc.ODOTPTR:
		switch n.Left.Op {
		// igen-able nodes.
		case gc.ODOT,
			gc.ODOTPTR,
			gc.OCALLFUNC,
			gc.OCALLMETH,
			gc.OCALLINTER:
			var n1 gc.Node
			igen(n.Left, &n1, res)

			regalloc(a, gc.Types[gc.Tptr], &n1)
			gmove(&n1, a)
			regfree(&n1)

		default:
			regalloc(a, gc.Types[gc.Tptr], res)
			cgen(n.Left, a)
		}

		gc.Cgen_checknil(a)
		a.Op = gc.OINDREG
		a.Xoffset += n.Xoffset
		a.Type = n.Type
		return

	case gc.OCALLFUNC,
		gc.OCALLMETH,
		gc.OCALLINTER:
		switch n.Op {
		case gc.OCALLFUNC:
			cgen_call(n, 0)

		case gc.OCALLMETH:
			gc.Cgen_callmeth(n, 0)

		case gc.OCALLINTER:
			cgen_callinter(n, nil, 0)
		}

		var flist gc.Iter
		fp := gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
		*a = gc.Node{}
		a.Op = gc.OINDREG
		a.Val.U.Reg = x86.REG_SP
		a.Addable = 1
		a.Xoffset = fp.Width
		a.Type = n.Type
		return

		// Index of fixed-size array by constant can
	// put the offset in the addressing.
	// Could do the same for slice except that we need
	// to use the real index for the bounds checking.
	case gc.OINDEX:
		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] && gc.Isfixedarray(n.Left.Left.Type)) {
			if gc.Isconst(n.Right, gc.CTINT) {
				// Compute &a.
				if !gc.Isptr[n.Left.Type.Etype] {
					igen(n.Left, a, res)
				} else {
					var n1 gc.Node
					igen(n.Left, &n1, res)
					gc.Cgen_checknil(&n1)
					regalloc(a, gc.Types[gc.Tptr], res)
					gmove(&n1, a)
					regfree(&n1)
					a.Op = gc.OINDREG
				}

				// Compute &a[i] as &a + i*width.
				a.Type = n.Type

				a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
				return
			}
		}
	}

	// release register for now, to avoid
	// confusing tempname.
	if res != nil && res.Op == gc.OREGISTER {
		reg[res.Val.U.Reg]--
	}
	var n1 gc.Node
	gc.Tempname(&n1, gc.Types[gc.Tptr])
	agen(n, &n1)
	if res != nil && res.Op == gc.OREGISTER {
		reg[res.Val.U.Reg]++
	}
	regalloc(a, gc.Types[gc.Tptr], res)
	gmove(&n1, a)
	a.Op = gc.OINDREG
	a.Type = n.Type
}
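igen's ODOT case only folds the field offset into Xoffset, so a nested selector ends up as a single register-indirect operand with one combined offset. The same offset arithmetic written as ordinary Go, for illustration only:

package main

import (
	"fmt"
	"unsafe"
)

type Inner struct{ x, y int32 }

type Outer struct {
	pad int64
	in  Inner
}

func main() {
	// &o.in.y is the base address plus the two folded field offsets,
	// which is exactly the Xoffset accumulation the ODOT case performs.
	var o Outer
	off := unsafe.Offsetof(o.in) + unsafe.Offsetof(o.in.y)
	p := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&o)) + off))
	o.in.y = 42
	fmt.Println(*p, off) // 42 12
}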