Example #1
File: ggen.go Project: achanda/go
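// zerorange zeroes the cnt = hi-lo bytes of the stack frame beginning at offset
// 8+frame+lo from RSP (the 8 skips the saved LR slot). Small ranges get unrolled
// MOVD stores, medium ranges go through a DUFFZERO call (skipped on darwin, where
// ld64 cannot handle the BR26 relocation with a non-zero addend), and larger
// ranges use an explicit store/compare loop. The last Prog appended is returned.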
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+frame+lo+i)
		}
	} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p = gc.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGRT1, 0)
		p.Reg = arm64.REGRT1
		p = gc.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		gc.Naddr(&p.To, gc.Sysfunc("duffzero"))
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, arm64.REGTMP, 0)
		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p = gc.Appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
		p.Reg = arm64.REGRT1
		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm64.REGTMP, 0)
		p = gc.Appendpp(p, arm64.AADD, obj.TYPE_REG, arm64.REGTMP, 0, obj.TYPE_REG, arm64.REGRT2, 0)
		p.Reg = arm64.REGRT1
		p = gc.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
		p.Scond = arm64.C_XPRE
		p1 := p
		p = gc.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm64.REGRT2
		p = gc.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
Example #2
File: arm64.go Project: Ericean/go
// ARM64Suffix handles the special suffix for the ARM64.
// It returns a boolean to indicate success; failure means
// cond was unrecognized.
func ARM64Suffix(prog *obj.Prog, cond string) bool {
	if cond == "" {
		return true
	}
	bits, ok := ParseARM64Suffix(cond)
	if !ok {
		return false
	}
	prog.Scond = bits
	return true
}
Example #3
File: arm.go Project: danny8002/go
// ARMConditionCodes handles the special condition code situation for the ARM.
// It returns a boolean to indicate success; failure means cond was unrecognized.
func ARMConditionCodes(prog *obj.Prog, cond string) bool {
	if cond == "" {
		return true
	}
	bits, ok := ParseARMCondition(cond)
	if !ok {
		return false
	}
	/* hack to make B.NE etc. work: turn it into the corresponding conditional */
	if prog.As == arm.AB {
		prog.As = int16(bcode[(bits^arm.C_SCOND_XOR)&0xf])
		bits = (bits &^ 0xf) | arm.C_SCOND_NONE
	}
	prog.Scond = bits
	return true
}
Example #4
File: ggen.go Project: arnold8/go
// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}
Example #5
File: ggen.go Project: arnold8/go
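// zerorange is the 32-bit ARM variant: it zeroes cnt = hi-lo bytes at offset
// 4+frame+lo from SP, using R0 as the zero source (*r0 records whether R0 has
// already been loaded with 0). Depending on the size it emits unrolled MOVW
// stores, a DUFFZERO call (not on NaCl), or a post-indexed store loop.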
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
Example #6
File: lex.go Project: Ericean/go
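// outcode emits a single assembled instruction: a conditional B pseudo-op is
// first rewritten into the matching conditional branch, then an obj.Prog is
// built from the operands and linked onto the current Plist. On pass 1 only
// the PC is advanced (GLOBL and DATA directives do not advance it).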
func outcode(a, scond int32, g1 *obj.Addr, reg int32, g2 *obj.Addr) {
	var p *obj.Prog
	var pl *obj.Plist

	/* hack to make B.NE etc. work: turn it into the corresponding conditional */
	if a == arm.AB {
		a = int32(bcode[(scond^arm.C_SCOND_XOR)&0xf])
		scond = (scond &^ 0xf) | Always
	}

	if asm.Pass == 1 {
		goto out
	}

	p = new(obj.Prog)
	*p = obj.Prog{}
	p.Ctxt = asm.Ctxt
	p.As = int16(a)
	p.Lineno = stmtline
	p.Scond = uint8(scond)
	p.From = *g1
	p.Reg = int16(reg)
	p.To = *g2
	p.Pc = int64(asm.PC)

	if lastpc == nil {
		pl = obj.Linknewplist(asm.Ctxt)
		pl.Firstpc = p
	} else {
		lastpc.Link = p
	}
	lastpc = p

out:
	if a != obj.AGLOBL && a != obj.ADATA {
		asm.PC++
	}
}
Example #7
File: cgen.go Project: klueska/go-akaros
/*
 * block copy:
 *	memmove(&ns, &n, w);
 */
func sgen(n *gc.Node, ns *gc.Node, w int64) {
	var res *gc.Node = ns

	if gc.Debug['g'] != 0 {
		fmt.Printf("\nsgen w=%d\n", w)
		gc.Dump("r", n)
		gc.Dump("res", ns)
	}

	if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
		gc.Fatal("sgen UINF")
	}

	if w < 0 {
		gc.Fatal("sgen copy %d", w)
	}

	// If copying .args, that's all the results, so record definition sites
	// for them for the liveness analysis.
	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
			if l.N.Class == gc.PPARAMOUT {
				gc.Gvardef(l.N)
			}
		}
	}

	// Avoid taking the address for simple enough types.
	//if(componentgen(n, ns))
	//	return;
	if w == 0 {
		// evaluate side effects only.
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], nil)

		agen(res, &dst)
		agen(n, &dst)
		regfree(&dst)
		return
	}

	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))

	case 1:
		op = arm64.AMOVB

	case 2:
		op = arm64.AMOVH

	case 4:
		op = arm64.AMOVW

	case 8:
		op = arm64.AMOVD
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
	}
	c := int32(w / int64(align))

	// offset on the stack
	osrc := int32(stkof(n))

	odst := int32(stkof(res))
	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
		// osrc and odst both on stack, and at least one is in
		// an unknown position.  Could generate code to test
		// for forward/backward copy, but instead just copy
		// to a temporary location first.
		var tmp gc.Node
		gc.Tempname(&tmp, n.Type)

		sgen(n, &tmp, w)
		sgen(&tmp, res, w)
		return
	}

	if osrc%int32(align) != 0 || odst%int32(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		agenr(n, &dst, res) // temporarily use dst
		regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agenr(res, &dst, res)
		agenr(n, &src, nil)
	}

	var tmp gc.Node
	regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(arm64.AMOVD, &src, &nend)
		}

		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(arm64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond = arm64.C_XPRE
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond = arm64.C_XPRE

		p = gcmp(arm64.ACMP, &src, &nend)

		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
		regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs.  That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for {
			tmp14 := c
			c--
			if tmp14 <= 0 {
				break
			}

			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond = arm64.C_XPRE

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond = arm64.C_XPRE
		}
	}

	regfree(&dst)
	regfree(&src)
	regfree(&tmp)
}
Example #8
File: ggen.go Project: arnold8/go
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // bytes
	q := w / 4 // quads

	var r0 gc.Node
	r0.Op = gc.OREGISTER

	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %v\n", p);
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %v\n", p);
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}
Example #9
File: ggen.go Project: arnold8/go
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatalf("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1) // unsigned right shift
			} else {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
Example #10
File: obj5.go Project: arnold8/go
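// stacksplit inserts the prologue stack-overflow check: it loads g's stackguard
// into R1, compares it against SP (adjusted by framesize for larger frames, with
// a wraparound guard for very large ones), and emits a BLS to an out-of-line
// block at the end of the function that saves LR in R3, calls the appropriate
// runtime.morestack variant, and jumps back to the start of the function. The
// BLS is returned so the caller keeps appending after the check.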
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOVW			g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)

	p.As = AMOVW
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1

	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REGSP
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = obj.Appendp(ctxt, p)

		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = int64(-framesize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		p.Reg = REG_R1

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R3
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.Reg = REG_R2
		p.Scond = C_SCOND_NE
	}

	// BLS call-to-morestack
	bls := obj.Appendp(ctxt, p)
	bls.As = ABLS
	bls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	// MOVW	LR, R3
	movw := obj.Appendp(ctxt, last)
	movw.As = AMOVW
	movw.From.Type = obj.TYPE_REG
	movw.From.Reg = REGLINK
	movw.To.Type = obj.TYPE_REG
	movw.To.Reg = REG_R3

	bls.Pcond = movw

	// BL runtime.morestack
	call := obj.Appendp(ctxt, movw)
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.Cfunc != 0:
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)

	// B start
	b := obj.Appendp(ctxt, call)
	b.As = obj.AJMP
	b.To.Type = obj.TYPE_BRANCH
	b.Pcond = ctxt.Cursym.Text.Link

	return bls
}
Example #11
File: cgen.go Project: Greentor/go
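// blockcopy copies w bytes from n to res. It chooses the widest move the type's
// alignment allows, reverses direction when source and destination overlap on
// the stack, and then copies either with a pre-indexed load/store/compare loop
// (four or more elements) or with unrolled moves.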
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op obj.As
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = arm64.AMOVB

	case 2:
		op = arm64.AMOVH

	case 4:
		op = arm64.AMOVW

	case 8:
		op = arm64.AMOVD
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(arm64.AMOVD, &src, &nend)
		}

		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(arm64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond = arm64.C_XPRE
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond = arm64.C_XPRE

		p = gcmp(arm64.ACMP, &src, &nend)

		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs. That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond = arm64.C_XPRE

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond = arm64.C_XPRE
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
Example #12
File: cgen.go Project: klueska/go-akaros
/*
 * block copy:
 *	memmove(&res, &n, w);
 * NB: character copy assumed little endian architecture
 */
func sgen(n *gc.Node, res *gc.Node, w int64) {
	if gc.Debug['g'] != 0 {
		fmt.Printf("\nsgen w=%d\n", w)
		gc.Dump("r", n)
		gc.Dump("res", res)
	}

	if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
		gc.Fatal("sgen UINF")
	}

	if w < 0 || int64(int32(w)) != w {
		gc.Fatal("sgen copy %d", w)
	}

	if n.Type == nil {
		gc.Fatal("sgen: missing type")
	}

	if w == 0 {
		// evaluate side effects only.
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], nil)

		agen(res, &dst)
		agen(n, &dst)
		regfree(&dst)
		return
	}

	// If copying .args, that's all the results, so record definition sites
	// for them for the liveness analysis.
	if res.Op == gc.ONAME && res.Sym.Name == ".args" {
		for l := gc.Curfn.Dcl; l != nil; l = l.Next {
			if l.N.Class == gc.PPARAMOUT {
				gc.Gvardef(l.N)
			}
		}
	}

	// Avoid taking the address for simple enough types.
	if componentgen(n, res) {
		return
	}

	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
	}
	c := int32(w / int64(align))

	// offset on the stack
	osrc := stkof(n)

	odst := stkof(res)
	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
		// osrc and odst both on stack, and at least one is in
		// an unknown position.  Could generate code to test
		// for forward/backward copy, but instead just copy
		// to a temporary location first.
		var tmp gc.Node
		gc.Tempname(&tmp, n.Type)

		sgen(n, &tmp, w)
		sgen(&tmp, res, w)
		return
	}

	if osrc%int32(align) != 0 || odst%int32(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Val.U.Reg = REGALLOC_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Val.U.Reg = REGALLOC_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Val.U.Reg = REGALLOC_R0 + 2

		var src gc.Node
		regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			agen(res, &dst)
			agen(n, &src)
		}

		var tmp gc.Node
		regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		regfree(&tmp)
		regfree(&src)
		regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		agenr(n, &dst, res) // temporarily use dst
		regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		agenr(res, &dst, res)
		agenr(n, &src, nil)
	}

	var tmp gc.Node
	regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		regfree(&nend)
	} else {
		var p *obj.Prog
		for {
			tmp14 := c
			c--
			if tmp14 <= 0 {
				break
			}
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	regfree(&dst)
	regfree(&src)
	regfree(&tmp)
}
Example #13
File: obj5.go Project: achanda/go
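// stacksplit here is a later revision of the ARM prologue check: the stackguard
// comparison is the same as in the earlier example, but the out-of-line
// morestack block now starts with an ANOP carrying a negative Spadj and a PCDATA
// stack-map index, so the SP delta and stack map are correct while
// runtime.morestack runs.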
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32) *obj.Prog {
	// MOVW			g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)

	p.As = AMOVW
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
	if ctxt.Cursym.CFunc() {
		p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1

	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REGSP
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = obj.Appendp(ctxt, p)

		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = int64(-framesize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		p.Reg = REG_R1

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R3
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.Reg = REG_R2
		p.Scond = C_SCOND_NE
	}

	// BLS call-to-morestack
	bls := obj.Appendp(ctxt, p)
	bls.As = ABLS
	bls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	// Now we are at the end of the function, but logically
	// we are still in function prologue. We need to fix the
	// SP data and PCDATA.
	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	pcdata := obj.Appendp(ctxt, spfix)
	pcdata.Lineno = ctxt.Cursym.Text.Lineno
	pcdata.Mode = ctxt.Cursym.Text.Mode
	pcdata.As = obj.APCDATA
	pcdata.From.Type = obj.TYPE_CONST
	pcdata.From.Offset = obj.PCDATA_StackMapIndex
	pcdata.To.Type = obj.TYPE_CONST
	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry

	// MOVW	LR, R3
	movw := obj.Appendp(ctxt, pcdata)
	movw.As = AMOVW
	movw.From.Type = obj.TYPE_REG
	movw.From.Reg = REGLINK
	movw.To.Type = obj.TYPE_REG
	movw.To.Reg = REG_R3

	bls.Pcond = movw

	// BL runtime.morestack
	call := obj.Appendp(ctxt, movw)
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.CFunc():
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)

	// B start
	b := obj.Appendp(ctxt, call)
	b.As = obj.AJMP
	b.To.Type = obj.TYPE_BRANCH
	b.Pcond = ctxt.Cursym.Text.Link
	b.Spadj = +framesize

	return bls
}
Example #14
File: obj5.go Project: klueska/go-akaros
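// stacksplit in this older tree uses the inline form of the check: after the
// stackguard comparison it emits MOVW.LS LR, R3 and BL.LS to runtime.morestack
// (or runtime.morestackc for C functions) directly, followed by a BLS back to
// the start of the function, rather than branching to a block appended at the
// end of the function.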
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
	// MOVW			g_stackguard(g), R1
	p = obj.Appendp(ctxt, p)

	p.As = AMOVW
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R1

	if framesize <= obj.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REGSP
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = obj.Appendp(ctxt, p)

		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = int64(-framesize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.Reg = REG_R2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = obj.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		p.Reg = REG_R1

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Reg = REGSP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R2
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_R3
		p.Scond = C_SCOND_NE

		p = obj.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_R3
		p.Reg = REG_R2
		p.Scond = C_SCOND_NE
	}

	// MOVW.LS	R14, R3
	p = obj.Appendp(ctxt, p)

	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Type = obj.TYPE_REG
	p.From.Reg = REGLINK
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_R3

	// BL.LS		runtime.morestack(SB) // modifies LR, returns with LO still asserted
	p = obj.Appendp(ctxt, p)

	p.As = ABL
	p.Scond = C_SCOND_LS
	p.To.Type = obj.TYPE_BRANCH
	if ctxt.Cursym.Cfunc != 0 {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
	} else {
		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
	}

	// BLS	start
	p = obj.Appendp(ctxt, p)

	p.As = ABLS
	p.To.Type = obj.TYPE_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link

	return p
}
Example #15
File: obj5.go Project: klueska/go-akaros
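// preprocess is the per-function obj pass for ARM: it records frame and argument
// sizes, optionally emits frame-zeroing debug code, marks leaf functions and
// strips NOPs, inserts the stack split check, expands the prologue, RET, and
// DIV/MOD pseudo-instructions, and tracks SP adjustments (Spadj) on the
// instructions it rewrites.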
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	autosize := int32(0)

	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	softfloat(ctxt, cursym)

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Locals = autoffset
	cursym.Args = p.To.U.Argsize

	if ctxt.Debugzerostack != 0 {
		if autoffset != 0 && p.From3.Offset&obj.NOSPLIT == 0 {
			// MOVW $4(R13), R1
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.From.Type = obj.TYPE_ADDR
			p.From.Reg = REG_R13
			p.From.Offset = 4
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_R1

			// MOVW $n(R13), R2
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.From.Type = obj.TYPE_ADDR
			p.From.Reg = REG_R13
			p.From.Offset = 4 + int64(autoffset)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_R2

			// MOVW $0, R3
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = 0
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_R3

			// L:
			//	MOVW.nil R3, 0(R1) +4
			//	CMP R1, R2
			//	BNE L
			pl := obj.Appendp(ctxt, p)
			p := pl

			p.As = AMOVW
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_R3
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R1
			p.To.Offset = 4
			p.Scond |= C_PBIT

			p = obj.Appendp(ctxt, p)
			p.As = ACMP
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_R1
			p.Reg = REG_R2

			p = obj.Appendp(ctxt, p)
			p.As = ABNE
			p.To.Type = obj.TYPE_BRANCH
			p.Pcond = pl
		}
	}

	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	var q1 *obj.Prog
	var q *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		switch p.As {
		case ACASE:
			if ctxt.Flag_shared != 0 {
				linkcase(p)
			}

		case obj.ATEXT:
			p.Mark |= LEAF

		case obj.ARET:
			break

		case ADIV,
			ADIVU,
			AMOD,
			AMODU:
			q = p
			if ctxt.Sym_div == nil {
				initdiv(ctxt)
			}
			cursym.Text.Mark &^= LEAF
			continue

		case obj.ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			if q1 != nil {
				q1.Mark |= p.Mark
			}
			continue

		case ABL,
			ABX,
			obj.ADUFFZERO,
			obj.ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough

		case ABCASE,
			AB,
			ABEQ,
			ABNE,
			ABCS,
			ABHS,
			ABCC,
			ABLO,
			ABMI,
			ABPL,
			ABVS,
			ABVC,
			ABHI,
			ABLS,
			ABGE,
			ABLT,
			ABGT,
			ABLE:
			q1 = p.Pcond
			if q1 != nil {
				for q1.As == obj.ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}
			}
		}

		q = p
	}

	var o int
	var p1 *obj.Prog
	var p2 *obj.Prog
	var q2 *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		o = int(p.As)
		switch o {
		case obj.ATEXT:
			autosize = int32(p.To.Offset + 4)
			if autosize <= 4 {
				if cursym.Text.Mark&LEAF != 0 {
					p.To.Offset = -4
					autosize = 0
				}
			}

			if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
				if ctxt.Debugvlog != 0 {
					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
					obj.Bflush(ctxt.Bso)
				}

				cursym.Text.Mark |= LEAF
			}

			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = 1
				if autosize == 0 {
					break
				}
			}

			if p.From3.Offset&obj.NOSPLIT == 0 {
				p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
			}

			// MOVW.W		R14,$-autosize(SP)
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.Scond |= C_WBIT
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REGLINK
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(-autosize)
			p.To.Reg = REGSP
			p.Spadj = autosize

			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOVW g_panic(g), R1
				//	CMP $0, R1
				//	B.EQ end
				//	MOVW panic_argp(R1), R2
				//	ADD $(autosize+4), R13, R3
				//	CMP R2, R3
				//	B.NE end
				//	ADD $4, R13, R4
				//	MOVW R4, panic_argp(R1)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.

				p = obj.Appendp(ctxt, p)

				p.As = AMOVW
				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REGG
				p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R1

				p = obj.Appendp(ctxt, p)
				p.As = ACMP
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = 0
				p.Reg = REG_R1

				p = obj.Appendp(ctxt, p)
				p.As = ABEQ
				p.To.Type = obj.TYPE_BRANCH
				p1 = p

				p = obj.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_R1
				p.From.Offset = 0 // Panic.argp
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R2

				p = obj.Appendp(ctxt, p)
				p.As = AADD
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = int64(autosize) + 4
				p.Reg = REG_R13
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R3

				p = obj.Appendp(ctxt, p)
				p.As = ACMP
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REG_R2
				p.Reg = REG_R3

				p = obj.Appendp(ctxt, p)
				p.As = ABNE
				p.To.Type = obj.TYPE_BRANCH
				p2 = p

				p = obj.Appendp(ctxt, p)
				p.As = AADD
				p.From.Type = obj.TYPE_CONST
				p.From.Offset = 4
				p.Reg = REG_R13
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_R4

				p = obj.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type = obj.TYPE_REG
				p.From.Reg = REG_R4
				p.To.Type = obj.TYPE_MEM
				p.To.Reg = REG_R1
				p.To.Offset = 0 // Panic.argp

				p = obj.Appendp(ctxt, p)

				p.As = obj.ANOP
				p1.Pcond = p
				p2.Pcond = p
			}

		case obj.ARET:
			obj.Nocache(p)
			if cursym.Text.Mark&LEAF != 0 {
				if autosize == 0 {
					p.As = AB
					p.From = obj.Addr{}
					if p.To.Sym != nil { // retjmp
						p.To.Type = obj.TYPE_BRANCH
					} else {
						p.To.Type = obj.TYPE_MEM
						p.To.Offset = 0
						p.To.Reg = REGLINK
					}

					break
				}
			}

			p.As = AMOVW
			p.Scond |= C_PBIT
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(autosize)
			p.From.Reg = REGSP
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGPC

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so no spadj.
			if p.To.Sym != nil { // retjmp
				p.To.Reg = REGLINK
				q2 = obj.Appendp(ctxt, p)
				q2.As = AB
				q2.To.Type = obj.TYPE_BRANCH
				q2.To.Sym = p.To.Sym
				p.To.Sym = nil
				p = q2
			}

		case AADD:
			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.From.Offset)
			}

		case ASUB:
			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
				p.Spadj = int32(p.From.Offset)
			}

		case ADIV,
			ADIVU,
			AMOD,
			AMODU:
			if ctxt.Debugdivmod != 0 {
				break
			}
			if p.From.Type != obj.TYPE_REG {
				break
			}
			if p.To.Type != obj.TYPE_REG {
				break
			}
			q1 = p

			/* MOV a,4(SP) */
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_REG
			p.From.Reg = q1.From.Reg
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REGSP
			p.To.Offset = 4

			/* MOV b,REGTMP */
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_REG
			p.From.Reg = q1.Reg
			if q1.Reg == 0 {
				p.From.Reg = q1.To.Reg
			}
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGTMP
			p.To.Offset = 0

			/* CALL appropriate */
			p = obj.Appendp(ctxt, p)

			p.As = ABL
			p.Lineno = q1.Lineno
			p.To.Type = obj.TYPE_BRANCH
			switch o {
			case ADIV:
				p.To.Sym = ctxt.Sym_div

			case ADIVU:
				p.To.Sym = ctxt.Sym_divu

			case AMOD:
				p.To.Sym = ctxt.Sym_mod

			case AMODU:
				p.To.Sym = ctxt.Sym_modu
			}

			/* MOV REGTMP, b */
			p = obj.Appendp(ctxt, p)

			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REGTMP
			p.From.Offset = 0
			p.To.Type = obj.TYPE_REG
			p.To.Reg = q1.To.Reg

			/* ADD $8,SP */
			p = obj.Appendp(ctxt, p)

			p.As = AADD
			p.Lineno = q1.Lineno
			p.From.Type = obj.TYPE_CONST
			p.From.Reg = 0
			p.From.Offset = 8
			p.Reg = 0
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REGSP
			p.Spadj = -8

			/* Keep saved LR at 0(SP) after SP change. */
			/* MOVW 0(SP), REGTMP; MOVW REGTMP, -8!(SP) */
			/* TODO: Remove SP adjustments; see issue 6699. */
			q1.As = AMOVW

			q1.From.Type = obj.TYPE_MEM
			q1.From.Reg = REGSP
			q1.From.Offset = 0
			q1.Reg = 0
			q1.To.Type = obj.TYPE_REG
			q1.To.Reg = REGTMP

			/* SUB $8,SP */
			q1 = obj.Appendp(ctxt, q1)

			q1.As = AMOVW
			q1.From.Type = obj.TYPE_REG
			q1.From.Reg = REGTMP
			q1.Reg = 0
			q1.To.Type = obj.TYPE_MEM
			q1.To.Reg = REGSP
			q1.To.Offset = -8
			q1.Scond |= C_WBIT
			q1.Spadj = 8

		case AMOVW:
			if (p.Scond&C_WBIT != 0) && p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP {
				p.Spadj = int32(-p.To.Offset)
			}
			if (p.Scond&C_PBIT != 0) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP && p.To.Reg != REGPC {
				p.Spadj = int32(-p.From.Offset)
			}
			if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.From.Offset)
			}
		}
	}
}
Example #16
/*
 * generate 64-bit
 *	res = n
 * unsupported operations are reported with gc.Fatal.
 */
func cgen64(n *gc.Node, res *gc.Node) {
	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	l := n.Left
	var t1 gc.Node
	if !l.Addable {
		gc.Tempname(&t1, l.Type)
		gc.Cgen(l, &t1)
		l = &t1
	}

	var hi1 gc.Node
	var lo1 gc.Node
	split64(l, &lo1, &hi1)
	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))

	case gc.OMINUS:
		var lo2 gc.Node
		var hi2 gc.Node
		split64(res, &lo2, &hi2)

		gc.Regalloc(&t1, lo1.Type, nil)
		var al gc.Node
		gc.Regalloc(&al, lo1.Type, nil)
		var ah gc.Node
		gc.Regalloc(&ah, hi1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)

		gmove(ncon(0), &t1)
		p1 := gins(arm.ASUB, &al, &t1)
		p1.Scond |= arm.C_SBIT
		gins(arm.AMOVW, &t1, &lo2)

		gmove(ncon(0), &t1)
		gins(arm.ASBC, &ah, &t1)
		gins(arm.AMOVW, &t1, &hi2)

		gc.Regfree(&t1)
		gc.Regfree(&al)
		gc.Regfree(&ah)
		splitclean()
		splitclean()
		return

	case gc.OCOM:
		gc.Regalloc(&t1, lo1.Type, nil)
		gmove(ncon(^uint32(0)), &t1)

		var lo2 gc.Node
		var hi2 gc.Node
		split64(res, &lo2, &hi2)
		var n1 gc.Node
		gc.Regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &lo2)

		gins(arm.AMOVW, &hi1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &hi2)

		gc.Regfree(&t1)
		gc.Regfree(&n1)
		splitclean()
		splitclean()
		return

		// binary operators.
	// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR,
		gc.OLROT:
		break
	}

	// setup for binary operators
	r := n.Right

	if r != nil && !r.Addable {
		var t2 gc.Node
		gc.Tempname(&t2, r.Type)
		gc.Cgen(r, &t2)
		r = &t2
	}

	var hi2 gc.Node
	var lo2 gc.Node
	if gc.Is64(r.Type) {
		split64(r, &lo2, &hi2)
	}

	var al gc.Node
	gc.Regalloc(&al, lo1.Type, nil)
	var ah gc.Node
	gc.Regalloc(&ah, hi1.Type, nil)

	// Do op.  Leave result in ah:al.
	switch n.Op {
	default:
		gc.Fatal("cgen64: not implemented: %v\n", n)

		// TODO: Constants
	case gc.OADD:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi2, &bh)
		gins(arm.AMOVW, &lo2, &bl)
		p1 := gins(arm.AADD, &bl, &al)
		p1.Scond |= arm.C_SBIT
		gins(arm.AADC, &bh, &ah)
		gc.Regfree(&bl)
		gc.Regfree(&bh)

		// TODO: Constants.
	case gc.OSUB:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &bl)
		gins(arm.AMOVW, &hi2, &bh)
		p1 := gins(arm.ASUB, &bl, &al)
		p1.Scond |= arm.C_SBIT
		gins(arm.ASBC, &bh, &ah)
		gc.Regfree(&bl)
		gc.Regfree(&bh)

		// TODO(kaib): this can be done with 4 regs and does not need 6
	case gc.OMUL:
		var bl gc.Node
		gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)

		var bh gc.Node
		gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
		var cl gc.Node
		gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
		var ch gc.Node
		gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)

		// load args into bh:bl and ch:cl.
		gins(arm.AMOVW, &hi1, &bh)

		gins(arm.AMOVW, &lo1, &bl)
		gins(arm.AMOVW, &hi2, &ch)
		gins(arm.AMOVW, &lo2, &cl)

		// bl * cl -> ah al
		p1 := gins(arm.AMULLU, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Reg
		p1.Reg = cl.Reg
		p1.To.Type = obj.TYPE_REGREG
		p1.To.Reg = ah.Reg
		p1.To.Offset = int64(al.Reg)

		//print("%v\n", p1);

		// bl * ch + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Reg
		p1.Reg = ch.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Reg
		p1.To.Offset = int64(ah.Reg)

		//print("%v\n", p1);

		// bh * cl + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bh.Reg
		p1.Reg = cl.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Reg
		p1.To.Offset = int64(ah.Reg)

		//print("%v\n", p1);

		gc.Regfree(&bh)

		gc.Regfree(&bl)
		gc.Regfree(&ch)
		gc.Regfree(&cl)

		// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v := uint64(r.Int())

		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32

			gins(arm.AMOVW, &hi1, &bl)
			gins(arm.AMOVW, &lo1, &bh)
		} else {
			gins(arm.AMOVW, &hi1, &bh)
			gins(arm.AMOVW, &lo1, &bl)
		}

		if v == 0 {
			gins(arm.AMOVW, &bh, &ah)
			gins(arm.AMOVW, &bl, &al)
		} else {
			// rotate by 1 <= v <= 31
			//	MOVW	bl<<v, al
			//	MOVW	bh<<v, ah
			//	OR		bl>>(32-v), ah
			//	OR		bh>>(32-v), al
			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
		}

		gc.Regfree(&bl)
		gc.Regfree(&bh)

	case gc.OLSH:
		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		var p6 *obj.Prog
		var s gc.Node
		var n1 gc.Node
		var creg gc.Node
		var p1 *obj.Prog
		var p2 *obj.Prog
		var p3 *obj.Prog
		var p4 *obj.Prog
		var p5 *obj.Prog
		if r.Op == gc.OLITERAL {
			v := uint64(r.Int())
			if v >= 64 {
				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
				// here and below (verify it optimizes to EOR)
				gins(arm.AEOR, &al, &al)

				gins(arm.AEOR, &ah, &ah)
			} else if v > 32 {
				gins(arm.AEOR, &al, &al)

				//	MOVW	bl<<(v-32), ah
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
			} else if v == 32 {
				gins(arm.AEOR, &al, &al)
				gins(arm.AMOVW, &bl, &ah)
			} else if v > 0 {
				//	MOVW	bl<<v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

				//	MOVW	bh<<v, ah
				gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)

				//	OR		bl>>(32-v), ah
				gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto olsh_break
		}

		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) {
			// shift is >= 1<<32
			var cl gc.Node
			var ch gc.Node
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		bl<<s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW.LO		bh<<s, ah
		p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		s, creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	OR.LO		bl>>creg, ah
		p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)

		p1.Scond = arm.C_SCOND_LO

		//	BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bl, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	EOR.LO	al, al
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW.LO		creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		//	MOVW	bl<<s, ah
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO

		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		gins(arm.AEOR, &al, &al)
		gins(arm.AEOR, &ah, &ah)

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		gc.Regfree(&s)
		gc.Regfree(&creg)

	olsh_break:
		gc.Regfree(&bl)
		gc.Regfree(&bh)

	case gc.ORSH:
		var bl gc.Node
		gc.Regalloc(&bl, lo1.Type, nil)
		var bh gc.Node
		gc.Regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		var p4 *obj.Prog
		var p5 *obj.Prog
		var n1 gc.Node
		var p6 *obj.Prog
		var s gc.Node
		var p1 *obj.Prog
		var p2 *obj.Prog
		var creg gc.Node
		var p3 *obj.Prog
		if r.Op == gc.OLITERAL {
			v := uint64(r.Int())
			if v >= 64 {
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->31, al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)

					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &al, &al)
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 32 {
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)

					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					//	MOVW	bh>>(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)

					gins(arm.AEOR, &ah, &ah)
				}
			} else if v == 32 {
				gins(arm.AMOVW, &bh, &al)
				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 0 {
				//	MOVW	bl>>v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)

				//	OR		bh<<(32-v), al
				gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)

				if bh.Type.Etype == gc.TINT32 {
					//	MOVW	bh->v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
				} else {
					//	MOVW	bh>>v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
				}
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto orsh_break
		}

		gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) {
			// shift is >= 1<<32
			var ch gc.Node
			var cl gc.Node
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			var p1 *obj.Prog
			if bh.Type.Etype == gc.TINT32 {
				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
			} else {
				p1 = gins(arm.AEOR, &ah, &ah)
			}
			p1.Scond = arm.C_SCOND_NE
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		bl>>s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		s,creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	OR.LO		bh<<(32-s), al
		p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
		} else {
			//	MOVW	bh>>s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
		}

		p1.Scond = arm.C_SCOND_LO

		//	BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AMOVW, &bh, &al)

		p1.Scond = arm.C_SCOND_EQ
		if bh.Type.Etype == gc.TINT32 {
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
		} else {
			gins(arm.AEOR, &ah, &ah)
		}
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gins(arm.ACMP, &s, &creg)

		//	MOVW.LO		creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		//	SUB.LO		creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->(s-32), al
			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		} else {
			//	MOVW	bh>>(v-32), al
			p1 := gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		}

		//	BLO	end
		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// s >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		if bh.Type.Etype == gc.TINT32 {
			//	MOVW	bh->31, al
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
		} else {
			gins(arm.AEOR, &al, &al)
		}

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		gc.Regfree(&s)
		gc.Regfree(&creg)

	orsh_break:
		gc.Regfree(&bl)
		gc.Regfree(&bh)

		// TODO(kaib): literal optimizations
	// make constant the right side (it usually is anyway).
	//		if(lo1.op == OLITERAL) {
	//			nswap(&lo1, &lo2);
	//			nswap(&hi1, &hi2);
	//		}
	//		if(lo2.op == OLITERAL) {
	//			// special cases for constants.
	//			lv = mpgetfix(lo2.val.u.xval);
	//			hv = mpgetfix(hi2.val.u.xval);
	//			splitclean();	// right side
	//			split64(res, &lo2, &hi2);
	//			switch(n->op) {
	//			case OXOR:
	//				gmove(&lo1, &lo2);
	//				gmove(&hi1, &hi2);
	//				switch(lv) {
	//				case 0:
	//					break;
	//				case 0xffffffffu:
	//					gins(ANOTL, N, &lo2);
	//					break;
	//				default:
	//					gins(AXORL, ncon(lv), &lo2);
	//					break;
	//				}
	//				switch(hv) {
	//				case 0:
	//					break;
	//				case 0xffffffffu:
	//					gins(ANOTL, N, &hi2);
	//					break;
	//				default:
	//					gins(AXORL, ncon(hv), &hi2);
	//					break;
	//				}
	//				break;

	//			case OAND:
	//				switch(lv) {
	//				case 0:
	//					gins(AMOVL, ncon(0), &lo2);
	//					break;
	//				default:
	//					gmove(&lo1, &lo2);
	//					if(lv != 0xffffffffu)
	//						gins(AANDL, ncon(lv), &lo2);
	//					break;
	//				}
	//				switch(hv) {
	//				case 0:
	//					gins(AMOVL, ncon(0), &hi2);
	//					break;
	//				default:
	//					gmove(&hi1, &hi2);
	//					if(hv != 0xffffffffu)
	//						gins(AANDL, ncon(hv), &hi2);
	//					break;
	//				}
	//				break;

	//			case OOR:
	//				switch(lv) {
	//				case 0:
	//					gmove(&lo1, &lo2);
	//					break;
	//				case 0xffffffffu:
	//					gins(AMOVL, ncon(0xffffffffu), &lo2);
	//					break;
	//				default:
	//					gmove(&lo1, &lo2);
	//					gins(AORL, ncon(lv), &lo2);
	//					break;
	//				}
	//				switch(hv) {
	//				case 0:
	//					gmove(&hi1, &hi2);
	//					break;
	//				case 0xffffffffu:
	//					gins(AMOVL, ncon(0xffffffffu), &hi2);
	//					break;
	//				default:
	//					gmove(&hi1, &hi2);
	//					gins(AORL, ncon(hv), &hi2);
	//					break;
	//				}
	//				break;
	//			}
	//			splitclean();
	//			splitclean();
	//			goto out;
	//		}
	case gc.OXOR,
		gc.OAND,
		gc.OOR:
		var n1 gc.Node
		gc.Regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &al)
		gins(arm.AMOVW, &hi2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
		gc.Regfree(&n1)
	}

	if gc.Is64(r.Type) {
		splitclean()
	}
	splitclean()

	split64(res, &lo1, &hi1)
	gins(arm.AMOVW, &al, &lo1)
	gins(arm.AMOVW, &ah, &hi1)
	splitclean()

	//out:
	gc.Regfree(&al)

	gc.Regfree(&ah)
}
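The ORSH path above open-codes a 64-bit right shift on 32-bit ARM by branching on the shift count: zero, below 32, exactly 32, 32 to 63, and 64 or more, with arithmetic shifts substituting the sign bit when the source is signed. The sketch below restates that case analysis in plain Go so the branch structure is easier to follow; shr64 is a hypothetical helper written for illustration, not code from the compiler.

// shr64 is a sketch of the case analysis the generated ARM code performs
// for a 64-bit right shift on a 32-bit machine. hi and lo are the two
// halves of the value; signed selects an arithmetic (sign-extending) shift.
// Illustration only, not compiler code.
func shr64(hi, lo uint32, s uint, signed bool) (rhi, rlo uint32) {
	// sign is what every discarded high bit collapses to.
	var sign uint32
	if signed && int32(hi) < 0 {
		sign = 0xffffffff
	}
	switch {
	case s == 0:
		return hi, lo
	case s < 32:
		// low word takes bits from both halves.
		rlo = lo>>s | hi<<(32-s)
		if signed {
			rhi = uint32(int32(hi) >> s)
		} else {
			rhi = hi >> s
		}
	case s == 32:
		rlo = hi
		rhi = sign
	case s < 64:
		if signed {
			rlo = uint32(int32(hi) >> (s - 32))
		} else {
			rlo = hi >> (s - 32)
		}
		rhi = sign
	default: // s >= 64
		rlo = sign
		rhi = sign
	}
	return
}

The separate p6 branch in the generated code exists because a 64-bit shift count with a non-zero high word is already at least 2^32, so it is patched straight to the s >= 64 handling.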
Example #17
0
File: obj7.go Project: klueska/go-akaros
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	textstksiz := p.To.Offset
	aoffset := int32(textstksiz)

	cursym.Args = p.To.U.Argsize
	cursym.Locals = int32(textstksiz)

	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 */
	obj.Bflush(ctxt.Bso)
	q := (*obj.Prog)(nil)
	var q1 *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		switch p.As {
		case obj.ATEXT:
			p.Mark |= LEAF

		case obj.ARET:
			break

		case obj.ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			q1.Mark |= p.Mark
			continue

		case ABL,
			obj.ADUFFZERO,
			obj.ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough

		case ACBNZ,
			ACBZ,
			ACBNZW,
			ACBZW,
			ATBZ,
			ATBNZ,
			ABCASE,
			AB,
			ABEQ,
			ABNE,
			ABCS,
			ABHS,
			ABCC,
			ABLO,
			ABMI,
			ABPL,
			ABVS,
			ABVC,
			ABHI,
			ABLS,
			ABGE,
			ABLT,
			ABGT,
			ABLE,
			AADR, /* strange */
			AADRP:
			q1 = p.Pcond

			if q1 != nil {
				for q1.As == obj.ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}
			}

			break
		}

		q = p
	}

	var o int
	var q2 *obj.Prog
	var retjmp *obj.LSym
	var stkadj int64
	for p := cursym.Text; p != nil; p = p.Link {
		o = int(p.As)
		switch o {
		case obj.ATEXT:
			cursym.Text = p
			if textstksiz < 0 {
				ctxt.Autosize = 0
			} else {
				ctxt.Autosize = int32(textstksiz + 8)
			}
			if (cursym.Text.Mark&LEAF != 0) && ctxt.Autosize <= 8 {
				ctxt.Autosize = 0
			} else if ctxt.Autosize&(16-1) != 0 {
				stkadj = 16 - (int64(ctxt.Autosize) & (16 - 1))
				ctxt.Autosize += int32(stkadj)
				cursym.Locals += int32(stkadj)
			}
			p.To.Offset = int64(ctxt.Autosize) - 8
			if ctxt.Autosize == 0 && !(cursym.Text.Mark&LEAF != 0) {
				if ctxt.Debugvlog != 0 {
					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Text.From.Sym.Name)
				}
				obj.Bflush(ctxt.Bso)
				cursym.Text.Mark |= LEAF
			}

			if !(p.From3.Offset&obj.NOSPLIT != 0) {
				p = stacksplit(ctxt, p, ctxt.Autosize, bool2int(cursym.Text.From3.Offset&obj.NEEDCTXT == 0)) // emit split check
			}

			aoffset = ctxt.Autosize
			if aoffset > 0xF0 {
				aoffset = 0xF0
			}
			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = 1
				if ctxt.Autosize == 0 {
					break
				}
				aoffset = 0
			}

			q = p
			if ctxt.Autosize > aoffset {
				q = ctxt.NewProg()
				q.As = ASUB
				q.Lineno = p.Lineno
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(ctxt.Autosize) - int64(aoffset)
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REGSP
				q.Spadj = int32(q.From.Offset)
				q.Link = p.Link
				p.Link = q
				if cursym.Text.Mark&LEAF != 0 {
					break
				}
			}

			q1 = ctxt.NewProg()
			q1.As = AMOVD
			q1.Lineno = p.Lineno
			q1.From.Type = obj.TYPE_REG
			q1.From.Reg = REGLINK
			q1.To.Type = obj.TYPE_MEM
			q1.Scond = C_XPRE
			q1.To.Offset = int64(-aoffset)
			q1.To.Reg = REGSP
			q1.Link = q.Link
			q1.Spadj = aoffset
			q.Link = q1

			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOV g_panic(g), R1
				//	CMP ZR, R1
				//	BEQ end
				//	MOV panic_argp(R1), R2
				//	ADD $(autosize+8), RSP, R3
				//	CMP R2, R3
				//	BNE end
				//	ADD $8, RSP, R4
				//	MOVD R4, panic_argp(R1)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not a ARM64 NOP: it encodes to 0 instruction bytes.
				q = q1

				q = obj.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type = obj.TYPE_MEM
				q.From.Reg = REGG
				q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R1

				q = obj.Appendp(ctxt, q)
				q.As = ACMP
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REGZERO
				q.Reg = REG_R1

				q = obj.Appendp(ctxt, q)
				q.As = ABEQ
				q.To.Type = obj.TYPE_BRANCH
				q1 = q

				q = obj.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type = obj.TYPE_MEM
				q.From.Reg = REG_R1
				q.From.Offset = 0 // Panic.argp
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R2

				q = obj.Appendp(ctxt, q)
				q.As = AADD
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(ctxt.Autosize) + 8
				q.Reg = REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R3

				q = obj.Appendp(ctxt, q)
				q.As = ACMP
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R2
				q.Reg = REG_R3

				q = obj.Appendp(ctxt, q)
				q.As = ABNE
				q.To.Type = obj.TYPE_BRANCH
				q2 = q

				q = obj.Appendp(ctxt, q)
				q.As = AADD
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = 8
				q.Reg = REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R4

				q = obj.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R4
				q.To.Type = obj.TYPE_MEM
				q.To.Reg = REG_R1
				q.To.Offset = 0 // Panic.argp

				q = obj.Appendp(ctxt, q)

				q.As = obj.ANOP
				q1.Pcond = q
				q2.Pcond = q
			}

		case obj.ARET:
			nocache(p)
			if p.From.Type == obj.TYPE_CONST {
				ctxt.Diag("using BECOME (%v) is not supported!", p)
				break
			}

			retjmp = p.To.Sym
			p.To = obj.Addr{}
			if cursym.Text.Mark&LEAF != 0 {
				if ctxt.Autosize != 0 {
					p.As = AADD
					p.From.Type = obj.TYPE_CONST
					p.From.Offset = int64(ctxt.Autosize)
					p.To.Type = obj.TYPE_REG
					p.To.Reg = REGSP
					p.Spadj = -ctxt.Autosize
				}
			} else {
				/* want write-back pre-indexed SP+autosize -> SP, loading REGLINK*/
				aoffset = ctxt.Autosize

				if aoffset > 0xF0 {
					aoffset = 0xF0
				}
				p.As = AMOVD
				p.From.Type = obj.TYPE_MEM
				p.Scond = C_XPOST
				p.From.Offset = int64(aoffset)
				p.From.Reg = REGSP
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REGLINK
				p.Spadj = -aoffset
				if ctxt.Autosize > aoffset {
					q = ctxt.NewProg()
					q.As = AADD
					q.From.Type = obj.TYPE_CONST
					q.From.Offset = int64(ctxt.Autosize) - int64(aoffset)
					q.To.Type = obj.TYPE_REG
					q.To.Reg = REGSP
					q.Link = p.Link
					q.Spadj = int32(-q.From.Offset)
					q.Lineno = p.Lineno
					p.Link = q
					p = q
				}
			}

			if p.As != obj.ARET {
				q = ctxt.NewProg()
				q.Lineno = p.Lineno
				q.Link = p.Link
				p.Link = q
				p = q
			}

			if retjmp != nil { // retjmp
				p.As = AB
				p.To.Type = obj.TYPE_BRANCH
				p.To.Sym = retjmp
				p.Spadj = +ctxt.Autosize
				break
			}

			p.As = obj.ARET
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 0
			p.To.Reg = REGLINK
			p.Spadj = +ctxt.Autosize

		case AADD,
			ASUB:
			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
				if p.As == AADD {
					p.Spadj = int32(-p.From.Offset)
				} else {
					p.Spadj = int32(+p.From.Offset)
				}
			}
			break
		}
	}
}
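In the ATEXT case above, the frame reserved for a function is the declared stack size plus 8 bytes for the saved LR, rounded up to 16-byte alignment, and the LR save is folded into a pre-indexed store whose immediate offset is capped at 0xF0, so larger frames need an extra SUB to allocate the remainder. The following sketch repeats just that arithmetic; frameLayout and its parameters are illustrative names, not part of the source.

// frameLayout is an illustrative sketch of the size computation done in
// the ATEXT case above; it is not compiler code, just the same arithmetic.
// textstksiz is the stack size declared on the TEXT line; leaf reports
// whether the function was marked LEAF.
func frameLayout(textstksiz int64, leaf bool) (autosize, aoffset int32) {
	if textstksiz < 0 {
		autosize = 0
	} else {
		autosize = int32(textstksiz) + 8 // + saved LR
	}
	if leaf && autosize <= 8 {
		return 0, 0 // leaf with no locals: no frame at all
	}
	if rem := autosize & 15; rem != 0 {
		autosize += 16 - rem // keep SP 16-byte aligned
	}
	// The LR save uses a pre-indexed MOVD whose immediate is limited,
	// so at most 0xF0 bytes can be allocated by that store; the rest
	// is allocated by a separate SUB. Leaves skip the LR save entirely.
	aoffset = autosize
	if aoffset > 0xF0 {
		aoffset = 0xF0
	}
	if leaf {
		aoffset = 0
	}
	return autosize, aoffset
}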
Example #18
0
File: cgen.go Project: rentongzhang/go
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align
	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Reg = arm.REG_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Reg = arm.REG_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Reg = arm.REG_R0 + 2

		var src gc.Node
		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			gc.Agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
			gc.Agen(n, &src)
		}

		var tmp gc.Node
		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
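blockcopy prefers a jump into runtime.duffcopy when it can: each word copied there costs 8 bytes of code and the routine handles up to 128 words, so the entry offset is 8*(128-c). It also reverses direction when the source lies below an overlapping destination, because a forward word-by-word copy would overwrite source bytes before reading them. A small sketch of both computations, using illustrative names rather than anything from the source:

// duffcopyOffset mirrors the offset computation above: runtime.duffcopy on
// ARM copies up to 128 words, and each word of copying is 8 bytes of code,
// so copying c words means entering 8*(128-c) bytes into the routine.
// Explanatory sketch only.
func duffcopyOffset(c int64) int64 {
	return 8 * (128 - c)
}

// copyDirection reports +align for a forward copy and -align for a backward
// copy, matching the overlap test in blockcopy: if the source starts below
// the destination and the two ranges overlap, the copy must run backward.
func copyDirection(osrc, odst, w int64, align int) int {
	dir := align
	if osrc < odst && odst < osrc+w {
		dir = -dir
	}
	return dir
}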
Example #19
0
File: obj7.go Project: hurkgu/go
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	textstksiz := p.To.Offset
	aoffset := int32(textstksiz)

	cursym.Args = p.To.Val.(int32)
	cursym.Locals = int32(textstksiz)

	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 */
	q := (*obj.Prog)(nil)
	var q1 *obj.Prog
	for p := cursym.Text; p != nil; p = p.Link {
		switch p.As {
		case obj.ATEXT:
			p.Mark |= LEAF

		case obj.ARET:
			break

		case obj.ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			q1.Mark |= p.Mark
			continue

		case ABL,
			obj.ADUFFZERO,
			obj.ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough

		case ACBNZ,
			ACBZ,
			ACBNZW,
			ACBZW,
			ATBZ,
			ATBNZ,
			AB,
			ABEQ,
			ABNE,
			ABCS,
			ABHS,
			ABCC,
			ABLO,
			ABMI,
			ABPL,
			ABVS,
			ABVC,
			ABHI,
			ABLS,
			ABGE,
			ABLT,
			ABGT,
			ABLE,
			AADR, /* strange */
			AADRP:
			q1 = p.Pcond

			if q1 != nil {
				for q1.As == obj.ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}
			}

			break
		}

		q = p
	}

	var q2 *obj.Prog
	var retjmp *obj.LSym
	for p := cursym.Text; p != nil; p = p.Link {
		o := p.As
		switch o {
		case obj.ATEXT:
			cursym.Text = p
			if textstksiz < 0 {
				ctxt.Autosize = 0
			} else {
				ctxt.Autosize = int32(textstksiz + 8)
			}
			if (cursym.Text.Mark&LEAF != 0) && ctxt.Autosize <= 8 {
				ctxt.Autosize = 0
			} else if ctxt.Autosize&(16-1) != 0 {
				// The frame includes an LR.
				// If the frame size is 8, it's only an LR,
				// so there's no potential for breaking references to
				// local variables by growing the frame size,
				// because there are no local variables.
				// But otherwise, if there is a non-empty locals section,
				// the author of the code is responsible for making sure
				// that the frame size is 8 mod 16.
				if ctxt.Autosize == 8 {
					ctxt.Autosize += 8
					cursym.Locals += 8
				} else {
					ctxt.Diag("%v: unaligned frame size %d - must be 8 mod 16 (or 0)", p, ctxt.Autosize-8)
				}
			}
			p.To.Offset = int64(ctxt.Autosize) - 8
			if ctxt.Autosize == 0 && !(cursym.Text.Mark&LEAF != 0) {
				if ctxt.Debugvlog != 0 {
					ctxt.Logf("save suppressed in: %s\n", cursym.Text.From.Sym.Name)
				}
				cursym.Text.Mark |= LEAF
			}

			if !(p.From3.Offset&obj.NOSPLIT != 0) {
				p = stacksplit(ctxt, p, ctxt.Autosize) // emit split check
			}

			aoffset = ctxt.Autosize
			if aoffset > 0xF0 {
				aoffset = 0xF0
			}
			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = true
				if ctxt.Autosize == 0 {
					break
				}
				aoffset = 0
			}

			q = p
			if ctxt.Autosize > aoffset {
				q = ctxt.NewProg()
				q.As = ASUB
				q.Lineno = p.Lineno
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(ctxt.Autosize) - int64(aoffset)
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REGSP
				q.Spadj = int32(q.From.Offset)
				q.Link = p.Link
				p.Link = q
				if cursym.Text.Mark&LEAF != 0 {
					break
				}
			}

			q1 = ctxt.NewProg()
			q1.As = AMOVD
			q1.Lineno = p.Lineno
			q1.From.Type = obj.TYPE_REG
			q1.From.Reg = REGLINK
			q1.To.Type = obj.TYPE_MEM
			q1.Scond = C_XPRE
			q1.To.Offset = int64(-aoffset)
			q1.To.Reg = REGSP
			q1.Link = q.Link
			q1.Spadj = aoffset
			q.Link = q1

			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOV g_panic(g), R1
				//	CMP ZR, R1
				//	BEQ end
				//	MOV panic_argp(R1), R2
				//	ADD $(autosize+8), RSP, R3
				//	CMP R2, R3
				//	BNE end
				//	ADD $8, RSP, R4
				//	MOVD R4, panic_argp(R1)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not a ARM64 NOP: it encodes to 0 instruction bytes.
				q = q1

				q = obj.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type = obj.TYPE_MEM
				q.From.Reg = REGG
				q.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R1

				q = obj.Appendp(ctxt, q)
				q.As = ACMP
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REGZERO
				q.Reg = REG_R1

				q = obj.Appendp(ctxt, q)
				q.As = ABEQ
				q.To.Type = obj.TYPE_BRANCH
				q1 = q

				q = obj.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type = obj.TYPE_MEM
				q.From.Reg = REG_R1
				q.From.Offset = 0 // Panic.argp
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R2

				q = obj.Appendp(ctxt, q)
				q.As = AADD
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = int64(ctxt.Autosize) + 8
				q.Reg = REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R3

				q = obj.Appendp(ctxt, q)
				q.As = ACMP
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R2
				q.Reg = REG_R3

				q = obj.Appendp(ctxt, q)
				q.As = ABNE
				q.To.Type = obj.TYPE_BRANCH
				q2 = q

				q = obj.Appendp(ctxt, q)
				q.As = AADD
				q.From.Type = obj.TYPE_CONST
				q.From.Offset = 8
				q.Reg = REGSP
				q.To.Type = obj.TYPE_REG
				q.To.Reg = REG_R4

				q = obj.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type = obj.TYPE_REG
				q.From.Reg = REG_R4
				q.To.Type = obj.TYPE_MEM
				q.To.Reg = REG_R1
				q.To.Offset = 0 // Panic.argp

				q = obj.Appendp(ctxt, q)

				q.As = obj.ANOP
				q1.Pcond = q
				q2.Pcond = q
			}

		case obj.ARET:
			nocache(p)
			if p.From.Type == obj.TYPE_CONST {
				ctxt.Diag("using BECOME (%v) is not supported!", p)
				break
			}

			retjmp = p.To.Sym
			p.To = obj.Addr{}
			if cursym.Text.Mark&LEAF != 0 {
				if ctxt.Autosize != 0 {
					p.As = AADD
					p.From.Type = obj.TYPE_CONST
					p.From.Offset = int64(ctxt.Autosize)
					p.To.Type = obj.TYPE_REG
					p.To.Reg = REGSP
					p.Spadj = -ctxt.Autosize
				}
			} else {
				/* want write-back pre-indexed SP+autosize -> SP, loading REGLINK*/
				aoffset = ctxt.Autosize

				if aoffset > 0xF0 {
					aoffset = 0xF0
				}
				p.As = AMOVD
				p.From.Type = obj.TYPE_MEM
				p.Scond = C_XPOST
				p.From.Offset = int64(aoffset)
				p.From.Reg = REGSP
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REGLINK
				p.Spadj = -aoffset
				if ctxt.Autosize > aoffset {
					q = ctxt.NewProg()
					q.As = AADD
					q.From.Type = obj.TYPE_CONST
					q.From.Offset = int64(ctxt.Autosize) - int64(aoffset)
					q.To.Type = obj.TYPE_REG
					q.To.Reg = REGSP
					q.Link = p.Link
					q.Spadj = int32(-q.From.Offset)
					q.Lineno = p.Lineno
					p.Link = q
					p = q
				}
			}

			if p.As != obj.ARET {
				q = ctxt.NewProg()
				q.Lineno = p.Lineno
				q.Link = p.Link
				p.Link = q
				p = q
			}

			if retjmp != nil { // retjmp
				p.As = AB
				p.To.Type = obj.TYPE_BRANCH
				p.To.Sym = retjmp
				p.Spadj = +ctxt.Autosize
				break
			}

			p.As = obj.ARET
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 0
			p.To.Reg = REGLINK
			p.Spadj = +ctxt.Autosize

		case AADD, ASUB:
			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
				if p.As == AADD {
					p.Spadj = int32(-p.From.Offset)
				} else {
					p.Spadj = int32(+p.From.Offset)
				}
			}
			break
		}
	}
}
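The diagnostic in this later version of preprocess enforces that a declared frame is either empty or 8 mod 16 bytes, so that adding the 8-byte LR slot keeps SP 16-byte aligned; only the LR-only frame may be padded silently, since padding any other frame would shift local-variable offsets. A sketch of that rule, with an illustrative name and not taken from the source:

// checkFrameSize is a sketch of the alignment rule in the ATEXT case above:
// autosize is the declared locals plus the 8-byte saved LR, and it must be
// 16-byte aligned. A frame of exactly 8 bytes (LR only, no locals) can be
// padded safely; any other misalignment is reported to the programmer.
func checkFrameSize(autosize int32) (adjusted int32, ok bool) {
	if autosize&15 == 0 {
		return autosize, true
	}
	if autosize == 8 {
		return autosize + 8, true // LR only: safe to pad
	}
	return autosize, false // declared size must be 8 mod 16 (or 0)
}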