Example #1
File: obj5.go Project: rsc/tmp
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int32, noctxt int) *liblink.Prog {
	// MOVW			g_stackguard(g), R1
	p = liblink.Appendp(ctxt, p)

	p.As = AMOVW
	p.From.Type_ = D_OREG
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type_ = D_REG
	p.To.Reg = 1

	if framesize <= liblink.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = liblink.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type_ = D_REG
		p.From.Reg = 1
		p.Reg = REGSP
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = liblink.Appendp(ctxt, p)

		p.As = AMOVW
		p.From.Type_ = D_CONST
		p.From.Reg = REGSP
		p.From.Offset = int64(-framesize)
		p.To.Type_ = D_REG
		p.To.Reg = 2

		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type_ = D_REG
		p.From.Reg = 1
		p.Reg = 2
	} else {

		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = liblink.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type_ = D_CONST
		p.From.Offset = int64(uint32(liblink.StackPreempt & (1<<32 - 1)))
		p.Reg = 1

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type_ = D_CONST
		p.From.Reg = REGSP
		p.From.Offset = liblink.StackGuard
		p.To.Type_ = D_REG
		p.To.Reg = 2
		p.Scond = C_SCOND_NE

		p = liblink.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type_ = D_REG
		p.From.Reg = 1
		p.To.Type_ = D_REG
		p.To.Reg = 2
		p.Scond = C_SCOND_NE

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Type_ = D_CONST
		p.From.Offset = int64(framesize) + (liblink.StackGuard - liblink.StackSmall)
		p.To.Type_ = D_REG
		p.To.Reg = 3
		p.Scond = C_SCOND_NE

		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Type_ = D_REG
		p.From.Reg = 3
		p.Reg = 2
		p.Scond = C_SCOND_NE
	}

	// MOVW.LS	R14, R3
	p = liblink.Appendp(ctxt, p)

	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Type_ = D_REG
	p.From.Reg = REGLINK
	p.To.Type_ = D_REG
	p.To.Reg = 3

	// BL.LS		runtime.morestack(SB) // modifies LR, returns with LO still asserted
	p = liblink.Appendp(ctxt, p)

	p.As = ABL
	p.Scond = C_SCOND_LS
	p.To.Type_ = D_BRANCH
	if ctxt.Cursym.Cfunc != 0 {
		p.To.Sym = liblink.Linklookup(ctxt, "runtime.morestackc", 0)
	} else {

		p.To.Sym = ctxt.Symmorestack[noctxt]
	}

	// BLS	start
	p = liblink.Appendp(ctxt, p)

	p.As = ABLS
	p.To.Type_ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link

	return p
}
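
The branch structure above encodes a three-way size check that is easier to read as ordinary arithmetic. Below is a standalone sketch of that decision; the constants are illustrative stand-ins for liblink.StackSmall/StackBig/StackGuard and StackPreempt, not the toolchain's real values, and the function models what the emitted ARM sequence computes rather than reproducing toolchain code.

package main

import "fmt"

// Illustrative stand-ins; the real constants live in liblink and the runtime.
const (
	stackSmall   = 128
	stackBig     = 4096
	stackGuard   = 256
	stackPreempt = ^uint32(0) // stand-in for the "very large" guard used to force preemption
)

// needMorestack mirrors the case split in stacksplit: it reports whether the
// prologue would branch to runtime.morestack for the given SP, stackguard
// and frame size.
func needMorestack(sp, stackguard uint32, framesize int32) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: SP < stackguard
		return sp < stackguard
	case framesize <= stackBig:
		// large stack: SP-framesize < stackguard-StackSmall
		return sp-uint32(framesize) < stackguard-stackSmall
	default:
		// Huge stack: guard against wraparound when SP is close to zero,
		// and treat a StackPreempt guard as an unconditional call.
		if stackguard == stackPreempt {
			return true
		}
		return sp-stackguard+stackGuard < uint32(framesize)+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needMorestack(0x2000, 0x1f00, 64))    // false: room left for a small frame
	fmt.Println(needMorestack(0x1ef0, 0x1f00, 64))    // true: SP already under the guard
	fmt.Println(needMorestack(0x2000, 0x1f00, 16384)) // true: huge frame takes the slow path
}
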
Example #2
File: obj5.go Project: rsc/tmp
func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var r *liblink.Prog
	var a int
	var i int

loop:
	if p == nil {
		return
	}
	a = int(p.As)
	if a == AB {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			p.Mark |= FOLL
			p = q
			if !(p.Mark&FOLL != 0) {
				goto loop
			}
		}
	}

	if p.Mark&FOLL != 0 {
		i = 0
		q = p
		for ; i < 4; (func() { i++; q = q.Link })() {
			if q == *last || q == nil {
				break
			}
			a = int(q.As)
			if a == ANOP {
				i--
				continue
			}

			if a == AB || (a == ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
				goto copy
			}
			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
				continue
			}
			if a != ABEQ && a != ABNE {
				continue
			}

		copy:
			for {
				r = ctxt.Arch.Prg()
				*r = *p
				if !(r.Mark&FOLL != 0) {
					fmt.Printf("can't happen 1\n")
				}
				r.Mark |= FOLL
				if p != q {
					p = p.Link
					(*last).Link = r
					*last = r
					continue
				}

				(*last).Link = r
				*last = r
				if a == AB || (a == ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
					return
				}
				r.As = ABNE
				if a == ABNE {
					r.As = ABEQ
				}
				r.Pcond = p.Link
				r.Link = p.Pcond
				if !(r.Link.Mark&FOLL != 0) {
					xfol(ctxt, r.Link, last)
				}
				if !(r.Pcond.Mark&FOLL != 0) {
					fmt.Printf("can't happen 2\n")
				}
				return
			}
		}

		a = AB
		q = ctxt.Arch.Prg()
		q.As = int16(a)
		q.Lineno = p.Lineno
		q.To.Type_ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	p.Mark |= FOLL
	(*last).Link = p
	*last = p
	if a == AB || (a == ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
		return
	}

	if p.Pcond != nil {
		if a != ABL && a != ABX && p.Link != nil {
			q = liblink.Brchain(ctxt, p.Link)
			if a != ATEXT && a != ABCASE {
				if q != nil && (q.Mark&FOLL != 0) {
					p.As = int16(relinv(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}

			xfol(ctxt, p.Link, last)
			q = liblink.Brchain(ctxt, p.Pcond)
			if q == nil {
				q = p.Pcond
			}
			if q.Mark&FOLL != 0 {
				p.Pcond = q
				return
			}

			p = q
			goto loop
		}
	}

	p = p.Link
	goto loop
}
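
xfol is the block-layout ("follow") pass: it re-threads the Prog list so that branch targets become fall-throughs where possible, using the FOLL mark to place each instruction exactly once. The toy below keeps only that core idea on a stand-in instruction type; it omits the real pass's extras (copying up to four instructions, inverting a branch via relinv, Brchain), so treat it as a sketch of the algorithm's shape, not a reimplementation.

package main

import "fmt"

type inst struct {
	name   string
	isJump bool  // unconditional branch, like AB
	isCond bool  // conditional branch, like ABEQ/ABNE
	link   *inst // textual successor
	pcond  *inst // branch target
	placed bool  // plays the role of the FOLL mark
}

// follow appends p and its successors to out, consuming unconditional
// branches whose target has not been laid out yet so the target falls
// through instead.
func follow(p *inst, out *[]*inst) {
	for p != nil && !p.placed {
		if p.isJump && p.pcond != nil && !p.pcond.placed {
			p.placed = true // drop the jump, continue layout at its target
			p = p.pcond
			continue
		}
		p.placed = true
		*out = append(*out, p)
		if p.isCond && p.pcond != nil && !p.pcond.placed {
			follow(p.link, out) // lay out the not-taken path first
			p = p.pcond         // then continue at the target
			continue
		}
		p = p.link
	}
}

func main() {
	op3 := &inst{name: "op3"}
	op2 := &inst{name: "op2", link: op3}
	op1 := &inst{name: "op1 (unreachable)", link: op2}
	b := &inst{name: "B L2", isJump: true, link: op1, pcond: op2}

	var out []*inst
	follow(b, &out)
	for _, q := range out {
		fmt.Println(q.name) // op2, op3: the jump disappears, op1 is dropped
	}
}
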
Example #3
File: obj5.go Project: rsc/tmp
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var pl *liblink.Prog
	var p1 *liblink.Prog
	var p2 *liblink.Prog
	var q *liblink.Prog
	var q1 *liblink.Prog
	var q2 *liblink.Prog
	var o int
	var autosize int32
	var autoffset int32

	autosize = 0

	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	q = nil

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	softfloat(ctxt, cursym)

	p = cursym.Text
	autoffset = int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Locals = autoffset
	cursym.Args = p.To.Offset2

	if ctxt.Debugzerostack != 0 {
		if autoffset != 0 && !(p.Reg&liblink.NOSPLIT != 0) {
			// MOVW $4(R13), R1
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.From.Type_ = D_CONST
			p.From.Reg = 13
			p.From.Offset = 4
			p.To.Type_ = D_REG
			p.To.Reg = 1

			// MOVW $n(R13), R2
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.From.Type_ = D_CONST
			p.From.Reg = 13
			p.From.Offset = 4 + int64(autoffset)
			p.To.Type_ = D_REG
			p.To.Reg = 2

			// MOVW $0, R3
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.From.Type_ = D_CONST
			p.From.Offset = 0
			p.To.Type_ = D_REG
			p.To.Reg = 3

			// L:
			//	MOVW.nil R3, 0(R1) +4
			//	CMP R1, R2
			//	BNE L
			pl = liblink.Appendp(ctxt, p)
			p = pl

			p.As = AMOVW
			p.From.Type_ = D_REG
			p.From.Reg = 3
			p.To.Type_ = D_OREG
			p.To.Reg = 1
			p.To.Offset = 4
			p.Scond |= C_PBIT

			p = liblink.Appendp(ctxt, p)
			p.As = ACMP
			p.From.Type_ = D_REG
			p.From.Reg = 1
			p.Reg = 2

			p = liblink.Appendp(ctxt, p)
			p.As = ABNE
			p.To.Type_ = D_BRANCH
			p.Pcond = pl
		}
	}

	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	for p = cursym.Text; p != nil; p = p.Link {

		switch p.As {
		case ACASE:
			if ctxt.Flag_shared != 0 {
				linkcase(p)
			}

		case ATEXT:
			p.Mark |= LEAF

		case ARET:
			break

		case ADIV,
			ADIVU,
			AMOD,
			AMODU:
			q = p
			if ctxt.Sym_div == nil {
				initdiv(ctxt)
			}
			cursym.Text.Mark &^= LEAF
			continue

		case ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			if q1 != nil {
				q1.Mark |= p.Mark
			}
			continue

		case ABL,
			ABX,
			ADUFFZERO,
			ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough

		case ABCASE,
			AB,
			ABEQ,
			ABNE,
			ABCS,
			ABHS,
			ABCC,
			ABLO,
			ABMI,
			ABPL,
			ABVS,
			ABVC,
			ABHI,
			ABLS,
			ABGE,
			ABLT,
			ABGT,
			ABLE:
			q1 = p.Pcond
			if q1 != nil {
				for q1.As == ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}
			}

			break
		}

		q = p
	}

	for p = cursym.Text; p != nil; p = p.Link {
		o = int(p.As)
		switch o {
		case ATEXT:
			autosize = int32(p.To.Offset + 4)
			if autosize <= 4 {
				if cursym.Text.Mark&LEAF != 0 {
					p.To.Offset = -4
					autosize = 0
				}
			}

			if !(autosize != 0) && !(cursym.Text.Mark&LEAF != 0) {
				if ctxt.Debugvlog != 0 {
					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
					liblink.Bflush(ctxt.Bso)
				}

				cursym.Text.Mark |= LEAF
			}

			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = 1
				if !(autosize != 0) {
					break
				}
			}

			if !(p.Reg&liblink.NOSPLIT != 0) {
				p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.Reg&liblink.NEEDCTXT != 0))) // emit split check
			}

			// MOVW.W		R14,$-autosize(SP)
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.Scond |= C_WBIT
			p.From.Type_ = D_REG
			p.From.Reg = REGLINK
			p.To.Type_ = D_OREG
			p.To.Offset = int64(-autosize)
			p.To.Reg = REGSP
			p.Spadj = autosize

			if cursym.Text.Reg&liblink.WRAPPER != 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOVW g_panic(g), R1
				//	CMP $0, R1
				//	B.EQ end
				//	MOVW panic_argp(R1), R2
				//	ADD $(autosize+4), R13, R3
				//	CMP R2, R3
				//	B.NE end
				//	ADD $4, R13, R4
				//	MOVW R4, panic_argp(R1)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.

				p = liblink.Appendp(ctxt, p)

				p.As = AMOVW
				p.From.Type_ = D_OREG
				p.From.Reg = REGG
				p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
				p.To.Type_ = D_REG
				p.To.Reg = 1

				p = liblink.Appendp(ctxt, p)
				p.As = ACMP
				p.From.Type_ = D_CONST
				p.From.Offset = 0
				p.Reg = 1

				p = liblink.Appendp(ctxt, p)
				p.As = ABEQ
				p.To.Type_ = D_BRANCH
				p1 = p

				p = liblink.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type_ = D_OREG
				p.From.Reg = 1
				p.From.Offset = 0 // Panic.argp
				p.To.Type_ = D_REG
				p.To.Reg = 2

				p = liblink.Appendp(ctxt, p)
				p.As = AADD
				p.From.Type_ = D_CONST
				p.From.Offset = int64(autosize) + 4
				p.Reg = 13
				p.To.Type_ = D_REG
				p.To.Reg = 3

				p = liblink.Appendp(ctxt, p)
				p.As = ACMP
				p.From.Type_ = D_REG
				p.From.Reg = 2
				p.Reg = 3

				p = liblink.Appendp(ctxt, p)
				p.As = ABNE
				p.To.Type_ = D_BRANCH
				p2 = p

				p = liblink.Appendp(ctxt, p)
				p.As = AADD
				p.From.Type_ = D_CONST
				p.From.Offset = 4
				p.Reg = 13
				p.To.Type_ = D_REG
				p.To.Reg = 4

				p = liblink.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Type_ = D_REG
				p.From.Reg = 4
				p.To.Type_ = D_OREG
				p.To.Reg = 1
				p.To.Offset = 0 // Panic.argp

				p = liblink.Appendp(ctxt, p)

				p.As = ANOP
				p1.Pcond = p
				p2.Pcond = p
			}

		case ARET:
			nocache5(p)
			if cursym.Text.Mark&LEAF != 0 {
				if !(autosize != 0) {
					p.As = AB
					p.From = zprg5.From
					if p.To.Sym != nil { // retjmp
						p.To.Type_ = D_BRANCH
					} else {

						p.To.Type_ = D_OREG
						p.To.Offset = 0
						p.To.Reg = REGLINK
					}

					break
				}
			}

			p.As = AMOVW
			p.Scond |= C_PBIT
			p.From.Type_ = D_OREG
			p.From.Offset = int64(autosize)
			p.From.Reg = REGSP
			p.To.Type_ = D_REG
			p.To.Reg = REGPC

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so no spadj.
			if p.To.Sym != nil { // retjmp
				p.To.Reg = REGLINK
				q2 = liblink.Appendp(ctxt, p)
				q2.As = AB
				q2.To.Type_ = D_BRANCH
				q2.To.Sym = p.To.Sym
				p.To.Sym = nil
				p = q2
			}

		case AADD:
			if p.From.Type_ == D_CONST && p.From.Reg == NREG && p.To.Type_ == D_REG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.From.Offset)
			}

		case ASUB:
			if p.From.Type_ == D_CONST && p.From.Reg == NREG && p.To.Type_ == D_REG && p.To.Reg == REGSP {
				p.Spadj = int32(p.From.Offset)
			}

		case ADIV,
			ADIVU,
			AMOD,
			AMODU:
			if ctxt.Debugdivmod != 0 {
				break
			}
			if p.From.Type_ != D_REG {
				break
			}
			if p.To.Type_ != D_REG {
				break
			}
			q1 = p

			/* MOV a,4(SP) */
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type_ = D_REG
			p.From.Reg = q1.From.Reg
			p.To.Type_ = D_OREG
			p.To.Reg = REGSP
			p.To.Offset = 4

			/* MOV b,REGTMP */
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type_ = D_REG
			p.From.Reg = int8(q1.Reg)
			if q1.Reg == NREG {
				p.From.Reg = q1.To.Reg
			}
			p.To.Type_ = D_REG
			p.To.Reg = REGTMP
			p.To.Offset = 0

			/* CALL appropriate */
			p = liblink.Appendp(ctxt, p)

			p.As = ABL
			p.Lineno = q1.Lineno
			p.To.Type_ = D_BRANCH
			switch o {
			case ADIV:
				p.To.Sym = ctxt.Sym_div

			case ADIVU:
				p.To.Sym = ctxt.Sym_divu

			case AMOD:
				p.To.Sym = ctxt.Sym_mod

			case AMODU:
				p.To.Sym = ctxt.Sym_modu
				break
			}

			/* MOV REGTMP, b */
			p = liblink.Appendp(ctxt, p)

			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Type_ = D_REG
			p.From.Reg = REGTMP
			p.From.Offset = 0
			p.To.Type_ = D_REG
			p.To.Reg = q1.To.Reg

			/* ADD $8,SP */
			p = liblink.Appendp(ctxt, p)

			p.As = AADD
			p.Lineno = q1.Lineno
			p.From.Type_ = D_CONST
			p.From.Reg = NREG
			p.From.Offset = 8
			p.Reg = NREG
			p.To.Type_ = D_REG
			p.To.Reg = REGSP
			p.Spadj = -8

			/* Keep saved LR at 0(SP) after SP change. */
			/* MOVW 0(SP), REGTMP; MOVW REGTMP, -8!(SP) */
			/* TODO: Remove SP adjustments; see issue 6699. */
			q1.As = AMOVW

			q1.From.Type_ = D_OREG
			q1.From.Reg = REGSP
			q1.From.Offset = 0
			q1.Reg = NREG
			q1.To.Type_ = D_REG
			q1.To.Reg = REGTMP

			/* SUB $8,SP */
			q1 = liblink.Appendp(ctxt, q1)

			q1.As = AMOVW
			q1.From.Type_ = D_REG
			q1.From.Reg = REGTMP
			q1.Reg = NREG
			q1.To.Type_ = D_OREG
			q1.To.Reg = REGSP
			q1.To.Offset = -8
			q1.Scond |= C_WBIT
			q1.Spadj = 8

		case AMOVW:
			if (p.Scond&C_WBIT != 0) && p.To.Type_ == D_OREG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.To.Offset)
			}
			if (p.Scond&C_PBIT != 0) && p.From.Type_ == D_OREG && p.From.Reg == REGSP && p.To.Reg != REGPC {
				p.Spadj = int32(-p.From.Offset)
			}
			if p.From.Type_ == D_CONST && p.From.Reg == REGSP && p.To.Type_ == D_REG && p.To.Reg == REGSP {
				p.Spadj = int32(-p.From.Offset)
			}
			break
		}
	}
}
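
The first walk over the function above is largely about deciding whether it is a leaf: ATEXT starts out marked LEAF, and any call (ABL, ABX, the Duff routines) or a DIV/MOD that will later be rewritten into a runtime call clears the mark. A minimal sketch of that classification, with stand-in opcode values rather than the liblink constants:

package main

import "fmt"

const (
	opText = iota
	opAdd
	opCall // models ABL/ABX/ADUFFZERO/ADUFFCOPY
	opDiv  // models ADIV/ADIVU/AMOD/AMODU, rewritten into a runtime call
	opRet
)

// isLeaf reports whether a function body contains no calls, the property
// the LEAF mark tracks in addstacksplit.
func isLeaf(body []int) bool {
	leaf := true
	for _, op := range body {
		switch op {
		case opCall, opDiv:
			leaf = false
		}
	}
	return leaf
}

func main() {
	fmt.Println(isLeaf([]int{opText, opAdd, opRet}))        // true
	fmt.Println(isLeaf([]int{opText, opCall, opRet}))       // false
	fmt.Println(isLeaf([]int{opText, opDiv, opAdd, opRet})) // false
}
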
Example #4
File: obj9.go Project: rsc/tmp
/*
// instruction scheduling
	if(debug['Q'] == 0)
		return;

	curtext = nil;
	q = nil;	// p - 1
	q1 = firstp;	// top of block
	o = 0;		// count of instructions
	for(p = firstp; p != nil; p = p1) {
		p1 = p->link;
		o++;
		if(p->mark & NOSCHED){
			if(q1 != p){
				sched(q1, q);
			}
			for(; p != nil; p = p->link){
				if(!(p->mark & NOSCHED))
					break;
				q = p;
			}
			p1 = p;
			q1 = p;
			o = 0;
			continue;
		}
		if(p->mark & (LABEL|SYNC)) {
			if(q1 != p)
				sched(q1, q);
			q1 = p;
			o = 1;
		}
		if(p->mark & (BRANCH|SYNC)) {
			sched(q1, p);
			q1 = p1;
			o = 0;
		}
		if(o >= NSCHED) {
			sched(q1, p);
			q1 = p1;
			o = 0;
		}
		q = p;
	}
*/
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int32, noctxt int) *liblink.Prog {

	var q *liblink.Prog
	var q1 *liblink.Prog

	// MOVD	g_stackguard(g), R3
	p = liblink.Appendp(ctxt, p)

	p.As = AMOVD
	p.From.Type_ = D_OREG
	p.From.Reg = REGG
	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
	if ctxt.Cursym.Cfunc != 0 {
		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
	}
	p.To.Type_ = D_REG
	p.To.Reg = 3

	q = nil
	if framesize <= liblink.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = liblink.Appendp(ctxt, p)

		p.As = ACMPU
		p.From.Type_ = D_REG
		p.From.Reg = 3
		p.To.Type_ = D_REG
		p.To.Reg = REGSP
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	ADD $-framesize, SP, R4
		//	CMP stackguard, R4
		p = liblink.Appendp(ctxt, p)

		p.As = AADD
		p.From.Type_ = D_CONST
		p.From.Offset = int64(-framesize)
		p.Reg = REGSP
		p.To.Type_ = D_REG
		p.To.Reg = 4

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPU
		p.From.Type_ = D_REG
		p.From.Reg = 3
		p.To.Type_ = D_REG
		p.To.Reg = 4
	} else {

		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	// stackguard is R3
		//	CMP	R3, $StackPreempt
		//	BEQ	label-of-call-to-morestack
		//	ADD	$StackGuard, SP, R4
		//	SUB	R3, R4
		//	MOVD	$(framesize+(StackGuard-StackSmall)), R31
		//	CMPU	R31, R4
		p = liblink.Appendp(ctxt, p)

		p.As = ACMP
		p.From.Type_ = D_REG
		p.From.Reg = 3
		p.To.Type_ = D_CONST
		p.To.Offset = liblink.StackPreempt

		p = liblink.Appendp(ctxt, p)
		q = p
		p.As = ABEQ
		p.To.Type_ = D_BRANCH

		p = liblink.Appendp(ctxt, p)
		p.As = AADD
		p.From.Type_ = D_CONST
		p.From.Offset = liblink.StackGuard
		p.Reg = REGSP
		p.To.Type_ = D_REG
		p.To.Reg = 4

		p = liblink.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Type_ = D_REG
		p.From.Reg = 3
		p.To.Type_ = D_REG
		p.To.Reg = 4

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVD
		p.From.Type_ = D_CONST
		p.From.Offset = int64(framesize) + liblink.StackGuard - liblink.StackSmall
		p.To.Type_ = D_REG
		p.To.Reg = REGTMP

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPU
		p.From.Type_ = D_REG
		p.From.Reg = REGTMP
		p.To.Type_ = D_REG
		p.To.Reg = 4
	}

	// q1: BLT	done
	p = liblink.Appendp(ctxt, p)
	q1 = p

	p.As = ABLT
	p.To.Type_ = D_BRANCH

	// MOVD	LR, R5
	p = liblink.Appendp(ctxt, p)

	p.As = AMOVD
	p.From.Type_ = D_SPR
	p.From.Offset = D_LR
	p.To.Type_ = D_REG
	p.To.Reg = 5
	if q != nil {
		q.Pcond = p
	}

	// BL	runtime.morestack(SB)
	p = liblink.Appendp(ctxt, p)

	p.As = ABL
	p.To.Type_ = D_BRANCH
	if ctxt.Cursym.Cfunc != 0 {
		p.To.Sym = liblink.Linklookup(ctxt, "runtime.morestackc", 0)
	} else {

		p.To.Sym = ctxt.Symmorestack[noctxt]
	}

	// BR	start
	p = liblink.Appendp(ctxt, p)

	p.As = ABR
	p.To.Type_ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link

	// placeholder for q1's jump target
	p = liblink.Appendp(ctxt, p)

	p.As = ANOP // zero-width place holder
	q1.Pcond = p

	return p
}
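
One pattern worth calling out in the ppc64 version: the BEQ (kept in q) and the BLT (kept in q1) are emitted before their targets exist, and are only patched once the MOVD LR/BL sequence and the trailing zero-width NOP have been appended. A compressed sketch of that forward patching, using a stand-in prog type instead of liblink.Prog and liblink.Appendp, and fewer instructions than the real sequence:

package main

import "fmt"

type prog struct {
	as    string
	pcond *prog // branch target, nil until patched
	link  *prog
}

// appendp inserts a fresh instruction after p and returns it, mirroring
// the way liblink.Appendp is used above.
func appendp(p *prog) *prog {
	q := &prog{link: p.link}
	p.link = q
	return q
}

func main() {
	text := &prog{as: "TEXT"}
	p := text

	p = appendp(p)
	p.as = "CMP stackguard, $StackPreempt"

	p = appendp(p)
	p.as = "BEQ"
	beq := p // remembered, like q in stacksplit

	p = appendp(p)
	p.as = "BLT"
	blt := p // remembered, like q1 in stacksplit

	p = appendp(p)
	p.as = "MOVD LR, R5"
	beq.pcond = p // preempt case jumps straight into the morestack call path

	p = appendp(p)
	p.as = "BL runtime.morestack(SB)"

	p = appendp(p)
	p.as = "NOP" // zero-width placeholder, the "done" label
	blt.pcond = p

	for q := text; q != nil; q = q.link {
		target := ""
		if q.pcond != nil {
			target = " -> " + q.pcond.as
		}
		fmt.Println(q.as + target)
	}
}
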
Example #5
File: obj9.go Project: rsc/tmp
func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var r *liblink.Prog
	var a int
	var b int
	var i int

loop:
	if p == nil {
		return
	}
	a = int(p.As)
	if a == ABR {
		q = p.Pcond
		if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
			p.Mark |= FOLL
			(*last).Link = p
			*last = p
			p = p.Link
			xfol(ctxt, p, last)
			p = q
			if p != nil && !(p.Mark&FOLL != 0) {
				goto loop
			}
			return
		}

		if q != nil {
			p.Mark |= FOLL
			p = q
			if !(p.Mark&FOLL != 0) {
				goto loop
			}
		}
	}

	if p.Mark&FOLL != 0 {
		i = 0
		q = p
		for ; i < 4; (func() { i++; q = q.Link })() {
			if q == *last || (q.Mark&NOSCHED != 0) {
				break
			}
			b = 0 /* set */
			a = int(q.As)
			if a == ANOP {
				i--
				continue
			}

			if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
				goto copy
			}
			if !(q.Pcond != nil) || (q.Pcond.Mark&FOLL != 0) {
				continue
			}
			b = relinv(a)
			if !(b != 0) {
				continue
			}

		copy:
			for {
				r = ctxt.Arch.Prg()
				*r = *p
				if !(r.Mark&FOLL != 0) {
					fmt.Printf("cant happen 1\n")
				}
				r.Mark |= FOLL
				if p != q {
					p = p.Link
					(*last).Link = r
					*last = r
					continue
				}

				(*last).Link = r
				*last = r
				if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
					return
				}
				r.As = int16(b)
				r.Pcond = p.Link
				r.Link = p.Pcond
				if !(r.Link.Mark&FOLL != 0) {
					xfol(ctxt, r.Link, last)
				}
				if !(r.Pcond.Mark&FOLL != 0) {
					fmt.Printf("cant happen 2\n")
				}
				return
			}
		}

		a = ABR
		q = ctxt.Arch.Prg()
		q.As = int16(a)
		q.Lineno = p.Lineno
		q.To.Type_ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	p.Mark |= FOLL
	(*last).Link = p
	*last = p
	if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
		if p.Mark&NOSCHED != 0 {
			p = p.Link
			goto loop
		}

		return
	}

	if p.Pcond != nil {
		if a != ABL && p.Link != nil {
			xfol(ctxt, p.Link, last)
			p = p.Pcond
			if p == nil || (p.Mark&FOLL != 0) {
				return
			}
			goto loop
		}
	}

	p = p.Link
	goto loop
}
Example #6
File: obj9.go Project: rsc/tmp
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var q *liblink.Prog
	var p1 *liblink.Prog
	var p2 *liblink.Prog
	var q1 *liblink.Prog
	var o int
	var mov int
	var aoffset int
	var textstksiz int64
	var textarg int64
	var autosize int32

	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	// TODO(minux): add morestack short-cuts with small fixed frame-size.
	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p = cursym.Text
	parsetextconst(p.To.Offset, &textstksiz, &textarg)

	cursym.Args = int32(p.To.Offset >> 32)
	cursym.Locals = int32(textstksiz)

	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	if ctxt.Debugvlog != 0 {

		fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", liblink.Cputime())
	}
	liblink.Bflush(ctxt.Bso)

	q = nil
	for p = cursym.Text; p != nil; p = p.Link {
		switch p.As {
		/* too hard, just leave alone */
		case ATEXT:
			q = p

			p.Mark |= LABEL | LEAF | SYNC
			if p.Link != nil {
				p.Link.Mark |= LABEL
			}

		case ANOR:
			q = p
			if p.To.Type_ == D_REG {
				if p.To.Reg == REGZERO {
					p.Mark |= LABEL | SYNC
				}
			}

		case ALWAR,
			ASTWCCC,
			AECIWX,
			AECOWX,
			AEIEIO,
			AICBI,
			AISYNC,
			ATLBIE,
			ATLBIEL,
			ASLBIA,
			ASLBIE,
			ASLBMFEE,
			ASLBMFEV,
			ASLBMTE,
			ADCBF,
			ADCBI,
			ADCBST,
			ADCBT,
			ADCBTST,
			ADCBZ,
			ASYNC,
			ATLBSYNC,
			APTESYNC,
			ATW,
			AWORD,
			ARFI,
			ARFCI,
			ARFID,
			AHRFID:
			q = p
			p.Mark |= LABEL | SYNC
			continue

		case AMOVW,
			AMOVWZ,
			AMOVD:
			q = p
			switch p.From.Type_ {
			case D_MSR,
				D_SPR,
				D_FPSCR,
				D_CREG,
				D_DCR:
				p.Mark |= LABEL | SYNC
			}

			switch p.To.Type_ {
			case D_MSR,
				D_SPR,
				D_FPSCR,
				D_CREG,
				D_DCR:
				p.Mark |= LABEL | SYNC
			}

			continue

		case AFABS,
			AFABSCC,
			AFADD,
			AFADDCC,
			AFCTIW,
			AFCTIWCC,
			AFCTIWZ,
			AFCTIWZCC,
			AFDIV,
			AFDIVCC,
			AFMADD,
			AFMADDCC,
			AFMOVD,
			AFMOVDU,
			/* case AFMOVDS: */
			AFMOVS,
			AFMOVSU,

			/* case AFMOVSD: */
			AFMSUB,
			AFMSUBCC,
			AFMUL,
			AFMULCC,
			AFNABS,
			AFNABSCC,
			AFNEG,
			AFNEGCC,
			AFNMADD,
			AFNMADDCC,
			AFNMSUB,
			AFNMSUBCC,
			AFRSP,
			AFRSPCC,
			AFSUB,
			AFSUBCC:
			q = p

			p.Mark |= FLOAT
			continue

		case ABL,
			ABCL,
			ADUFFZERO,
			ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough

		case ABC,
			ABEQ,
			ABGE,
			ABGT,
			ABLE,
			ABLT,
			ABNE,
			ABR,
			ABVC,
			ABVS:
			p.Mark |= BRANCH
			q = p
			q1 = p.Pcond
			if q1 != nil {
				for q1.As == ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}

				if !(q1.Mark&LEAF != 0) {
					q1.Mark |= LABEL
				}
			} else {

				p.Mark |= LABEL
			}
			q1 = p.Link
			if q1 != nil {
				q1.Mark |= LABEL
			}
			continue

		case AFCMPO,
			AFCMPU:
			q = p
			p.Mark |= FCMP | FLOAT
			continue

		case ARETURN:
			q = p
			if p.Link != nil {
				p.Link.Mark |= LABEL
			}
			continue

		case ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			q1.Mark |= p.Mark
			continue

		default:
			q = p
			continue
		}
	}

	autosize = 0
	for p = cursym.Text; p != nil; p = p.Link {
		o = int(p.As)
		switch o {
		case ATEXT:
			mov = AMOVD
			aoffset = 0
			autosize = int32(textstksiz + 8)
			if (p.Mark&LEAF != 0) && autosize <= 8 {
				autosize = 0
			} else if autosize&4 != 0 {
				autosize += 4
			}
			p.To.Offset = int64(uint64(p.To.Offset)&(0xffffffff<<32) | uint64(uint32(autosize-8)))

			if !(p.Reg&liblink.NOSPLIT != 0) {
				p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.Reg&liblink.NEEDCTXT != 0))) // emit split check
			}

			q = p

			if autosize != 0 {
				/* use MOVDU to adjust R1 when saving R31, if autosize is small */
				if !(cursym.Text.Mark&LEAF != 0) && autosize >= -BIG && autosize <= BIG {

					mov = AMOVDU
					aoffset = int(-autosize)
				} else {

					q = liblink.Appendp(ctxt, p)
					q.As = AADD
					q.Lineno = p.Lineno
					q.From.Type_ = D_CONST
					q.From.Offset = int64(-autosize)
					q.To.Type_ = D_REG
					q.To.Reg = REGSP
					q.Spadj = +autosize
				}
			} else if !(cursym.Text.Mark&LEAF != 0) {
				if ctxt.Debugvlog != 0 {
					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
					liblink.Bflush(ctxt.Bso)
				}

				cursym.Text.Mark |= LEAF
			}

			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = 1
				break
			}

			q = liblink.Appendp(ctxt, q)
			q.As = AMOVD
			q.Lineno = p.Lineno
			q.From.Type_ = D_SPR
			q.From.Offset = D_LR
			q.To.Type_ = D_REG
			q.To.Reg = REGTMP

			q = liblink.Appendp(ctxt, q)
			q.As = int16(mov)
			q.Lineno = p.Lineno
			q.From.Type_ = D_REG
			q.From.Reg = REGTMP
			q.To.Type_ = D_OREG
			q.To.Offset = int64(aoffset)
			q.To.Reg = REGSP
			if q.As == AMOVDU {
				q.Spadj = int32(-aoffset)
			}

			if cursym.Text.Reg&liblink.WRAPPER != 0 {
				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
				//
				//	MOVD g_panic(g), R3
				//	CMP R0, R3
				//	BEQ end
				//	MOVD panic_argp(R3), R4
				//	ADD $(autosize+8), R1, R5
				//	CMP R4, R5
				//	BNE end
				//	ADD $8, R1, R6
				//	MOVD R6, panic_argp(R3)
				// end:
				//	NOP
				//
				// The NOP is needed to give the jumps somewhere to land.
				// It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes.

				q = liblink.Appendp(ctxt, q)

				q.As = AMOVD
				q.From.Type_ = D_OREG
				q.From.Reg = REGG
				q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
				q.To.Type_ = D_REG
				q.To.Reg = 3

				q = liblink.Appendp(ctxt, q)
				q.As = ACMP
				q.From.Type_ = D_REG
				q.From.Reg = 0
				q.To.Type_ = D_REG
				q.To.Reg = 3

				q = liblink.Appendp(ctxt, q)
				q.As = ABEQ
				q.To.Type_ = D_BRANCH
				p1 = q

				q = liblink.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type_ = D_OREG
				q.From.Reg = 3
				q.From.Offset = 0 // Panic.argp
				q.To.Type_ = D_REG
				q.To.Reg = 4

				q = liblink.Appendp(ctxt, q)
				q.As = AADD
				q.From.Type_ = D_CONST
				q.From.Offset = int64(autosize) + 8
				q.Reg = REGSP
				q.To.Type_ = D_REG
				q.To.Reg = 5

				q = liblink.Appendp(ctxt, q)
				q.As = ACMP
				q.From.Type_ = D_REG
				q.From.Reg = 4
				q.To.Type_ = D_REG
				q.To.Reg = 5

				q = liblink.Appendp(ctxt, q)
				q.As = ABNE
				q.To.Type_ = D_BRANCH
				p2 = q

				q = liblink.Appendp(ctxt, q)
				q.As = AADD
				q.From.Type_ = D_CONST
				q.From.Offset = 8
				q.Reg = REGSP
				q.To.Type_ = D_REG
				q.To.Reg = 6

				q = liblink.Appendp(ctxt, q)
				q.As = AMOVD
				q.From.Type_ = D_REG
				q.From.Reg = 6
				q.To.Type_ = D_OREG
				q.To.Reg = 3
				q.To.Offset = 0 // Panic.argp

				q = liblink.Appendp(ctxt, q)

				q.As = ANOP
				p1.Pcond = q
				p2.Pcond = q
			}

		case ARETURN:
			if p.From.Type_ == D_CONST {
				ctxt.Diag("using BECOME (%P) is not supported!", p)
				break
			}

			if p.To.Sym != nil { // retjmp
				p.As = ABR
				p.To.Type_ = D_BRANCH
				break
			}

			if cursym.Text.Mark&LEAF != 0 {
				if !(autosize != 0) {
					p.As = ABR
					p.From = zprg.From
					p.To.Type_ = D_SPR
					p.To.Offset = D_LR
					p.Mark |= BRANCH
					break
				}

				p.As = AADD
				p.From.Type_ = D_CONST
				p.From.Offset = int64(autosize)
				p.To.Type_ = D_REG
				p.To.Reg = REGSP
				p.Spadj = -autosize

				q = ctxt.Arch.Prg()
				q.As = ABR
				q.Lineno = p.Lineno
				q.To.Type_ = D_SPR
				q.To.Offset = D_LR
				q.Mark |= BRANCH
				q.Spadj = +autosize

				q.Link = p.Link
				p.Link = q
				break
			}

			p.As = AMOVD
			p.From.Type_ = D_OREG
			p.From.Offset = 0
			p.From.Reg = REGSP
			p.To.Type_ = D_REG
			p.To.Reg = REGTMP

			q = ctxt.Arch.Prg()
			q.As = AMOVD
			q.Lineno = p.Lineno
			q.From.Type_ = D_REG
			q.From.Reg = REGTMP
			q.To.Type_ = D_SPR
			q.To.Offset = D_LR

			q.Link = p.Link
			p.Link = q
			p = q

			if false {
				// Debug bad returns
				q = ctxt.Arch.Prg()

				q.As = AMOVD
				q.Lineno = p.Lineno
				q.From.Type_ = D_OREG
				q.From.Offset = 0
				q.From.Reg = REGTMP
				q.To.Type_ = D_REG
				q.To.Reg = REGTMP

				q.Link = p.Link
				p.Link = q
				p = q
			}

			if autosize != 0 {
				q = ctxt.Arch.Prg()
				q.As = AADD
				q.Lineno = p.Lineno
				q.From.Type_ = D_CONST
				q.From.Offset = int64(autosize)
				q.To.Type_ = D_REG
				q.To.Reg = REGSP
				q.Spadj = -autosize

				q.Link = p.Link
				p.Link = q
			}

			q1 = ctxt.Arch.Prg()
			q1.As = ABR
			q1.Lineno = p.Lineno
			q1.To.Type_ = D_SPR
			q1.To.Offset = D_LR
			q1.Mark |= BRANCH
			q1.Spadj = +autosize

			q1.Link = q.Link
			q.Link = q1

		case AADD:
			if p.To.Type_ == D_REG && p.To.Reg == REGSP && p.From.Type_ == D_CONST {
				p.Spadj = int32(-p.From.Offset)
			}
			break
		}
	}
}
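
Both 64-bit back ends above read the TEXT operand as a packed pair: argument size in the high 32 bits (cursym.Args = int32(p.To.Offset >> 32)) and frame size in the low 32 bits, which is also what the in-place rewrite of p.To.Offset in the ATEXT case preserves. parsetextconst itself is not part of this listing, so the helpers below are only a sketch of that bit layout under that reading, not the real function:

package main

import "fmt"

func packTextConst(framesize, argsize int32) int64 {
	return int64(uint64(uint32(argsize))<<32 | uint64(uint32(framesize)))
}

func unpackTextConst(off int64) (framesize, argsize int32) {
	return int32(uint32(off)), int32(off >> 32)
}

func main() {
	off := packTextConst(112, 24)
	fs, args := unpackTextConst(off)
	fmt.Printf("offset=%#x framesize=%d argsize=%d\n", off, fs, args)

	// The prologue rewrites only the low half in place, as in:
	//   p.To.Offset = int64(uint64(p.To.Offset)&(0xffffffff<<32) | uint64(uint32(autosize-8)))
	autosize := int32(128)
	off = int64(uint64(off)&(0xffffffff<<32) | uint64(uint32(autosize-8)))
	fs, args = unpackTextConst(off)
	fmt.Printf("after rewrite: framesize=%d argsize=%d\n", fs, args)
}
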
Example #7
File: obj8.go Project: rsc/tmp
func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var i int
	var a int

loop:
	if p == nil {
		return
	}
	if p.As == AJMP {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1

			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}

	if p.Mark != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; (func() { i++; q = q.Link })() {

			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = int(q.As)
			if a == ANOP {
				i--
				continue
			}

			if nofollow(a) != 0 || pushpop(a) != 0 {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == ANOP {
					p = p.Link
					continue
				}

				q = liblink.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}

				q.As = int16(relinv(int(q.As)))
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop
				/* */
			}
		}
		q = ctxt.Arch.Prg()
		q.As = AJMP
		q.Lineno = p.Lineno
		q.To.Type_ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark = 1

	(*last).Link = p
	*last = p
	a = int(p.As)

	/* continue loop with what comes after p */
	if nofollow(a) != 0 {

		return
	}
	if p.Pcond != nil && a != ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = liblink.Brchain(ctxt, p.Pcond)
		if q != nil {

			p.Pcond = q
		}
		q = liblink.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type_ == D_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = int16(relinv(a))

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {

			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = int16(relinv(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}
Example #8
File: obj8.go Project: rsc/tmp
// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int32, noctxt int, jmpok **liblink.Prog) *liblink.Prog {

	var q *liblink.Prog
	var q1 *liblink.Prog

	if ctxt.Debugstack != 0 {
		// 8l -K means check not only for stack
		// overflow but stack underflow.
		// On underflow, INT 3 (breakpoint).
		// Underflow itself is rare but this also
		// catches out-of-sync stack guard info.
		p = liblink.Appendp(ctxt, p)

		p.As = ACMPL
		p.From.Type_ = D_INDIR + D_CX
		p.From.Offset = 4
		p.To.Type_ = D_SP

		p = liblink.Appendp(ctxt, p)
		p.As = AJCC
		p.To.Type_ = D_BRANCH
		p.To.Offset = 4
		q1 = p

		p = liblink.Appendp(ctxt, p)
		p.As = AINT
		p.From.Type_ = D_CONST
		p.From.Offset = 3

		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		q1.Pcond = p
	}

	q1 = nil

	if framesize <= liblink.StackSmall {
		// small stack: SP <= stackguard
		//	CMPL SP, stackguard
		p = liblink.Appendp(ctxt, p)

		p.As = ACMPL
		p.From.Type_ = D_SP
		p.To.Type_ = D_INDIR + D_CX
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAL -(framesize-StackSmall)(SP), AX
		//	CMPL AX, stackguard
		p = liblink.Appendp(ctxt, p)

		p.As = ALEAL
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = -(int64(framesize) - liblink.StackSmall)
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Type_ = D_AX
		p.To.Type_ = D_INDIR + D_CX
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else {

		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVL	stackguard, CX
		//	CMPL	CX, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAL	StackGuard(SP), AX
		//	SUBL	stackguard, AX
		//	CMPL	AX, $(framesize+(StackGuard-StackSmall))
		p = liblink.Appendp(ctxt, p)

		p.As = AMOVL
		p.From.Type_ = D_INDIR + D_CX
		p.From.Offset = 0
		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
		p.To.Type_ = D_SI

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Type_ = D_SI
		p.To.Type_ = D_CONST
		p.To.Offset = int64(uint32(liblink.StackPreempt & (1<<32 - 1)))

		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type_ = D_BRANCH
		q1 = p

		p = liblink.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = liblink.StackGuard
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = ASUBL
		p.From.Type_ = D_SI
		p.From.Offset = 0
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Type_ = D_AX
		p.To.Type_ = D_CONST
		p.To.Offset = int64(framesize) + (liblink.StackGuard - liblink.StackSmall)
	}

	// common
	p = liblink.Appendp(ctxt, p)

	p.As = AJHI
	p.To.Type_ = D_BRANCH
	p.To.Offset = 4
	q = p

	p = liblink.Appendp(ctxt, p)
	p.As = ACALL
	p.To.Type_ = D_BRANCH
	if ctxt.Cursym.Cfunc != 0 {
		p.To.Sym = liblink.Linklookup(ctxt, "runtime.morestackc", 0)
	} else {

		p.To.Sym = ctxt.Symmorestack[noctxt]
	}

	p = liblink.Appendp(ctxt, p)
	p.As = AJMP
	p.To.Type_ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link

	if q != nil {
		q.Pcond = p.Link
	}
	if q1 != nil {
		q1.Pcond = q.Link
	}

	*jmpok = q
	return p
}
Example #9
File: obj8.go Project: rsc/tmp
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var q *liblink.Prog
	var p1 *liblink.Prog
	var p2 *liblink.Prog
	var autoffset int32
	var deltasp int32
	var a int

	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	if ctxt.Headtype == liblink.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = liblink.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p = cursym.Text
	autoffset = int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	cursym.Locals = autoffset
	cursym.Args = p.To.Offset2

	q = nil

	if !(p.From.Scale&liblink.NOSPLIT != 0) || (p.From.Scale&liblink.WRAPPER != 0) {
		p = liblink.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if !(cursym.Text.From.Scale&liblink.NOSPLIT != 0) {
		p = stacksplit(ctxt, p, autoffset, bool2int(!(cursym.Text.From.Scale&liblink.NEEDCTXT != 0)), &q) // emit split check
	}

	if autoffset != 0 {

		p = liblink.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type_ = D_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {

		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = liblink.Appendp(ctxt, p)

		p.As = ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	if q != nil {
		q.Pcond = p
	}
	deltasp = autoffset

	if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVL g_panic(CX), BX
		//	TESTL BX, BX
		//	JEQ end
		//	LEAL (autoffset+4)(SP), DI
		//	CMPL panic_argp(BX), DI
		//	JNE end
		//	MOVL SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.

		p = liblink.Appendp(ctxt, p)

		p.As = AMOVL
		p.From.Type_ = D_INDIR + D_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type_ = D_BX

		p = liblink.Appendp(ctxt, p)
		p.As = ATESTL
		p.From.Type_ = D_BX
		p.To.Type_ = D_BX

		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type_ = D_BRANCH
		p1 = p

		p = liblink.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = int64(autoffset) + 4
		p.To.Type_ = D_DI

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Type_ = D_INDIR + D_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type_ = D_DI

		p = liblink.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type_ = D_BRANCH
		p2 = p

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Type_ = D_SP
		p.To.Type_ = D_INDIR + D_BX
		p.To.Offset = 0 // Panic.argp

		p = liblink.Appendp(ctxt, p)

		p.As = ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From.Scale&liblink.NOSPLIT != 0) {
		// 8l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = liblink.Appendp(ctxt, p)

		p.As = AMOVL
		p.From.Type_ = D_SP
		p.To.Type_ = D_DI

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Type_ = D_CONST
		p.From.Offset = int64(autoffset) / 4
		p.To.Type_ = D_CX

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Type_ = D_CONST
		p.From.Offset = 0
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = AREP

		p = liblink.Appendp(ctxt, p)
		p.As = ASTOSL
	}

	for ; p != nil; p = p.Link {
		a = int(p.From.Type_)
		if a == D_AUTO {
			p.From.Offset += int64(deltasp)
		}
		if a == D_PARAM {
			p.From.Offset += int64(deltasp) + 4
		}
		a = int(p.To.Type_)
		if a == D_AUTO {
			p.To.Offset += int64(deltasp)
		}
		if a == D_PARAM {
			p.To.Offset += int64(deltasp) + 4
		}

		switch p.As {
		default:
			continue

		case APUSHL,
			APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHW,
			APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL,
			APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPW,
			APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			p.As = AADJSP
			p.From.Type_ = D_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = liblink.Appendp(ctxt, p)
			p.As = ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = AJMP
		}
	}
}
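
The final loop above keeps a running deltasp: every PUSH/POP records its effect in Spadj, and at each RET the total must be back at autoffset, otherwise the pass reports "unbalanced PUSH/POP". The same invariant written as a standalone check, with stand-in opcodes rather than the liblink constants:

package main

import "fmt"

type op int

const (
	opPushl op = iota // +4, like APUSHL/APUSHFL
	opPopl            // -4, like APOPL/APOPFL
	opRet
	opOther
)

func spadj(o op) int32 {
	switch o {
	case opPushl:
		return 4
	case opPopl:
		return -4
	}
	return 0
}

// checkBalanced walks a function body and reports whether SP is back at
// the frame size at every RET, the invariant ctxt.Diag enforces.
func checkBalanced(autoffset int32, body []op) error {
	deltasp := autoffset
	for _, o := range body {
		deltasp += spadj(o)
		if o == opRet && deltasp != autoffset {
			return fmt.Errorf("unbalanced PUSH/POP: deltasp=%d autoffset=%d", deltasp, autoffset)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkBalanced(16, []op{opPushl, opOther, opPopl, opRet})) // <nil>
	fmt.Println(checkBalanced(16, []op{opPushl, opRet}))                  // unbalanced PUSH/POP
}
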
Example #10
File: obj6.go Project: rsc/tmp
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var q *liblink.Prog
	var p1 *liblink.Prog
	var p2 *liblink.Prog
	var autoffset int32
	var deltasp int32
	var a int
	var pcsize int
	var textstksiz int64
	var textarg int64

	if ctxt.Tlsg == nil {
		ctxt.Tlsg = liblink.Linklookup(ctxt, "runtime.tlsg", 0)
	}
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	if ctxt.Headtype == liblink.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = liblink.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p = cursym.Text
	parsetextconst(p.To.Offset, &textstksiz, &textarg)
	autoffset = int32(textstksiz)
	if autoffset < 0 {
		autoffset = 0
	}

	cursym.Args = int32(p.To.Offset >> 32)
	cursym.Locals = int32(textstksiz)

	if autoffset < liblink.StackSmall && !(p.From.Scale&liblink.NOSPLIT != 0) {
		for q = p; q != nil; q = q.Link {
			if q.As == ACALL {
				goto noleaf
			}
			if (q.As == ADUFFCOPY || q.As == ADUFFZERO) && autoffset >= liblink.StackSmall-8 {
				goto noleaf
			}
		}

		p.From.Scale |= liblink.NOSPLIT
	noleaf:
	}

	q = nil
	if !(p.From.Scale&liblink.NOSPLIT != 0) || (p.From.Scale&liblink.WRAPPER != 0) {
		p = liblink.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if !(cursym.Text.From.Scale&liblink.NOSPLIT != 0) {
		p = stacksplit(ctxt, p, autoffset, int32(textarg), bool2int(!(cursym.Text.From.Scale&liblink.NEEDCTXT != 0)), &q) // emit split check
	}

	if autoffset != 0 {

		if autoffset%int32(ctxt.Arch.Regsize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = liblink.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type_ = D_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {

		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = liblink.Appendp(ctxt, p)

		p.As = ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	if q != nil {
		q.Pcond = p
	}
	deltasp = autoffset

	if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.

		p = liblink.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type_ = D_INDIR + D_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type_ = D_BX
		if ctxt.Headtype == liblink.Hnacl {
			p.As = AMOVL
			p.From.Type_ = D_INDIR + D_R15
			p.From.Scale = 1
			p.From.Index = D_CX
		}

		p = liblink.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type_ = D_BX
		p.To.Type_ = D_BX
		if ctxt.Headtype == liblink.Hnacl {
			p.As = ATESTL
		}

		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type_ = D_BRANCH
		p1 = p

		p = liblink.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = int64(autoffset) + 8
		p.To.Type_ = D_DI
		if ctxt.Headtype == liblink.Hnacl {
			p.As = ALEAL
		}

		p = liblink.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type_ = D_INDIR + D_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type_ = D_DI
		if ctxt.Headtype == liblink.Hnacl {
			p.As = ACMPL
			p.From.Type_ = D_INDIR + D_R15
			p.From.Scale = 1
			p.From.Index = D_BX
		}

		p = liblink.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type_ = D_BRANCH
		p2 = p

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type_ = D_SP
		p.To.Type_ = D_INDIR + D_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == liblink.Hnacl {
			p.As = AMOVL
			p.To.Type_ = D_INDIR + D_R15
			p.To.Scale = 1
			p.To.Index = D_BX
		}

		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From.Scale&liblink.NOSPLIT != 0) {
		// 6l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = liblink.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type_ = D_SP
		p.To.Type_ = D_DI

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type_ = D_CONST
		p.From.Offset = int64(autoffset) / 8
		p.To.Type_ = D_CX

		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type_ = D_CONST
		p.From.Offset = 0
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = AREP

		p = liblink.Appendp(ctxt, p)
		p.As = ASTOSQ
	}

	for ; p != nil; p = p.Link {
		pcsize = int(p.Mode) / 8
		a = int(p.From.Type_)
		if a == D_AUTO {
			p.From.Offset += int64(deltasp)
		}
		if a == D_PARAM {
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		a = int(p.To.Type_)
		if a == D_AUTO {
			p.To.Offset += int64(deltasp)
		}
		if a == D_PARAM {
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL,
			APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ,
			APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW,
			APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL,
			APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ,
			APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW,
			APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			p.As = AADJSP
			p.From.Type_ = D_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = liblink.Appendp(ctxt, p)
			p.As = ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = AJMP
		}
	}
}
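
The WRAPPER block in each of these addstacksplit variants expands the same one-liner from its comment: if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame. Written as plain Go over toy types (the real code reaches g and the panic record through fixed field offsets, so the struct layout below is purely illustrative):

package main

import "fmt"

type panicRec struct {
	argp uintptr // stand-in for Panic.argp
}

type gToy struct {
	panic_ *panicRec // stand-in for G.panic
}

// adjustPanicArgp is what the MOVQ/CMPQ/JNE sequence in the WRAPPER
// block encodes: when the active panic's argp equals the wrapper's FP,
// repoint it at the wrapper's bottom-of-frame.
func adjustPanicArgp(gp *gToy, fp, bottomOfFrame uintptr) {
	if gp.panic_ != nil && gp.panic_.argp == fp {
		gp.panic_.argp = bottomOfFrame
	}
}

func main() {
	gp := &gToy{panic_: &panicRec{argp: 0x1000}}
	adjustPanicArgp(gp, 0x1000, 0xff8)
	fmt.Printf("argp is now %#x\n", gp.panic_.argp)
}
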

func indir_cx(ctxt *liblink.Link, a *liblink.Addr) {
	if ctxt.Headtype == liblink.Hnacl {
		a.Type_ = D_INDIR + D_R15
		a.Index = D_CX
		a.Scale = 1
		return
	}

	a.Type_ = D_INDIR + D_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *liblink.Link, p *liblink.Prog) *liblink.Prog {

	var next *liblink.Prog

	p.As = AMOVQ
	if ctxt.Arch.Ptrsize == 4 {
		p.As = AMOVL
	}
	p.From.Type_ = D_INDIR + D_TLS
	p.From.Offset = 0
	p.To.Type_ = D_CX

	next = p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == D_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int32, textarg int32, noctxt int, jmpok **liblink.Prog) *liblink.Prog {

	var q *liblink.Prog
	var q1 *liblink.Prog
	var cmp int
	var lea int
	var mov int
	var sub int

	cmp = ACMPQ
	lea = ALEAQ
	mov = AMOVQ
	sub = ASUBQ

	if ctxt.Headtype == liblink.Hnacl {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	q1 = nil
	if framesize <= liblink.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = liblink.Appendp(ctxt, p)

		p.As = int16(cmp)
		p.From.Type_ = D_SP
		indir_cx(ctxt, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = liblink.Appendp(ctxt, p)

		p.As = int16(lea)
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = -(int64(framesize) - liblink.StackSmall)
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type_ = D_AX
		indir_cx(ctxt, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else {

		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ	stackguard, CX
		//	CMPQ	CX, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAQ	StackGuard(SP), AX
		//	SUBQ	CX, AX
		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))

		p = liblink.Appendp(ctxt, p)

		p.As = int16(mov)
		indir_cx(ctxt, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
		p.To.Type_ = D_SI

		p = liblink.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type_ = D_SI
		p.To.Type_ = D_CONST
		p.To.Offset = liblink.StackPreempt

		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type_ = D_BRANCH
		q1 = p

		p = liblink.Appendp(ctxt, p)
		p.As = int16(lea)
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = liblink.StackGuard
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = int16(sub)
		p.From.Type_ = D_SI
		p.To.Type_ = D_AX

		p = liblink.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type_ = D_AX
		p.To.Type_ = D_CONST
		p.To.Offset = int64(framesize) + (liblink.StackGuard - liblink.StackSmall)
	}

	// common
	p = liblink.Appendp(ctxt, p)

	p.As = AJHI
	p.To.Type_ = D_BRANCH
	q = p

	p = liblink.Appendp(ctxt, p)
	p.As = ACALL
	p.To.Type_ = D_BRANCH
	if ctxt.Cursym.Cfunc != 0 {
		p.To.Sym = liblink.Linklookup(ctxt, "runtime.morestackc", 0)
	} else {

		p.To.Sym = ctxt.Symmorestack[noctxt]
	}

	p = liblink.Appendp(ctxt, p)
	p.As = AJMP
	p.To.Type_ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link

	if q != nil {
		q.Pcond = p.Link
	}
	if q1 != nil {
		q1.Pcond = q.Link
	}

	*jmpok = q
	return p
}

func follow(ctxt *liblink.Link, s *liblink.LSym) {
	var firstp *liblink.Prog
	var lastp *liblink.Prog

	ctxt.Cursym = s

	firstp = ctxt.Arch.Prg()
	lastp = firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a int) int {
	switch a {
	case AJMP,
		ARET,
		AIRETL,
		AIRETQ,
		AIRETW,
		ARETFL,
		ARETFQ,
		ARETFW,
		AUNDEF:
		return 1
	}

	return 0
}

func pushpop(a int) int {
	switch a {
	case APUSHL,
		APUSHFL,
		APUSHQ,
		APUSHFQ,
		APUSHW,
		APUSHFW,
		APOPL,
		APOPFL,
		APOPQ,
		APOPFQ,
		APOPW,
		APOPFW:
		return 1
	}

	return 0
}

func relinv(a int) int {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", anames6[a])
	return 0
}
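
xfol swaps a conditional jump's fall-through and target and relies on relinv to negate the condition exactly, so relinv has to be an involution: applying it twice must give the original opcode back. A toy property check of that shape, over stand-in condition codes rather than the real A-constants:

package main

import "fmt"

const (
	aJEQ = iota
	aJNE
	aJLT
	aJGE
	aJHI
	aJLS
)

func relinvToy(a int) int {
	switch a {
	case aJEQ:
		return aJNE
	case aJNE:
		return aJEQ
	case aJLT:
		return aJGE
	case aJGE:
		return aJLT
	case aJHI:
		return aJLS
	case aJLS:
		return aJHI
	}
	return -1
}

func main() {
	for a := aJEQ; a <= aJLS; a++ {
		if relinvToy(relinvToy(a)) != a {
			fmt.Println("not an involution for", a)
			return
		}
	}
	fmt.Println("relinvToy(relinvToy(a)) == a for all handled conditions")
}
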

func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var i int
	var a int

loop:
	if p == nil {
		return
	}
	if p.As == AJMP {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1

			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}

	if p.Mark != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; (func() { i++; q = q.Link })() {

			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = int(q.As)
			if a == ANOP {
				i--
				continue
			}

			if nofollow(a) != 0 || pushpop(a) != 0 {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == ANOP {
					p = p.Link
					continue
				}

				q = liblink.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}

				q.As = int16(relinv(int(q.As)))
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop
				/* */
			}
		}
		q = ctxt.Arch.Prg()
		q.As = AJMP
		q.Lineno = p.Lineno
		q.To.Type_ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark = 1

	(*last).Link = p
	*last = p
	a = int(p.As)

	/* continue loop with what comes after p */
	if nofollow(a) != 0 {

		return
	}
	if p.Pcond != nil && a != ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = liblink.Brchain(ctxt, p.Pcond)
		if q != nil {

			p.Pcond = q
		}
		q = liblink.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type_ == D_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = int16(relinv(a))

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {

			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = int16(relinv(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}

func prg() *liblink.Prog {
	var p *liblink.Prog

	p = new(liblink.Prog)
	*p = zprg
	return p
}

var Linkamd64 = liblink.LinkArch{
	ByteOrder:     binary.LittleEndian,
	Pconv:         Pconv,
	Name:          "amd64",
	Thechar:       '6',
	Endian:        liblink.LittleEndian,
	Addstacksplit: addstacksplit,
	Assemble:      span6,
	Datasize:      datasize,
	Follow:        follow,
	Iscall:        iscall,
	Isdata:        isdata,
	Prg:           prg,
	Progedit:      progedit,
	Settextflag:   settextflag,
	Symtype:       symtype,
	Textflag:      textflag,
	Minlc:         1,
	Ptrsize:       8,
	Regsize:       8,
	D_ADDR:        D_ADDR,
	D_AUTO:        D_AUTO,
	D_BRANCH:      D_BRANCH,
	D_CONST:       D_CONST,
	D_EXTERN:      D_EXTERN,
	D_FCONST:      D_FCONST,
	D_NONE:        D_NONE,
	D_PARAM:       D_PARAM,
	D_SCONST:      D_SCONST,
	D_STATIC:      D_STATIC,
	ACALL:         ACALL,
	ADATA:         ADATA,
	AEND:          AEND,
	AFUNCDATA:     AFUNCDATA,
	AGLOBL:        AGLOBL,
	AJMP:          AJMP,
	ANOP:          ANOP,
	APCDATA:       APCDATA,
	ARET:          ARET,
	ATEXT:         ATEXT,
	ATYPE:         ATYPE,
	AUSEFIELD:     AUSEFIELD,
}

var Linkamd64p32 = liblink.LinkArch{
	ByteOrder:     binary.LittleEndian,
	Pconv:         Pconv,
	Name:          "amd64p32",
	Thechar:       '6',
	Endian:        liblink.LittleEndian,
	Addstacksplit: addstacksplit,
	Assemble:      span6,
	Datasize:      datasize,
	Follow:        follow,
	Iscall:        iscall,
	Isdata:        isdata,
	Prg:           prg,
	Progedit:      progedit,
	Settextflag:   settextflag,
	Symtype:       symtype,
	Textflag:      textflag,
	Minlc:         1,
	Ptrsize:       4,
	Regsize:       8,
	D_ADDR:        D_ADDR,
	D_AUTO:        D_AUTO,
	D_BRANCH:      D_BRANCH,
	D_CONST:       D_CONST,
	D_EXTERN:      D_EXTERN,
	D_FCONST:      D_FCONST,
	D_NONE:        D_NONE,
	D_PARAM:       D_PARAM,
	D_SCONST:      D_SCONST,
	D_STATIC:      D_STATIC,
	ACALL:         ACALL,
	ADATA:         ADATA,
	AEND:          AEND,
	AFUNCDATA:     AFUNCDATA,
	AGLOBL:        AGLOBL,
	AJMP:          AJMP,
	ANOP:          ANOP,
	APCDATA:       APCDATA,
	ARET:          ARET,
	ATEXT:         ATEXT,
	ATYPE:         ATYPE,
	AUSEFIELD:     AUSEFIELD,
}
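
Linkamd64 and Linkamd64p32 above are essentially the same hook table with a different Ptrsize (8 vs 4): the linker picks one and drives every pass through its function fields (Addstacksplit, Follow, Prg, and so on). A toy of that table-of-hooks pattern, with a made-up linkArchToy type standing in for liblink.LinkArch:

package main

import "fmt"

type linkArchToy struct {
	Name          string
	Ptrsize       int
	Addstacksplit func(sym string)
	Follow        func(sym string)
}

// process runs the per-symbol passes through whichever arch table it is
// handed, the way the linker drives Addstacksplit and Follow.
func process(arch *linkArchToy, syms []string) {
	for _, s := range syms {
		arch.Addstacksplit(s)
		arch.Follow(s)
	}
}

func main() {
	amd64 := &linkArchToy{
		Name:          "amd64",
		Ptrsize:       8,
		Addstacksplit: func(s string) { fmt.Println("prologue for", s) },
		Follow:        func(s string) { fmt.Println("layout for", s) },
	}
	process(amd64, []string{"main.main", "main.init"})
	fmt.Println(amd64.Name, "pointer size:", amd64.Ptrsize)
}
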