Example #1
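// nacladdr rewrites the address a used by instruction p for the NaCl
// sandbox: LEAL/LEAQ are left untouched, direct or indirect use of BP is
// rejected, TLS references are remapped onto BP, and any remaining indirect
// base other than BP, SP, or R15 is turned into an index (scale 1) off an
// R15 base.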
func nacladdr(ctxt *liblink.Link, p *liblink.Prog, a *liblink.Addr) {
	if p.As == ALEAL || p.As == ALEAQ {
		return
	}
	if a.Typ == D_BP || a.Typ == D_INDIR+D_BP {
		ctxt.Diag("invalid address: %P", p)
		return
	}
	if a.Typ == D_INDIR+D_TLS {
		a.Typ = D_INDIR + D_BP
	} else if a.Typ == D_TLS {
		a.Typ = D_BP
	}
	if D_INDIR <= a.Typ && a.Typ <= D_INDIR+D_INDIR {
		switch a.Typ {
		// all ok
		case D_INDIR + D_BP,
			D_INDIR + D_SP,
			D_INDIR + D_R15:
			break
		default:
			if a.Index != D_NONE {
				ctxt.Diag("invalid address %P", p)
			}
			a.Index = a.Typ - D_INDIR
			if a.Index != D_NONE {
				a.Scale = 1
			}
			a.Typ = D_INDIR + D_R15
			break
		}
	}
}
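The rewrite rules above are easier to see in isolation. The following is a minimal standalone model of the same transformation; the d* constants and the addr type are simplified stand-ins invented for this sketch, not the real liblink D_* values, and the D_NONE/diagnostic handling is omitted.

package main

import "fmt"

// Simplified stand-ins for the liblink operand kinds used by nacladdr.
// The real D_* constants differ; these exist only to make the sketch runnable.
const (
	dNone = iota
	dSP
	dBP
	dAX
	dTLS
	dR15
	dIndir = 100 // dIndir+reg means "memory at (reg)"
)

// addr is a cut-down stand-in for liblink.Addr.
type addr struct {
	typ, index, scale int
}

// rewrite applies the same rules nacladdr applies to a non-LEA operand.
func rewrite(a addr) addr {
	if a.typ == dIndir+dTLS {
		a.typ = dIndir + dBP // indirect TLS reference becomes a BP-based one
	} else if a.typ == dTLS {
		a.typ = dBP
	}
	if a.typ > dIndir {
		switch a.typ {
		case dIndir + dBP, dIndir + dSP, dIndir + dR15:
			// already legal under the sandbox
		default:
			// move the original base into the index and rebase on R15
			a.index = a.typ - dIndir
			a.scale = 1
			a.typ = dIndir + dR15
		}
	}
	return a
}

func main() {
	fmt.Println(rewrite(addr{typ: dIndir + dAX})) // (AX) -> (R15)(AX*1)
	fmt.Println(rewrite(addr{typ: dTLS}))         // TLS  -> BP
}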
Example #2
// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int64, noctxt int, jmpok **liblink.Prog) *liblink.Prog {
	var q *liblink.Prog
	var q1 *liblink.Prog
	var arg int
	if ctxt.Debugstack != 0 {
		// 8l -K means check not only for stack
		// overflow but stack underflow.
		// On underflow, INT 3 (breakpoint).
		// Underflow itself is rare but this also
		// catches out-of-sync stack guard info.
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_INDIR + D_CX
		p.From.Offset = 4
		p.To.Typ = D_SP
		p = liblink.Appendp(ctxt, p)
		p.As = AJCC
		p.To.Typ = D_BRANCH
		p.To.Offset = 4
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = AINT
		p.From.Typ = D_CONST
		p.From.Offset = 3
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		q1.Pcond = p
	}
	q1 = nil
	if framesize <= liblink.StackSmall {
		// small stack: SP <= stackguard
		//	CMPL SP, stackguard
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_SP
		p.To.Typ = D_INDIR + D_CX
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAL -(framesize-StackSmall)(SP), AX
		//	CMPL AX, stackguard
		p = liblink.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Typ = D_INDIR + D_SP
		p.From.Offset = -(framesize - liblink.StackSmall)
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_AX
		p.To.Typ = D_INDIR + D_CX
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVL	stackguard, SI
		//	CMPL	SI, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAL	StackGuard(SP), AX
		//	SUBL	stackguard, AX
		//	CMPL	AX, $(framesize+(StackGuard-StackSmall))
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_INDIR + D_CX
		p.From.Offset = 0
		p.To.Typ = D_SI
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_SI
		p.To.Typ = D_CONST
		p.To.Offset = int64(uint32(liblink.StackPreempt & 0xFFFFFFFF))
		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Typ = D_BRANCH
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Typ = D_INDIR + D_SP
		p.From.Offset = liblink.StackGuard
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ASUBL
		p.From.Typ = D_SI
		p.From.Offset = 0
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_AX
		p.To.Typ = D_CONST
		p.To.Offset = framesize + (liblink.StackGuard - liblink.StackSmall)
	}
	// common
	p = liblink.Appendp(ctxt, p)
	p.As = AJHI
	p.To.Typ = D_BRANCH
	p.To.Offset = 4
	q = p
	p = liblink.Appendp(ctxt, p) // save frame size in DI
	p.As = AMOVL
	p.To.Typ = D_DI
	p.From.Typ = D_CONST
	// If we ask for more stack, we'll get a minimum of StackMin bytes.
	// We need a stack frame large enough to hold the top-of-stack data,
	// the function arguments+results, our caller's PC, our frame,
	// a word for the return PC of the next call, and then the StackLimit bytes
	// that must be available on entry to any function called from a function
	// that did a stack check.  If StackMin is enough, don't ask for a specific
	// amount: then we can use the custom functions and save a few
	// instructions.
	if int64(liblink.StackTop+ctxt.Cursym.Text.To.Offset2)+ctxt.Arch.Ptrsize+framesize+ctxt.Arch.Ptrsize+liblink.StackLimit >= liblink.StackMin {
		p.From.Offset = (framesize + 7) &^ 7
	}
	arg = ctxt.Cursym.Text.To.Offset2
	if arg == 1 { // special marker for known 0
		arg = 0
	}
	if arg&3 != 0 {
		ctxt.Diag("misaligned argument size in stack split")
	}
	p = liblink.Appendp(ctxt, p) // save arg size in AX
	p.As = AMOVL
	p.To.Typ = D_AX
	p.From.Typ = D_CONST
	p.From.Offset = int64(arg)
	p = liblink.Appendp(ctxt, p)
	p.As = ACALL
	p.To.Typ = D_BRANCH
	p.To.Sym = ctxt.Symmorestack[noctxt]
	p = liblink.Appendp(ctxt, p)
	p.As = AJMP
	p.To.Typ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	if q != nil {
		q.Pcond = p.Link
	}
	if q1 != nil {
		q1.Pcond = q.Link
	}
	*jmpok = q
	return p
}
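The large-frame branch above is the subtle one. The sketch below redoes its comparison, SP-stackguard+StackGuard <= framesize+(StackGuard-StackSmall), as plain arithmetic. The constant values and the needMorestack helper are invented for illustration and do not correspond to any particular Go release, and the separate StackPreempt check is omitted.

package main

import "fmt"

// Illustrative values only; the real StackGuard/StackSmall constants live in
// the runtime's stack.h and differ between releases.
const (
	stackGuard = 256
	stackSmall = 64
)

// needMorestack reports whether the prologue must call morestack for a frame
// of the given size, mirroring the unsigned comparison emitted by stacksplit
// for very large frames (the JHI skips the call only when the left side is
// strictly greater).
func needMorestack(sp, stackguard uint32, framesize int64) bool {
	// SP is allowed to dip slightly below stackguard, so StackGuard is added
	// to both sides to keep the left-hand side non-negative before the
	// unsigned comparison.
	lhs := uint64(sp) - uint64(stackguard) + stackGuard
	rhs := uint64(framesize) + (stackGuard - stackSmall)
	return lhs <= rhs
}

func main() {
	// Plenty of room between SP and the guard: no call.
	fmt.Println(needMorestack(0x9000, 0x1000, 16384)) // false
	// SP barely above the guard: the frame would not fit, so call morestack.
	fmt.Println(needMorestack(0x1100, 0x1000, 16384)) // true
}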
Example #3
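// stacksplit appends the stack-split check for ARM: it loads the stack
// guard into R1, compares it against SP (using one of the three sequences
// shown in the comments below, depending on framesize), and under the LS
// condition moves the frame size, argument size, and LR into R1-R3, calls
// runtime.morestack, and branches back to the start of the function.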
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int64, noctxt int) *liblink.Prog {
	var arg int
	// MOVW			g_stackguard(g), R1
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.From.Typ = D_OREG
	p.From.Reg = REGG
	p.To.Typ = D_REG
	p.To.Reg = 1
	if framesize <= liblink.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_REG
		p.From.Reg = 1
		p.Reg = REGSP
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Typ = D_CONST
		p.From.Reg = REGSP
		p.From.Offset = -framesize
		p.To.Typ = D_REG
		p.To.Reg = 2
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_REG
		p.From.Reg = 1
		p.Reg = 2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_CONST
		p.From.Offset = int64(uint32(liblink.StackPreempt & 0xFFFFFFFF))
		p.Reg = 1
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Typ = D_CONST
		p.From.Reg = REGSP
		p.From.Offset = liblink.StackGuard
		p.To.Typ = D_REG
		p.To.Reg = 2
		p.Scond = C_SCOND_NE
		p = liblink.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Typ = D_REG
		p.From.Reg = 1
		p.To.Typ = D_REG
		p.To.Reg = 2
		p.Scond = C_SCOND_NE
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Typ = D_CONST
		p.From.Offset = framesize + (liblink.StackGuard - liblink.StackSmall)
		p.To.Typ = D_REG
		p.To.Reg = 3
		p.Scond = C_SCOND_NE
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_REG
		p.From.Reg = 3
		p.Reg = 2
		p.Scond = C_SCOND_NE
	}
	// MOVW.LS		$framesize, R1
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Typ = D_CONST
	p.From.Offset = framesize
	p.To.Typ = D_REG
	p.To.Reg = 1
	// MOVW.LS		$args, R2
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Typ = D_CONST
	arg = ctxt.Cursym.Text.To.Offset2
	if arg == 1 { // special marker for known 0
		arg = 0
	}
	if arg&3 != 0 {
		ctxt.Diag("misaligned argument size in stack split")
	}
	p.From.Offset = int64(arg)
	p.To.Typ = D_REG
	p.To.Reg = 2
	// MOVW.LS	R14, R3
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Typ = D_REG
	p.From.Reg = REGLINK
	p.To.Typ = D_REG
	p.To.Reg = 3
	// BL.LS		runtime.morestack(SB) // modifies LR, returns with LO still asserted
	p = liblink.Appendp(ctxt, p)
	p.As = ABL
	p.Scond = C_SCOND_LS
	p.To.Typ = D_BRANCH
	p.To.Sym = ctxt.Symmorestack[noctxt]
	// BLS	start
	p = liblink.Appendp(ctxt, p)
	p.As = ABLS
	p.To.Typ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	return p
}
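Compared with the 386 version in Example #2, the ARM version leans on conditional execution (the NE and LS suffixes) rather than extra branch instructions, but both pick one of the same three check shapes from the frame size. The classify helper below is a standalone sketch of just that decision; the threshold values passed in main are illustrative stand-ins for liblink.StackSmall and liblink.StackBig, not their real values.

package main

import "fmt"

// checkKind names the three prologue shapes stacksplit can emit.
type checkKind int

const (
	smallCheck checkKind = iota // compare SP against stackguard directly
	bigCheck                    // compare SP-(framesize-StackSmall) against stackguard
	hugeCheck                   // explicit wraparound and StackPreempt handling
)

// classify mirrors the if/else chain at the top of stacksplit.
func classify(framesize, stackSmall, stackBig int64) checkKind {
	switch {
	case framesize <= stackSmall:
		return smallCheck
	case framesize <= stackBig:
		return bigCheck
	default:
		return hugeCheck
	}
}

func main() {
	fmt.Println(classify(32, 64, 4096))    // 0 (smallCheck)
	fmt.Println(classify(1024, 64, 4096))  // 1 (bigCheck)
	fmt.Println(classify(65536, 64, 4096)) // 2 (hugeCheck)
}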
Example #4
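// addstacksplit rewrites cursym's instruction stream: unless the function
// is marked NOSPLIT it loads g into CX and emits the stack-split check via
// stacksplit, then applies the frame adjustment, and finally walks the body
// fixing up D_AUTO/D_PARAM offsets, tracking PUSH/POP SP adjustments, and
// expanding each RET into the matching epilogue.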
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var q *liblink.Prog
	var autoffset int64
	var deltasp int64
	var a int
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}
	if ctxt.Headtype == liblink.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = liblink.Linklookup(ctxt, "_privates", 0)
	}
	ctxt.Cursym = cursym
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}
	p = cursym.Text
	autoffset = p.To.Offset
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Locals = autoffset
	cursym.Args = p.To.Offset2
	q = nil
	if p.From.Scale&liblink.NOSPLIT == 0 || (p.From.Scale&liblink.WRAPPER != 0) {
		p = liblink.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}
	if cursym.Text.From.Scale&liblink.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, bool2int(cursym.Text.From.Scale&liblink.NEEDCTXT == 0), &q) // emit split check
	}
	if autoffset != 0 {
		p = liblink.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Typ = D_CONST
		p.From.Offset = autoffset
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = -ctxt.Arch.Ptrsize
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = ctxt.Arch.Ptrsize
	}
	if q != nil {
		q.Pcond = p
	}
	deltasp = autoffset
	if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
		// g->panicwrap += autoffset + ctxt->arch->ptrsize;
		p = liblink.Appendp(ctxt, p)
		p.As = AADDL
		p.From.Typ = D_CONST
		p.From.Offset = autoffset + ctxt.Arch.Ptrsize
		p.To.Typ = D_INDIR + D_CX
		p.To.Offset = 2 * ctxt.Arch.Ptrsize
	}
	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From.Scale&liblink.NOSPLIT == 0 {
		// 8l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_SP
		p.To.Typ = D_DI
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_CONST
		p.From.Offset = autoffset / 4
		p.To.Typ = D_CX
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_CONST
		p.From.Offset = 0
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = AREP
		p = liblink.Appendp(ctxt, p)
		p.As = ASTOSL
	}
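	// Walk the remaining instructions: make D_AUTO/D_PARAM offsets
	// SP-relative using deltasp, record the SP delta of each PUSH/POP in
	// Spadj, and expand every RET into the epilogue (undoing panicwrap for
	// wrappers and popping the frame before returning).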
	for ; p != nil; p = p.Link {
		a = p.From.Typ
		if a == D_AUTO {
			p.From.Offset += deltasp
		}
		if a == D_PARAM {
			p.From.Offset += deltasp + 4
		}
		a = p.To.Typ
		if a == D_AUTO {
			p.To.Offset += deltasp
		}
		if a == D_PARAM {
			p.To.Offset += deltasp + 4
		}
		switch p.As {
		default:
			continue
		case APUSHL,
			APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue
		case APUSHW,
			APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue
		case APOPL,
			APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue
		case APOPW,
			APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue
		case ARET:
			break
		}
		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}
		if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
			p = load_g_cx(ctxt, p)
			p = liblink.Appendp(ctxt, p)
			// g->panicwrap -= autoffset + ctxt->arch->ptrsize;
			p.As = ASUBL
			p.From.Typ = D_CONST
			p.From.Offset = autoffset + ctxt.Arch.Ptrsize
			p.To.Typ = D_INDIR + D_CX
			p.To.Offset = 2 * ctxt.Arch.Ptrsize
			p = liblink.Appendp(ctxt, p)
			p.As = ARET
		}
		if autoffset != 0 {
			p.As = AADJSP
			p.From.Typ = D_CONST
			p.From.Offset = -autoffset
			p.Spadj = -autoffset
			p = liblink.Appendp(ctxt, p)
			p.As = ARET
			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}
		if p.To.Sym != nil { // retjmp
			p.As = AJMP
		}
	}
}
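The PUSH/POP accounting in the loop above is the part that is easiest to get wrong, so here is a minimal standalone model of it: a running deltasp that each instruction adjusts, with the invariant that it must equal the frame size again whenever a RET is reached. The op type and its sizes are simplified stand-ins for this sketch, not the liblink instruction set.

package main

import "fmt"

// A toy instruction stream: only the opcodes the accounting cares about.
type op int

const (
	opPushL op = iota // moves SP down by 4, so deltasp grows by 4
	opPopL            // moves SP up by 4, so deltasp shrinks by 4
	opOther
	opRet
)

// checkBalance mirrors the deltasp bookkeeping in addstacksplit: starting
// from the frame size, every PUSH/POP moves deltasp, and at RET it must be
// back at the frame size or the PUSH/POP nesting is unbalanced.
func checkBalance(autoffset int64, prog []op) error {
	deltasp := autoffset
	for _, o := range prog {
		switch o {
		case opPushL:
			deltasp += 4
		case opPopL:
			deltasp -= 4
		case opRet:
			if deltasp != autoffset {
				return fmt.Errorf("unbalanced PUSH/POP: deltasp=%d, frame=%d", deltasp, autoffset)
			}
		}
	}
	return nil
}

func main() {
	fmt.Println(checkBalance(16, []op{opPushL, opOther, opPopL, opRet})) // <nil>
	fmt.Println(checkBalance(16, []op{opPushL, opRet}))                  // unbalanced PUSH/POP
}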