Example #1
func follow(ctxt *liblink.Link, s *liblink.LSym) {
	var firstp *liblink.Prog
	var lastp *liblink.Prog
	ctxt.Cursym = s
	firstp = ctxt.Prg()
	lastp = firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}
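follow rebuilds the instruction list in layout order. The Prog allocated by ctxt.Prg() is only a sentinel head: xfol appends real instructions after it through lastp, and s.Text = firstp.Link finally drops the sentinel. A minimal, self-contained sketch of the same sentinel-head pattern (node and its fields are stand-ins, not real liblink types):

package main

import "fmt"

// node is a hypothetical stand-in for liblink.Prog: an instruction
// list threaded through a Link-style pointer.
type node struct {
	name string
	link *node
}

func main() {
	head := &node{} // sentinel, like firstp = ctxt.Prg()
	last := head
	for _, name := range []string{"TEXT", "MOVQ", "RET"} {
		last.link = &node{name: name} // append through last, as xfol does via lastp
		last = last.link
	}
	last.link = nil // terminate the list, like lastp.Link = nil
	for p := head.link; p != nil; p = p.link { // drop the sentinel, like s.Text = firstp.Link
		fmt.Println(p.name)
	}
}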
Example #2
func linkcase(casep *liblink.Prog) {
	var p *liblink.Prog
	for p = casep; p != nil; p = p.Link {
		if p.As == ABCASE {
			for ; p != nil && p.As == ABCASE; p = p.Link {
				p.Pcrel = casep
			}
			break
		}
	}
}
Example #3
// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *liblink.Link, p *liblink.Prog) *liblink.Prog {
	var next *liblink.Prog
	p.As = AMOVL
	p.From.Typ = D_INDIR + D_TLS
	p.From.Offset = 0
	p.To.Typ = D_CX
	next = p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}
	if p.From.Index == D_TLS {
		p.From.Scale = 2
	}
	return p
}
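The subtle part here is the pair next = p.Link / for p.Link != next: progedit may splice several instructions in after p (the TLS rewrites in the progedit examples below append a second MOV, for instance), and the loop advances p to the last instruction of that expansion so the caller keeps appending at the right place. A small sketch of the pattern, with a hypothetical expander standing in for progedit:

package main

import "fmt"

// insn is a hypothetical stand-in for liblink.Prog.
type insn struct {
	name string
	link *insn
}

// expand splices one extra instruction in after p, the way progedit can
// turn a single TLS load into a two-instruction sequence.
func expand(p *insn) {
	p.link = &insn{name: p.name + "+fixup", link: p.link}
}

func main() {
	ret := &insn{name: "RET"}
	p := &insn{name: "MOVL", link: ret}

	next := p.link // remember the old successor, as load_g_cx does
	expand(p)
	for p.link != next { // walk to the last instruction the expansion inserted
		p = p.link
	}
	fmt.Println(p.name) // MOVL+fixup: appending continues after the whole expansion
}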
Example #4
func readprog(b *bufio.Reader, p *liblink.Prog) {
	if !undef[p] {
		panic("double-def")
	}
	delete(undef, p)
	p.Pc = rdint(b)
	p.Lineno = int(rdint(b))
	p.Link = rdprog(b)
	p.As = int(rdint(b))
	p.Reg = int(rdint(b))
	p.Scond = int(rdint(b))
	p.Width = int8(rdint(b))
	readaddr(b, &p.From)
	readaddr(b, &p.To)
}
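readprog leans on an undef map (declared elsewhere in the same file) that marks Prog values handed out by rdprog but not yet filled in; finding a Prog that is no longer pending means the input defined it twice. A minimal sketch of that guard with a simplified prog type:

package main

import "fmt"

type prog struct{ pc int64 }

// undef marks progs that have been allocated but not yet defined,
// mirroring the undef map readprog consults.
var undef = map[*prog]bool{}

// define fills in a pending prog; defining one twice is an input error.
func define(p *prog, pc int64) {
	if !undef[p] {
		panic("double-def")
	}
	delete(undef, p)
	p.pc = pc
}

func main() {
	p := &prog{}
	undef[p] = true
	define(p, 42)
	fmt.Println(p.pc) // 42; calling define(p, ...) again would panic
}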
Example #5
func Pconv(p *liblink.Prog) string {
	var str string

	switch p.As {
	case ADATA:
		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(p.As), Dconv(p, 0, &p.From), p.From.Scale, Dconv(p, 0, &p.To))
	case ATEXT:
		if p.From.Scale != 0 {
			str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(p.As), Dconv(p, 0, &p.From), p.From.Scale, Dconv(p, fmtLong, &p.To))
			break
		}
		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(p.As), Dconv(p, 0, &p.From), Dconv(p, fmtLong, &p.To))
	default:
		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(p.As), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
		break
	}

	return str
}
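The %.5d verb zero-pads the PC so the listing lines up in columns, and the remaining verbs are filled by Line, Aconv, and Dconv. A quick illustration of the resulting shape (the operand strings here are invented, not real Aconv/Dconv output):

package main

import "fmt"

func main() {
	// Same verb layout Pconv uses for the default case: zero-padded PC,
	// source location, then tab-separated opcode and operands.
	line := fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", 18, "main.go:10", "MOVQ", "AX", "BX")
	fmt.Println(line) // 00018 (main.go:10)	MOVQ	AX,BX
}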
Example #6
func nopout(p *liblink.Prog) {
	p.As = ANOP
	p.From.Typ = D_NONE
	p.To.Typ = D_NONE
}
Example #7
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var q *liblink.Prog
	var q1 *liblink.Prog
	var autoffset int64
	var deltasp int64
	var a int
	var pcsize int
	var i uint32
	var textstksiz int64
	var textarg int64
	if ctxt.Tlsg == nil {
		ctxt.Tlsg = liblink.Linklookup(ctxt, "runtime.tlsg", 0)
	}
	if ctxt.Symmorestack[0] == nil {
		if len(morename) > len(ctxt.Symmorestack) {
			log.Fatalf("Link.symmorestack needs at least %d elements", len(morename))
		}
		for i = 0; i < uint32(len(morename)); i++ {
			ctxt.Symmorestack[i] = liblink.Linklookup(ctxt, morename[i], 0)
		}
	}
	if ctxt.Headtype == liblink.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = liblink.Linklookup(ctxt, "_privates", 0)
	}
	ctxt.Cursym = cursym
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}
	p = cursym.Text
	parsetextconst(p.To.Offset, &textstksiz, &textarg)
	autoffset = textstksiz
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Args = int(p.To.Offset >> 32)
	cursym.Locals = textstksiz
	if autoffset < liblink.StackSmall && p.From.Scale&liblink.NOSPLIT == 0 {
		for q = p; q != nil; q = q.Link {
			if q.As == ACALL {
				goto noleaf
			}
			if (q.As == ADUFFCOPY || q.As == ADUFFZERO) && autoffset >= liblink.StackSmall-8 {
				goto noleaf
			}
		}
		p.From.Scale |= liblink.NOSPLIT
	}

	// noleaf: functions that make calls do not get the NOSPLIT bit.
noleaf:
	q = nil
	if p.From.Scale&liblink.NOSPLIT == 0 || (p.From.Scale&liblink.WRAPPER != 0) {
		p = liblink.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}
	if cursym.Text.From.Scale&liblink.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, textarg, bool2int(cursym.Text.From.Scale&liblink.NEEDCTXT == 0), &q) // emit split check
	}
	if autoffset != 0 {
		if autoffset%int64(ctxt.Arch.Regsize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = liblink.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Typ = D_CONST
		p.From.Offset = autoffset
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = -ctxt.Arch.Ptrsize
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = ctxt.Arch.Ptrsize
	}
	if q != nil {
		q.Pcond = p
	}
	deltasp = autoffset
	if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
		// g->panicwrap += autoffset + ctxt->arch->regsize;
		p = liblink.Appendp(ctxt, p)
		p.As = AADDL
		p.From.Typ = D_CONST
		p.From.Offset = autoffset + int64(ctxt.Arch.Regsize)
		indir_cx(ctxt, &p.To)
		p.To.Offset = 2 * ctxt.Arch.Ptrsize
	}
	if ctxt.Debugstack > 1 && autoffset != 0 {
		// 6l -K -K means double-check for stack overflow
		// even after calling morestack and even if the
		// function is marked as nosplit.
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		indir_cx(ctxt, &p.From)
		p.From.Offset = 0
		p.To.Typ = D_BX
		p = liblink.Appendp(ctxt, p)
		p.As = ASUBQ
		p.From.Typ = D_CONST
		p.From.Offset = liblink.StackSmall + 32
		p.To.Typ = D_BX
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Typ = D_SP
		p.To.Typ = D_BX
		p = liblink.Appendp(ctxt, p)
		p.As = AJHI
		p.To.Typ = D_BRANCH
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = AINT
		p.From.Typ = D_CONST
		p.From.Offset = 3
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		q1.Pcond = p
	}
	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From.Scale&liblink.NOSPLIT == 0 {
		// 6l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Typ = D_SP
		p.To.Typ = D_DI
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Typ = D_CONST
		p.From.Offset = autoffset / 8
		p.To.Typ = D_CX
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Typ = D_CONST
		p.From.Offset = 0
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = AREP
		p = liblink.Appendp(ctxt, p)
		p.As = ASTOSQ
	}
	for ; p != nil; p = p.Link {
		pcsize = p.Mode / 8
		a = p.From.Typ
		if a == D_AUTO {
			p.From.Offset += deltasp
		}
		if a == D_PARAM {
			p.From.Offset += deltasp + int64(pcsize)
		}
		a = p.To.Typ
		if a == D_AUTO {
			p.To.Offset += deltasp
		}
		if a == D_PARAM {
			p.To.Offset += deltasp + int64(pcsize)
		}
		switch p.As {
		default:
			continue
		case APUSHL,
			APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue
		case APUSHQ,
			APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue
		case APUSHW,
			APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue
		case APOPL,
			APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue
		case APOPQ,
			APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue
		case APOPW,
			APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue
		case ARET:
			break
		}
		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}
		if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
			p = load_g_cx(ctxt, p)
			p = liblink.Appendp(ctxt, p)
			// g->panicwrap -= autoffset + ctxt->arch->regsize;
			p.As = ASUBL
			p.From.Typ = D_CONST
			p.From.Offset = autoffset + int64(ctxt.Arch.Regsize)
			indir_cx(ctxt, &p.To)
			p.To.Offset = 2 * ctxt.Arch.Ptrsize
			p = liblink.Appendp(ctxt, p)
			p.As = ARET
		}
		if autoffset != 0 {
			p.As = AADJSP
			p.From.Typ = D_CONST
			p.From.Offset = -autoffset
			p.Spadj = -autoffset
			p = liblink.Appendp(ctxt, p)
			p.As = ARET
			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}
		if p.To.Sym != nil { // retjmp
			p.As = AJMP
		}
	}
}

func indir_cx(ctxt *liblink.Link, a *liblink.Addr) {
	if ctxt.Headtype == liblink.Hnacl {
		a.Typ = D_INDIR + D_R15
		a.Index = D_CX
		a.Scale = 1
		return
	}
	a.Typ = D_INDIR + D_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *liblink.Link, p *liblink.Prog) *liblink.Prog {
	var next *liblink.Prog
	p.As = AMOVQ
	if ctxt.Arch.Ptrsize == 4 {
		p.As = AMOVL
	}
	p.From.Typ = D_INDIR + D_TLS
	p.From.Offset = 0
	p.To.Typ = D_CX
	next = p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}
	if p.From.Index == D_TLS {
		p.From.Scale = 2
	}
	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int64, textarg int64, noctxt int, jmpok **liblink.Prog) *liblink.Prog {
	var q *liblink.Prog
	var q1 *liblink.Prog
	var moreconst1 int64
	var moreconst2 int64
	var i uint32
	var cmp int
	var lea int
	var mov int
	var sub int
	cmp = ACMPQ
	lea = ALEAQ
	mov = AMOVQ
	sub = ASUBQ
	if ctxt.Headtype == liblink.Hnacl {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}
	if ctxt.Debugstack != 0 {
		// 6l -K means check not only for stack
		// overflow but stack underflow.
		// On underflow, INT 3 (breakpoint).
		// Underflow itself is rare but this also
		// catches out-of-sync stack guard info.
		p = liblink.Appendp(ctxt, p)
		p.As = cmp
		indir_cx(ctxt, &p.From)
		p.From.Offset = 8
		p.To.Typ = D_SP
		p = liblink.Appendp(ctxt, p)
		p.As = AJHI
		p.To.Typ = D_BRANCH
		p.To.Offset = 4
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = AINT
		p.From.Typ = D_CONST
		p.From.Offset = 3
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		q1.Pcond = p
	}
	q1 = nil
	if framesize <= liblink.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = liblink.Appendp(ctxt, p)
		p.As = cmp
		p.From.Typ = D_SP
		indir_cx(ctxt, &p.To)
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = liblink.Appendp(ctxt, p)
		p.As = lea
		p.From.Typ = D_INDIR + D_SP
		p.From.Offset = -(framesize - liblink.StackSmall)
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = cmp
		p.From.Typ = D_AX
		indir_cx(ctxt, &p.To)
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ	stackguard, CX
		//	CMPQ	CX, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAQ	StackGuard(SP), AX
		//	SUBQ	CX, AX
		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))
		p = liblink.Appendp(ctxt, p)
		p.As = mov
		indir_cx(ctxt, &p.From)
		p.From.Offset = 0
		p.To.Typ = D_SI
		p = liblink.Appendp(ctxt, p)
		p.As = cmp
		p.From.Typ = D_SI
		p.To.Typ = D_CONST
		p.To.Offset = liblink.StackPreempt
		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Typ = D_BRANCH
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = lea
		p.From.Typ = D_INDIR + D_SP
		p.From.Offset = liblink.StackGuard
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = sub
		p.From.Typ = D_SI
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = cmp
		p.From.Typ = D_AX
		p.To.Typ = D_CONST
		p.To.Offset = framesize + (liblink.StackGuard - liblink.StackSmall)
	}
	// common
	p = liblink.Appendp(ctxt, p)
	p.As = AJHI
	p.To.Typ = D_BRANCH
	q = p
	// If we ask for more stack, we'll get a minimum of StackMin bytes.
	// We need a stack frame large enough to hold the top-of-stack data,
	// the function arguments+results, our caller's PC, our frame,
	// a word for the return PC of the next call, and then the StackLimit bytes
	// that must be available on entry to any function called from a function
	// that did a stack check.  If StackMin is enough, don't ask for a specific
	// amount: then we can use the custom functions and save a few
	// instructions.
	moreconst1 = 0
	if liblink.StackTop+textarg+ctxt.Arch.Ptrsize+framesize+ctxt.Arch.Ptrsize+liblink.StackLimit >= liblink.StackMin {
		moreconst1 = framesize
	}
	moreconst2 = textarg
	if moreconst2 == 1 { // special marker
		moreconst2 = 0
	}
	if moreconst2&7 != 0 {
		ctxt.Diag("misaligned argument size in stack split")
	}
	// 4 varieties (const1==0 cross const2==0)
	// and 6 subvarieties of (const1==0 and const2!=0)
	p = liblink.Appendp(ctxt, p)
	if moreconst1 == 0 && moreconst2 == 0 {
		p.As = ACALL
		p.To.Typ = D_BRANCH
		p.To.Sym = ctxt.Symmorestack[0*2+noctxt]
	} else if moreconst1 != 0 && moreconst2 == 0 {
		p.As = AMOVL
		p.From.Typ = D_CONST
		p.From.Offset = moreconst1
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACALL
		p.To.Typ = D_BRANCH
		p.To.Sym = ctxt.Symmorestack[1*2+noctxt]
	} else if moreconst1 == 0 && moreconst2 <= 48 && moreconst2%8 == 0 {
		i = uint32(moreconst2/8 + 3)
		p.As = ACALL
		p.To.Typ = D_BRANCH
		p.To.Sym = ctxt.Symmorestack[i*2+uint32(noctxt)]
	} else if moreconst1 == 0 && moreconst2 != 0 {
		p.As = AMOVL
		p.From.Typ = D_CONST
		p.From.Offset = moreconst2
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACALL
		p.To.Typ = D_BRANCH
		p.To.Sym = ctxt.Symmorestack[2*2+noctxt]
	} else {
		// Pass framesize and argsize.
		p.As = AMOVQ
		p.From.Typ = D_CONST
		p.From.Offset = int64(uint64(moreconst2) << 32)
		p.From.Offset |= moreconst1
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACALL
		p.To.Typ = D_BRANCH
		p.To.Sym = ctxt.Symmorestack[3*2+noctxt]
	}
	p = liblink.Appendp(ctxt, p)
	p.As = AJMP
	p.To.Typ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	if q != nil {
		q.Pcond = p.Link
	}
	if q1 != nil {
		q1.Pcond = q.Link
	}
	*jmpok = q
	return p
}
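stacksplit emits one of three comparison sequences depending on framesize. The same decision expressed as plain arithmetic, with made-up constants standing in for liblink.StackSmall/StackBig/StackGuard (the real values live in liblink and have changed across Go versions), and ignoring the separate StackPreempt check the huge-frame path also emits:

package main

import "fmt"

// Made-up stand-ins for the liblink stack constants.
const (
	stackSmall = 64
	stackBig   = 4096
	stackGuard = 256
)

// needsSplit evaluates the same three comparisons stacksplit generates,
// returning true when the morestack call should run.
func needsSplit(sp, stackguard, framesize int64) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: SP <= stackguard
		return sp <= stackguard
	case framesize <= stackBig:
		// large stack: SP-framesize <= stackguard-StackSmall
		return sp-framesize <= stackguard-stackSmall
	default:
		// huge stack: add StackGuard to both sides so the left side stays
		// positive even though SP may sit slightly below stackguard.
		return sp-stackguard+stackGuard <= framesize+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needsSplit(1000, 900, 32))       // false: room to spare
	fmt.Println(needsSplit(880, 900, 32))        // true: SP at the guard
	fmt.Println(needsSplit(100000, 99000, 8192)) // true: huge frame outruns the headroom
}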

func follow(ctxt *liblink.Link, s *liblink.LSym) {
	var firstp *liblink.Prog
	var lastp *liblink.Prog
	ctxt.Cursym = s
	firstp = ctxt.Prg()
	lastp = firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a int) bool {
	switch a {
	case AJMP,
		ARET,
		AIRETL,
		AIRETQ,
		AIRETW,
		ARETFL,
		ARETFQ,
		ARETFW,
		AUNDEF:
		return true
	}
	return false
}

func pushpop(a int) int {
	switch a {
	case APUSHL,
		APUSHFL,
		APUSHQ,
		APUSHFQ,
		APUSHW,
		APUSHFW,
		APOPL,
		APOPFL,
		APOPQ,
		APOPFQ,
		APOPW,
		APOPFW:
		return 1
	}
	return 0
}

func relinv(a int) int {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}
	log.Fatalf("unknown relation: %s", Anames6[a])
	return 0
}

func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var i int
	var a int
loop:
	if p == nil {
		return
	}
	if p.As == AJMP {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1
			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}
	if p.Mark != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = q.As
			if a == ANOP {
				i--
				continue
			}
			if nofollow(a) || pushpop(a) != 0 {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == ANOP {
					p = p.Link
					continue
				}
				q = liblink.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if q.As != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}
				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop /* */
			}
		}
		q = ctxt.Prg()
		q.As = AJMP
		q.Lineno = p.Lineno
		q.To.Typ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}
	/* emit p */
	p.Mark = 1
	(*last).Link = p
	*last = p
	a = p.As
	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = liblink.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = liblink.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Typ == D_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(a)
				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}
		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}
	p = p.Link
	goto loop
}

func prg() *liblink.Prog {
	var p *liblink.Prog
	p = new(liblink.Prog)
	*p = zprg
	return p
}

var Linkamd64 = liblink.LinkArch{
	Name:          "amd64",
	Thechar:       '6',
	ByteOrder:     binary.LittleEndian,
	Pconv:         Pconv,
	Addstacksplit: addstacksplit,
	Assemble:      span6,
	Datasize:      datasize,
	Follow:        follow,
	Iscall:        iscall,
	Isdata:        isdata,
	Prg:           prg,
	Progedit:      progedit,
	Settextflag:   settextflag,
	Symtype:       symtype,
	Textflag:      textflag,
	Minlc:         1,
	Ptrsize:       8,
	Regsize:       8,
	D_ADDR:        D_ADDR,
	D_AUTO:        D_AUTO,
	D_BRANCH:      D_BRANCH,
	D_CONST:       D_CONST,
	D_EXTERN:      D_EXTERN,
	D_FCONST:      D_FCONST,
	D_NONE:        D_NONE,
	D_PARAM:       D_PARAM,
	D_SCONST:      D_SCONST,
	D_STATIC:      D_STATIC,
	ACALL:         ACALL,
	ADATA:         ADATA,
	AEND:          AEND,
	AFUNCDATA:     AFUNCDATA,
	AGLOBL:        AGLOBL,
	AJMP:          AJMP,
	ANOP:          ANOP,
	APCDATA:       APCDATA,
	ARET:          ARET,
	ATEXT:         ATEXT,
	ATYPE:         ATYPE,
	AUSEFIELD:     AUSEFIELD,
}

var Linkamd64p32 = liblink.LinkArch{
	Name:          "amd64p32",
	Thechar:       '6',
	ByteOrder:     binary.LittleEndian,
	Pconv:         Pconv,
	Addstacksplit: addstacksplit,
	Assemble:      span6,
	Datasize:      datasize,
	Follow:        follow,
	Iscall:        iscall,
	Isdata:        isdata,
	Prg:           prg,
	Progedit:      progedit,
	Settextflag:   settextflag,
	Symtype:       symtype,
	Textflag:      textflag,
	Minlc:         1,
	Ptrsize:       4,
	Regsize:       8,
	D_ADDR:        D_ADDR,
	D_AUTO:        D_AUTO,
	D_BRANCH:      D_BRANCH,
	D_CONST:       D_CONST,
	D_EXTERN:      D_EXTERN,
	D_FCONST:      D_FCONST,
	D_NONE:        D_NONE,
	D_PARAM:       D_PARAM,
	D_SCONST:      D_SCONST,
	D_STATIC:      D_STATIC,
	ACALL:         ACALL,
	ADATA:         ADATA,
	AEND:          AEND,
	AFUNCDATA:     AFUNCDATA,
	AGLOBL:        AGLOBL,
	AJMP:          AJMP,
	ANOP:          ANOP,
	APCDATA:       APCDATA,
	ARET:          ARET,
	ATEXT:         ATEXT,
	ATYPE:         ATYPE,
	AUSEFIELD:     AUSEFIELD,
}
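Linkamd64 and Linkamd64p32 differ only in Name and Ptrsize: amd64p32 keeps 8-byte registers but 4-byte pointers. A plausible sketch of how a caller might select between such tables by GOARCH-style name; the pared-down linkArch type and archFor helper are illustrative, not liblink API:

package main

import (
	"fmt"
	"log"
)

// linkArch is a pared-down, hypothetical stand-in for liblink.LinkArch,
// keeping only the fields that differ between the two tables above.
type linkArch struct {
	name    string
	ptrsize int
	regsize int
}

var archs = []*linkArch{
	{name: "amd64", ptrsize: 8, regsize: 8},
	{name: "amd64p32", ptrsize: 4, regsize: 8}, // 4-byte pointers, 8-byte registers
}

// archFor picks a table by GOARCH-style name, the way a driver might
// choose between Linkamd64 and Linkamd64p32.
func archFor(goarch string) *linkArch {
	for _, a := range archs {
		if a.name == goarch {
			return a
		}
	}
	log.Fatalf("unknown architecture %q", goarch)
	return nil
}

func main() {
	a := archFor("amd64p32")
	fmt.Println(a.ptrsize, a.regsize) // 4 8
}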
Example #8
func progedit(ctxt *liblink.Link, p *liblink.Prog) {
	var literal string
	var s *liblink.LSym
	var q *liblink.Prog
	// See obj6.c for discussion of TLS.
	if canuselocaltls(ctxt) {
		// Reduce TLS initial exec model to TLS local exec model.
		// Sequences like
		//	MOVL TLS, BX
		//	... off(BX)(TLS*1) ...
		// become
		//	NOP
		//	... off(TLS) ...
		if p.As == AMOVL && p.From.Typ == D_TLS && D_AX <= p.To.Typ && p.To.Typ <= D_DI {
			p.As = ANOP
			p.From.Typ = D_NONE
			p.To.Typ = D_NONE
		}
		if p.From.Index == D_TLS && D_INDIR+D_AX <= p.From.Typ && p.From.Typ <= D_INDIR+D_DI {
			p.From.Typ = D_INDIR + D_TLS
			p.From.Scale = 0
			p.From.Index = D_NONE
		}
		if p.To.Index == D_TLS && D_INDIR+D_AX <= p.To.Typ && p.To.Typ <= D_INDIR+D_DI {
			p.To.Typ = D_INDIR + D_TLS
			p.To.Scale = 0
			p.To.Index = D_NONE
		}
	} else {
		// As a courtesy to the C compilers, rewrite TLS local exec load as TLS initial exec load.
		// The instruction
		//	MOVL off(TLS), BX
		// becomes the sequence
		//	MOVL TLS, BX
		//	MOVL off(BX)(TLS*1), BX
		// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
		if p.As == AMOVL && p.From.Typ == D_INDIR+D_TLS && D_AX <= p.To.Typ && p.To.Typ <= D_DI {
			q = liblink.Appendp(ctxt, p)
			q.As = p.As
			q.From = p.From
			q.From.Typ = D_INDIR + p.To.Typ
			q.From.Index = D_TLS
			q.From.Scale = 2 // TODO: use 1
			q.To = p.To
			p.From.Typ = D_TLS
			p.From.Index = D_NONE
			p.From.Offset = 0
		}
	}
	// TODO: Remove.
	if ctxt.Headtype == liblink.Hplan9 {
		if p.From.Scale == 1 && p.From.Index == D_TLS {
			p.From.Scale = 2
		}
		if p.To.Scale == 1 && p.To.Index == D_TLS {
			p.To.Scale = 2
		}
	}
	// Rewrite CALL/JMP/RET to symbol as D_BRANCH.
	switch p.As {
	case ACALL,
		AJMP,
		ARET:
		if (p.To.Typ == D_EXTERN || p.To.Typ == D_STATIC) && p.To.Sym != nil {
			p.To.Typ = D_BRANCH
		}
		break
	}
	// Rewrite float constants to values stored in memory.
	switch p.As {
	case AFMOVF,
		AFADDF,
		AFSUBF,
		AFSUBRF,
		AFMULF,
		AFDIVF,
		AFDIVRF,
		AFCOMF,
		AFCOMFP,
		AMOVSS,
		AADDSS,
		ASUBSS,
		AMULSS,
		ADIVSS,
		ACOMISS,
		AUCOMISS:
		if p.From.Typ == D_FCONST {
			var i32 uint32
			var f32 float32
			f32 = float32(p.From.U.Dval)
			i32 = math.Float32bits(f32)
			literal = fmt.Sprintf("$f32.%08x", uint32(i32))
			s = liblink.Linklookup(ctxt, literal, 0)
			if s.Typ == 0 {
				s.Typ = liblink.SRODATA
				liblink.Adduint32(ctxt, s, i32)
				s.Reachable = 0
			}
			p.From.Typ = D_EXTERN
			p.From.Sym = s
			p.From.Offset = 0
		}
	case AFMOVD,
		AFADDD,
		AFSUBD,
		AFSUBRD,
		AFMULD,
		AFDIVD,
		AFDIVRD,
		AFCOMD,
		AFCOMDP,
		AMOVSD,
		AADDSD,
		ASUBSD,
		AMULSD,
		ADIVSD,
		ACOMISD,
		AUCOMISD:
		if p.From.Typ == D_FCONST {
			var i64 uint64
			i64 = math.Float64bits(p.From.U.Dval)
			literal = fmt.Sprintf("$f64.%016x", uint64(i64))
			s = liblink.Linklookup(ctxt, literal, 0)
			if s.Typ == 0 {
				s.Typ = liblink.SRODATA
				liblink.Adduint64(ctxt, s, i64)
				s.Reachable = 0
			}
			p.From.Typ = D_EXTERN
			p.From.Sym = s
			p.From.Offset = 0
		}
		break
	}
}
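Both float cases materialize the constant into a named read-only symbol whose name encodes the exact bit pattern, so equal constants anywhere in the program collapse to one symbol via Linklookup. The naming scheme on its own, runnable as-is:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Same naming scheme progedit uses: the symbol name is the exact bit
	// pattern, so equal constants map to the same SRODATA symbol.
	f := 0.1
	fmt.Println(fmt.Sprintf("$f32.%08x", math.Float32bits(float32(f)))) // $f32.3dcccccd
	fmt.Println(fmt.Sprintf("$f64.%016x", math.Float64bits(f)))         // $f64.3fb999999999999a
}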
Example #9
func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var i int
	var a int
loop:
	if p == nil {
		return
	}
	if p.As == AJMP {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1
			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}
	if p.Mark != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = q.As
			if a == ANOP {
				i--
				continue
			}
			if nofollow(a) || pushpop(a) != 0 {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == ANOP {
					p = p.Link
					continue
				}
				q = liblink.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if q.As != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}
				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop /* */
			}
		}
		q = ctxt.Prg()
		q.As = AJMP
		q.Lineno = p.Lineno
		q.To.Typ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}
	/* emit p */
	p.Mark = 1
	(*last).Link = p
	*last = p
	a = p.As
	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = liblink.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = liblink.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Typ == D_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(a)
				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}
		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}
	p = p.Link
	goto loop
}
Example #10
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var pl *liblink.Prog
	var q *liblink.Prog
	var q1 *liblink.Prog
	var q2 *liblink.Prog
	var o int
	var autosize int64
	var autoffset int64
	autosize = 0
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}
	q = nil
	ctxt.Cursym = cursym
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}
	softfloat(ctxt, cursym)
	p = cursym.Text
	autoffset = p.To.Offset
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Locals = autoffset
	cursym.Args = p.To.Offset2
	if ctxt.Debugzerostack != 0 {
		if autoffset != 0 && p.Reg&liblink.NOSPLIT == 0 {
			// MOVW $4(R13), R1
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.From.Typ = D_CONST
			p.From.Reg = 13
			p.From.Offset = 4
			p.To.Typ = D_REG
			p.To.Reg = 1
			// MOVW $n(R13), R2
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.From.Typ = D_CONST
			p.From.Reg = 13
			p.From.Offset = 4 + autoffset
			p.To.Typ = D_REG
			p.To.Reg = 2
			// MOVW $0, R3
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.From.Typ = D_CONST
			p.From.Offset = 0
			p.To.Typ = D_REG
			p.To.Reg = 3
			// L:
			//	MOVW.nil R3, 0(R1) +4
			//	CMP R1, R2
			//	BNE L
			pl = liblink.Appendp(ctxt, p)
			p = pl
			p.As = AMOVW
			p.From.Typ = D_REG
			p.From.Reg = 3
			p.To.Typ = D_OREG
			p.To.Reg = 1
			p.To.Offset = 4
			p.Scond |= C_PBIT
			p = liblink.Appendp(ctxt, p)
			p.As = ACMP
			p.From.Typ = D_REG
			p.From.Reg = 1
			p.Reg = 2
			p = liblink.Appendp(ctxt, p)
			p.As = ABNE
			p.To.Typ = D_BRANCH
			p.Pcond = pl
		}
	}
	/*
	 * find leaf subroutines
	 * strip NOPs
	 * expand RET
	 * expand BECOME pseudo
	 */
	for p = cursym.Text; p != nil; p = p.Link {
		switch p.As {
		case ACASE:
			if ctxt.Flag_shared != 0 {
				linkcase(p)
			}
		case ATEXT:
			p.Mark |= LEAF
		case ARET:
			break
		case ADIV,
			ADIVU,
			AMOD,
			AMODU:
			q = p
			if ctxt.Sym_div == nil {
				initdiv(ctxt)
			}
			cursym.Text.Mark &^= LEAF
			continue
		case ANOP:
			q1 = p.Link
			q.Link = q1 /* q is non-nop */
			if q1 != nil {
				q1.Mark |= p.Mark
			}
			continue
		case ABL,
			ABX,
			ADUFFZERO,
			ADUFFCOPY:
			cursym.Text.Mark &^= LEAF
			fallthrough
		case ABCASE,
			AB,
			ABEQ,
			ABNE,
			ABCS,
			ABHS,
			ABCC,
			ABLO,
			ABMI,
			ABPL,
			ABVS,
			ABVC,
			ABHI,
			ABLS,
			ABGE,
			ABLT,
			ABGT,
			ABLE:
			q1 = p.Pcond
			if q1 != nil {
				for q1.As == ANOP {
					q1 = q1.Link
					p.Pcond = q1
				}
			}
			break
		}
		q = p
	}
	for p = cursym.Text; p != nil; p = p.Link {
		o = p.As
		switch o {
		case ATEXT:
			autosize = p.To.Offset + 4
			if autosize <= 4 {
				if cursym.Text.Mark&LEAF != 0 {
					p.To.Offset = -4
					autosize = 0
				}
			}
			if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
				if ctxt.Debugvlog != 0 {
					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
					liblink.Bflush(ctxt.Bso)
				}
				cursym.Text.Mark |= LEAF
			}
			if cursym.Text.Mark&LEAF != 0 {
				cursym.Leaf = 1
				if autosize == 0 {
					break
				}
			}
			if p.Reg&liblink.NOSPLIT == 0 {
				p = stacksplit(ctxt, p, autosize, bool2int(cursym.Text.Reg&liblink.NEEDCTXT == 0)) // emit split check
			}
			// MOVW.W		R14,$-autosize(SP)
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.Scond |= C_WBIT
			p.From.Typ = D_REG
			p.From.Reg = REGLINK
			p.To.Typ = D_OREG
			p.To.Offset = -autosize
			p.To.Reg = REGSP
			p.Spadj = autosize
			if cursym.Text.Reg&liblink.WRAPPER != 0 {
				// g->panicwrap += autosize;
				// MOVW panicwrap_offset(g), R3
				// ADD $autosize, R3
				// MOVW R3 panicwrap_offset(g)
				p = liblink.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Typ = D_OREG
				p.From.Reg = REGG
				p.From.Offset = 2 * ctxt.Arch.Ptrsize
				p.To.Typ = D_REG
				p.To.Reg = 3
				p = liblink.Appendp(ctxt, p)
				p.As = AADD
				p.From.Typ = D_CONST
				p.From.Offset = autosize
				p.To.Typ = D_REG
				p.To.Reg = 3
				p = liblink.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Typ = D_REG
				p.From.Reg = 3
				p.To.Typ = D_OREG
				p.To.Reg = REGG
				p.To.Offset = 2 * ctxt.Arch.Ptrsize
			}
		case ARET:
			nocache_obj5(p)
			if cursym.Text.Mark&LEAF != 0 {
				if autosize == 0 {
					p.As = AB
					p.From = zprg_obj5.From
					if p.To.Sym != nil { // retjmp
						p.To.Typ = D_BRANCH
					} else {
						p.To.Typ = D_OREG
						p.To.Offset = 0
						p.To.Reg = REGLINK
					}
					break
				}
			}
			if cursym.Text.Reg&liblink.WRAPPER != 0 {
				var scond int
				// Preserve original RET's cond, to allow RET.EQ
				// in the implementation of reflect.call.
				scond = p.Scond
				p.Scond = C_SCOND_NONE
				// g->panicwrap -= autosize;
				// MOVW panicwrap_offset(g), R3
				// SUB $autosize, R3
				// MOVW R3 panicwrap_offset(g)
				p.As = AMOVW
				p.From.Typ = D_OREG
				p.From.Reg = REGG
				p.From.Offset = 2 * ctxt.Arch.Ptrsize
				p.To.Typ = D_REG
				p.To.Reg = 3
				p = liblink.Appendp(ctxt, p)
				p.As = ASUB
				p.From.Typ = D_CONST
				p.From.Offset = autosize
				p.To.Typ = D_REG
				p.To.Reg = 3
				p = liblink.Appendp(ctxt, p)
				p.As = AMOVW
				p.From.Typ = D_REG
				p.From.Reg = 3
				p.To.Typ = D_OREG
				p.To.Reg = REGG
				p.To.Offset = 2 * ctxt.Arch.Ptrsize
				p = liblink.Appendp(ctxt, p)
				p.Scond = scond
			}
			p.As = AMOVW
			p.Scond |= C_PBIT
			p.From.Typ = D_OREG
			p.From.Offset = autosize
			p.From.Reg = REGSP
			p.To.Typ = D_REG
			p.To.Reg = REGPC
			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so no spadj.
			if p.To.Sym != nil { // retjmp
				p.To.Reg = REGLINK
				q2 = liblink.Appendp(ctxt, p)
				q2.As = AB
				q2.To.Typ = D_BRANCH
				q2.To.Sym = p.To.Sym
				p.To.Sym = nil
				p = q2
			}
		case AADD:
			if p.From.Typ == D_CONST && p.From.Reg == NREG && p.To.Typ == D_REG && p.To.Reg == REGSP {
				p.Spadj = -p.From.Offset
			}
		case ASUB:
			if p.From.Typ == D_CONST && p.From.Reg == NREG && p.To.Typ == D_REG && p.To.Reg == REGSP {
				p.Spadj = p.From.Offset
			}
		case ADIV,
			ADIVU,
			AMOD,
			AMODU:
			if ctxt.Debugdivmod != 0 {
				break
			}
			if p.From.Typ != D_REG {
				break
			}
			if p.To.Typ != D_REG {
				break
			}
			q1 = p
			/* MOV a,4(SP) */
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Typ = D_REG
			p.From.Reg = q1.From.Reg
			p.To.Typ = D_OREG
			p.To.Reg = REGSP
			p.To.Offset = 4
			/* MOV b,REGTMP */
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Typ = D_REG
			p.From.Reg = q1.Reg
			if q1.Reg == NREG {
				p.From.Reg = q1.To.Reg
			}
			p.To.Typ = D_REG
			p.To.Reg = REGTMP
			p.To.Offset = 0
			/* CALL appropriate */
			p = liblink.Appendp(ctxt, p)
			p.As = ABL
			p.Lineno = q1.Lineno
			p.To.Typ = D_BRANCH
			switch o {
			case ADIV:
				p.To.Sym = ctxt.Sym_div
			case ADIVU:
				p.To.Sym = ctxt.Sym_divu
			case AMOD:
				p.To.Sym = ctxt.Sym_mod
			case AMODU:
				p.To.Sym = ctxt.Sym_modu
				break
			}
			/* MOV REGTMP, b */
			p = liblink.Appendp(ctxt, p)
			p.As = AMOVW
			p.Lineno = q1.Lineno
			p.From.Typ = D_REG
			p.From.Reg = REGTMP
			p.From.Offset = 0
			p.To.Typ = D_REG
			p.To.Reg = q1.To.Reg
			/* ADD $8,SP */
			p = liblink.Appendp(ctxt, p)
			p.As = AADD
			p.Lineno = q1.Lineno
			p.From.Typ = D_CONST
			p.From.Reg = NREG
			p.From.Offset = 8
			p.Reg = NREG
			p.To.Typ = D_REG
			p.To.Reg = REGSP
			p.Spadj = -8
			/* Keep saved LR at 0(SP) after SP change. */
			/* MOVW 0(SP), REGTMP; MOVW REGTMP, -8!(SP) */
			/* TODO: Remove SP adjustments; see issue 6699. */
			q1.As = AMOVW
			q1.From.Typ = D_OREG
			q1.From.Reg = REGSP
			q1.From.Offset = 0
			q1.Reg = NREG
			q1.To.Typ = D_REG
			q1.To.Reg = REGTMP
			/* SUB $8,SP */
			q1 = liblink.Appendp(ctxt, q1)
			q1.As = AMOVW
			q1.From.Typ = D_REG
			q1.From.Reg = REGTMP
			q1.Reg = NREG
			q1.To.Typ = D_OREG
			q1.To.Reg = REGSP
			q1.To.Offset = -8
			q1.Scond |= C_WBIT
			q1.Spadj = 8
		case AMOVW:
			if (p.Scond&C_WBIT != 0) && p.To.Typ == D_OREG && p.To.Reg == REGSP {
				p.Spadj = -p.To.Offset
			}
			if (p.Scond&C_PBIT != 0) && p.From.Typ == D_OREG && p.From.Reg == REGSP && p.To.Reg != REGPC {
				p.Spadj = -p.From.Offset
			}
			if p.From.Typ == D_CONST && p.From.Reg == REGSP && p.To.Typ == D_REG && p.To.Reg == REGSP {
				p.Spadj = -p.From.Offset
			}
			break
		}
	}
}
Example #11
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int64, noctxt int) *liblink.Prog {
	var arg int
	// MOVW			g_stackguard(g), R1
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.From.Typ = D_OREG
	p.From.Reg = REGG
	p.To.Typ = D_REG
	p.To.Reg = 1
	if framesize <= liblink.StackSmall {
		// small stack: SP < stackguard
		//	CMP	stackguard, SP
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_REG
		p.From.Reg = 1
		p.Reg = REGSP
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize < stackguard-StackSmall
		//	MOVW $-framesize(SP), R2
		//	CMP stackguard, R2
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Typ = D_CONST
		p.From.Reg = REGSP
		p.From.Offset = -framesize
		p.To.Typ = D_REG
		p.To.Reg = 2
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_REG
		p.From.Reg = 1
		p.Reg = 2
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//	CMP $StackPreempt, R1
		//	MOVW.NE $StackGuard(SP), R2
		//	SUB.NE R1, R2
		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
		//	CMP.NE R3, R2
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_CONST
		p.From.Offset = int64(uint32(liblink.StackPreempt & 0xFFFFFFFF))
		p.Reg = 1
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Typ = D_CONST
		p.From.Reg = REGSP
		p.From.Offset = liblink.StackGuard
		p.To.Typ = D_REG
		p.To.Reg = 2
		p.Scond = C_SCOND_NE
		p = liblink.Appendp(ctxt, p)
		p.As = ASUB
		p.From.Typ = D_REG
		p.From.Reg = 1
		p.To.Typ = D_REG
		p.To.Reg = 2
		p.Scond = C_SCOND_NE
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVW
		p.From.Typ = D_CONST
		p.From.Offset = framesize + (liblink.StackGuard - liblink.StackSmall)
		p.To.Typ = D_REG
		p.To.Reg = 3
		p.Scond = C_SCOND_NE
		p = liblink.Appendp(ctxt, p)
		p.As = ACMP
		p.From.Typ = D_REG
		p.From.Reg = 3
		p.Reg = 2
		p.Scond = C_SCOND_NE
	}
	// MOVW.LS		$framesize, R1
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Typ = D_CONST
	p.From.Offset = framesize
	p.To.Typ = D_REG
	p.To.Reg = 1
	// MOVW.LS		$args, R2
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Typ = D_CONST
	arg = ctxt.Cursym.Text.To.Offset2
	if arg == 1 { // special marker for known 0
		arg = 0
	}
	if arg&3 != 0 {
		ctxt.Diag("misaligned argument size in stack split")
	}
	p.From.Offset = int64(arg)
	p.To.Typ = D_REG
	p.To.Reg = 2
	// MOVW.LS	R14, R3
	p = liblink.Appendp(ctxt, p)
	p.As = AMOVW
	p.Scond = C_SCOND_LS
	p.From.Typ = D_REG
	p.From.Reg = REGLINK
	p.To.Typ = D_REG
	p.To.Reg = 3
	// BL.LS		runtime.morestack(SB) // modifies LR, returns with LO still asserted
	p = liblink.Appendp(ctxt, p)
	p.As = ABL
	p.Scond = C_SCOND_LS
	p.To.Typ = D_BRANCH
	p.To.Sym = ctxt.Symmorestack[noctxt]
	// BLS	start
	p = liblink.Appendp(ctxt, p)
	p.As = ABLS
	p.To.Typ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	return p
}
Example #12
func nocache_obj5(p *liblink.Prog) {
	p.Optab = 0
	p.From.Class = 0
	p.To.Class = 0
}
Example #13
func addstacksplit(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var q *liblink.Prog
	var autoffset int64
	var deltasp int64
	var a int
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = liblink.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = liblink.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}
	if ctxt.Headtype == liblink.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = liblink.Linklookup(ctxt, "_privates", 0)
	}
	ctxt.Cursym = cursym
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}
	p = cursym.Text
	autoffset = p.To.Offset
	if autoffset < 0 {
		autoffset = 0
	}
	cursym.Locals = autoffset
	cursym.Args = p.To.Offset2
	q = nil
	if p.From.Scale&liblink.NOSPLIT == 0 || (p.From.Scale&liblink.WRAPPER != 0) {
		p = liblink.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}
	if cursym.Text.From.Scale&liblink.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, bool2int(cursym.Text.From.Scale&liblink.NEEDCTXT == 0), &q) // emit split check
	}
	if autoffset != 0 {
		p = liblink.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Typ = D_CONST
		p.From.Offset = autoffset
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = -ctxt.Arch.Ptrsize
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = ctxt.Arch.Ptrsize
	}
	if q != nil {
		q.Pcond = p
	}
	deltasp = autoffset
	if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
		// g->panicwrap += autoffset + ctxt->arch->ptrsize;
		p = liblink.Appendp(ctxt, p)
		p.As = AADDL
		p.From.Typ = D_CONST
		p.From.Offset = autoffset + ctxt.Arch.Ptrsize
		p.To.Typ = D_INDIR + D_CX
		p.To.Offset = 2 * ctxt.Arch.Ptrsize
	}
	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From.Scale&liblink.NOSPLIT == 0 {
		// 8l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_SP
		p.To.Typ = D_DI
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_CONST
		p.From.Offset = autoffset / 4
		p.To.Typ = D_CX
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_CONST
		p.From.Offset = 0
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = AREP
		p = liblink.Appendp(ctxt, p)
		p.As = ASTOSL
	}
	for ; p != nil; p = p.Link {
		a = p.From.Typ
		if a == D_AUTO {
			p.From.Offset += deltasp
		}
		if a == D_PARAM {
			p.From.Offset += deltasp + 4
		}
		a = p.To.Typ
		if a == D_AUTO {
			p.To.Offset += deltasp
		}
		if a == D_PARAM {
			p.To.Offset += deltasp + 4
		}
		switch p.As {
		default:
			continue
		case APUSHL,
			APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue
		case APUSHW,
			APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue
		case APOPL,
			APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue
		case APOPW,
			APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue
		case ARET:
			break
		}
		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}
		if cursym.Text.From.Scale&liblink.WRAPPER != 0 {
			p = load_g_cx(ctxt, p)
			p = liblink.Appendp(ctxt, p)
			// g->panicwrap -= autoffset + ctxt->arch->ptrsize;
			p.As = ASUBL
			p.From.Typ = D_CONST
			p.From.Offset = autoffset + ctxt.Arch.Ptrsize
			p.To.Typ = D_INDIR + D_CX
			p.To.Offset = 2 * ctxt.Arch.Ptrsize
			p = liblink.Appendp(ctxt, p)
			p.As = ARET
		}
		if autoffset != 0 {
			p.As = AADJSP
			p.From.Typ = D_CONST
			p.From.Offset = -autoffset
			p.Spadj = -autoffset
			p = liblink.Appendp(ctxt, p)
			p.As = ARET
			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}
		if p.To.Sym != nil { // retjmp
			p.As = AJMP
		}
	}
}
Example #14
func Pconv(p *liblink.Prog) string {
	var str string
	var sc string
	var a int
	var s int

	a = p.As
	s = p.Scond
	sc = extra[s&C_SCOND]
	if s&C_SBIT != 0 {
		sc += ".S"
	}
	if s&C_PBIT != 0 {
		sc += ".P"
	}
	if s&C_WBIT != 0 {
		sc += ".W"
	}
	if s&C_UBIT != 0 { /* ambiguous with FBIT */
		sc += ".U"
	}
	if a == AMOVM {
		if p.From.Typ == D_CONST {
			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, RAconv(&p.From), Dconv(p, 0, &p.To))
		} else if p.To.Typ == D_CONST {
			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), RAconv(&p.To))
		} else {
			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
		}
	} else if a == ADATA {
		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
	} else if p.As == ATEXT {
		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
	} else if p.Reg == NREG {
		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
	} else if p.From.Typ != D_FREG {
		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,R%d,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
	} else {
		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,F%d,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
	}

	return str
}
Example #15
func xfol(ctxt *liblink.Link, p *liblink.Prog, last **liblink.Prog) {
	var q *liblink.Prog
	var r *liblink.Prog
	var a int
	var i int
loop:
	if p == nil {
		return
	}
	a = p.As
	if a == AB {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			p.Mark |= FOLL
			p = q
			if p.Mark&FOLL == 0 {
				goto loop
			}
		}
	}
	if p.Mark&FOLL != 0 {
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == *last || q == nil {
				break
			}
			a = q.As
			if a == ANOP {
				i--
				continue
			}
			if a == AB || (a == ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
				goto copy
			}
			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
				continue
			}
			if a != ABEQ && a != ABNE {
				continue
			}
		copy:
			for {
				r = ctxt.Prg()
				*r = *p
				if r.Mark&FOLL == 0 {
					fmt.Printf("can't happen 1\n")
				}
				r.Mark |= FOLL
				if p != q {
					p = p.Link
					(*last).Link = r
					*last = r
					continue
				}
				(*last).Link = r
				*last = r
				if a == AB || (a == ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
					return
				}
				r.As = ABNE
				if a == ABNE {
					r.As = ABEQ
				}
				r.Pcond = p.Link
				r.Link = p.Pcond
				if r.Link.Mark&FOLL == 0 {
					xfol(ctxt, r.Link, last)
				}
				if r.Pcond.Mark&FOLL == 0 {
					fmt.Printf("can't happen 2\n")
				}
				return
			}
		}
		a = AB
		q = ctxt.Prg()
		q.As = a
		q.Lineno = p.Lineno
		q.To.Typ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}
	p.Mark |= FOLL
	(*last).Link = p
	*last = p
	if a == AB || (a == ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
		return
	}
	if p.Pcond != nil {
		if a != ABL && a != ABX && p.Link != nil {
			q = liblink.Brchain(ctxt, p.Link)
			if a != ATEXT && a != ABCASE {
				if q != nil && (q.Mark&FOLL != 0) {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
			xfol(ctxt, p.Link, last)
			q = liblink.Brchain(ctxt, p.Pcond)
			if q == nil {
				q = p.Pcond
			}
			if q.Mark&FOLL != 0 {
				p.Pcond = q
				return
			}
			p = q
			goto loop
		}
	}
	p = p.Link
	goto loop
}
Example #16
func progedit(ctxt *liblink.Link, p *liblink.Prog) {
	var literal string
	var s *liblink.LSym
	var tlsfallback *liblink.LSym
	p.From.Class = 0
	p.To.Class = 0
	// Rewrite B/BL to symbol as D_BRANCH.
	switch p.As {
	case AB,
		ABL,
		ADUFFZERO,
		ADUFFCOPY:
		if p.To.Typ == D_OREG && (p.To.Name == D_EXTERN || p.To.Name == D_STATIC) && p.To.Sym != nil {
			p.To.Typ = D_BRANCH
		}
		break
	}
	// Replace TLS register fetches on older ARM processors.
	switch p.As {
	// If the instruction matches MRC 15, 0, <reg>, C13, C0, 3, replace it.
	case AMRC:
		if ctxt.Goarm < 7 && p.To.Offset&0xffff0fff == 0xee1d0f70 {
			tlsfallback = liblink.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
			// BL runtime.read_tls_fallback(SB)
			p.As = ABL
			p.To.Typ = D_BRANCH
			p.To.Sym = tlsfallback
			p.To.Offset = 0
		} else {
			// Otherwise, MRC/MCR instructions need no further treatment.
			p.As = AWORD
		}
		break
	}
	// Rewrite float constants to values stored in memory.
	switch p.As {
	case AMOVF:
		if p.From.Typ == D_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
			var i32 uint32
			var f32 float32
			f32 = float32(p.From.U.Dval)
			i32 = math.Float32bits(f32)
			literal = fmt.Sprintf("$f32.%08x", i32)
			s = liblink.Linklookup(ctxt, literal, 0)
			if s.Typ == 0 {
				s.Typ = liblink.SRODATA
				liblink.Adduint32(ctxt, s, i32)
				s.Reachable = 0
			}
			p.From.Typ = D_OREG
			p.From.Sym = s
			p.From.Name = D_EXTERN
			p.From.Offset = 0
		}
	case AMOVD:
		if p.From.Typ == D_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
			var i64 uint64
			i64 = math.Float64bits(p.From.U.Dval)
			literal = fmt.Sprintf("$f64.%016x", uint64(i64))
			s = liblink.Linklookup(ctxt, literal, 0)
			if s.Typ == 0 {
				s.Typ = liblink.SRODATA
				liblink.Adduint64(ctxt, s, i64)
				s.Reachable = 0
			}
			p.From.Typ = D_OREG
			p.From.Sym = s
			p.From.Name = D_EXTERN
			p.From.Offset = 0
		}
		break
	}
	if ctxt.Flag_shared != 0 {
		// Shared libraries use R_ARM_TLS_IE32 instead of
		// R_ARM_TLS_LE32, replacing the link time constant TLS offset in
		// runtime.tlsg with an address to a GOT entry containing the
		// offset. Rewrite $runtime.tlsg(SB) to runtime.tlsg(SB) to
		// compensate.
		if ctxt.Tlsg == nil {
			ctxt.Tlsg = liblink.Linklookup(ctxt, "runtime.tlsg", 0)
		}
		if p.From.Typ == D_CONST && p.From.Name == D_EXTERN && p.From.Sym == ctxt.Tlsg {
			p.From.Typ = D_OREG
		}
		if p.To.Typ == D_CONST && p.To.Name == D_EXTERN && p.To.Sym == ctxt.Tlsg {
			p.To.Typ = D_OREG
		}
	}
}
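The magic numbers in the AMRC case come from the ARM instruction encoding: 0xee1d0f70 is MRC 15, 0, R0, C13, C0, 3 (the TLS register read), and the destination register occupies bits 12-15, which is exactly the nibble the 0xffff0fff mask clears. A small check of that claim:

package main

import "fmt"

func main() {
	const base = uint32(0xee1d0f70) // MRC 15, 0, R0, C13, C0, 3
	const mask = uint32(0xffff0fff) // clears bits 12-15, the destination register
	for rd := uint32(0); rd < 16; rd++ {
		insn := base | rd<<12 // the same MRC with destination register rd
		fmt.Printf("R%-2d %#08x matches: %v\n", rd, insn, insn&mask == base)
	}
}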
Example #17
func settextflag(p *liblink.Prog, f int) {
	p.Reg = f
}
Example #18
func progedit(ctxt *liblink.Link, p *liblink.Prog) {
	var literal string
	var s *liblink.LSym
	var q *liblink.Prog
	// Thread-local storage references use the TLS pseudo-register.
	// As a register, TLS refers to the thread-local storage base, and it
	// can only be loaded into another register:
	//
	//         MOVQ TLS, AX
	//
	// An offset from the thread-local storage base is written off(reg)(TLS*1).
	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
	// indexing from the loaded TLS base. This emits a relocation so that
	// if the linker needs to adjust the offset, it can. For example:
	//
	//         MOVQ TLS, AX
	//         MOVQ 8(AX)(TLS*1), CX // load m into CX
	//
	// On systems that support direct access to the TLS memory, this
	// pair of instructions can be reduced to a direct TLS memory reference:
	//
	//         MOVQ 8(TLS), CX // load m into CX
	//
	// The 2-instruction and 1-instruction forms correspond roughly to
	// ELF TLS initial exec mode and ELF TLS local exec mode, respectively.
	//
	// We apply this rewrite on systems that support the 1-instruction form.
	// The decision is made using only the operating system (and probably
	// the -shared flag, eventually), not the link mode. If some link modes
	// on a particular operating system require the 2-instruction form,
	// then all builds for that operating system will use the 2-instruction
	// form, so that the link mode decision can be delayed to link time.
	//
	// In this way, all supported systems use identical instructions to
	// access TLS, and they are rewritten appropriately first here in
	// liblink and then finally using relocations in the linker.
	if canuselocaltls(ctxt) {
		// Reduce TLS initial exec model to TLS local exec model.
		// Sequences like
		//	MOVQ TLS, BX
		//	... off(BX)(TLS*1) ...
		// become
		//	NOP
		//	... off(TLS) ...
		//
		// TODO(rsc): Remove the Hsolaris special case. It exists only to
		// guarantee we produce byte-identical binaries to those built
		// before this code was added. But it should be unnecessary.
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Typ == D_TLS && D_AX <= p.To.Typ && p.To.Typ <= D_R15 && ctxt.Headtype != liblink.Hsolaris {
			nopout(p)
		}
		if p.From.Index == D_TLS && D_INDIR+D_AX <= p.From.Typ && p.From.Typ <= D_INDIR+D_R15 {
			p.From.Typ = D_INDIR + D_TLS
			p.From.Scale = 0
			p.From.Index = D_NONE
		}
		if p.To.Index == D_TLS && D_INDIR+D_AX <= p.To.Typ && p.To.Typ <= D_INDIR+D_R15 {
			p.To.Typ = D_INDIR + D_TLS
			p.To.Scale = 0
			p.To.Index = D_NONE
		}
	} else {
		// As a courtesy to the C compilers, rewrite TLS local exec load as TLS initial exec load.
		// The instruction
		//	MOVQ off(TLS), BX
		// becomes the sequence
		//	MOVQ TLS, BX
		//	MOVQ off(BX)(TLS*1), BX
		// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Typ == D_INDIR+D_TLS && D_AX <= p.To.Typ && p.To.Typ <= D_R15 {
			q = liblink.Appendp(ctxt, p)
			q.As = p.As
			q.From = p.From
			q.From.Typ = D_INDIR + p.To.Typ
			q.From.Index = D_TLS
			q.From.Scale = 2 // TODO: use 1
			q.To = p.To
			p.From.Typ = D_TLS
			p.From.Index = D_NONE
			p.From.Offset = 0
		}
	}
	// TODO: Remove.
	if ctxt.Headtype == liblink.Hwindows || ctxt.Headtype == liblink.Hplan9 {
		if p.From.Scale == 1 && p.From.Index == D_TLS {
			p.From.Scale = 2
		}
		if p.To.Scale == 1 && p.To.Index == D_TLS {
			p.To.Scale = 2
		}
	}
	if ctxt.Headtype == liblink.Hnacl {
		nacladdr(ctxt, p, &p.From)
		nacladdr(ctxt, p, &p.To)
	}
	// Maintain information about code generation mode.
	if ctxt.Mode == 0 {
		ctxt.Mode = 64
	}
	p.Mode = ctxt.Mode
	switch p.As {
	case AMODE:
		if p.From.Typ == D_CONST || p.From.Typ == D_INDIR+D_NONE {
			switch int(p.From.Offset) {
			case 16,
				32,
				64:
				ctxt.Mode = int(p.From.Offset)
				break
			}
		}
		nopout(p)
		break
	}
	// Rewrite CALL/JMP/RET to symbol as D_BRANCH.
	switch p.As {
	case ACALL,
		AJMP,
		ARET:
		if (p.To.Typ == D_EXTERN || p.To.Typ == D_STATIC) && p.To.Sym != nil {
			p.To.Typ = D_BRANCH
		}
		break
	}
	// Rewrite float constants to values stored in memory.
	switch p.As {
	case AFMOVF,
		AFADDF,
		AFSUBF,
		AFSUBRF,
		AFMULF,
		AFDIVF,
		AFDIVRF,
		AFCOMF,
		AFCOMFP,
		AMOVSS,
		AADDSS,
		ASUBSS,
		AMULSS,
		ADIVSS,
		ACOMISS,
		AUCOMISS:
		if p.From.Typ == D_FCONST {
			var i32 uint32
			var f32 float32
			f32 = float32(p.From.U.Dval)
			i32 = math.Float32bits(f32)
			literal = fmt.Sprintf("$f32.%08x", uint32(i32))
			s = liblink.Linklookup(ctxt, literal, 0)
			if s.Typ == 0 {
				s.Typ = liblink.SRODATA
				liblink.Adduint32(ctxt, s, i32)
				s.Reachable = 0
			}
			p.From.Typ = D_EXTERN
			p.From.Sym = s
			p.From.Offset = 0
		}
	case AFMOVD,
		AFADDD,
		AFSUBD,
		AFSUBRD,
		AFMULD,
		AFDIVD,
		AFDIVRD,
		AFCOMD,
		AFCOMDP,
		AMOVSD,
		AADDSD,
		ASUBSD,
		AMULSD,
		ADIVSD,
		ACOMISD,
		AUCOMISD:
		if p.From.Typ == D_FCONST {
			var i64 uint64
			i64 = math.Float64bits(p.From.U.Dval)
			literal = fmt.Sprintf("$f64.%016x", uint64(i64))
			s = liblink.Linklookup(ctxt, literal, 0)
			if s.Typ == 0 {
				s.Typ = liblink.SRODATA
				liblink.Adduint64(ctxt, s, i64)
				s.Reachable = 0
			}
			p.From.Typ = D_EXTERN
			p.From.Sym = s
			p.From.Offset = 0
		}
		break
	}
}
Example #19
// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *liblink.Link, p *liblink.Prog, framesize int64, noctxt int, jmpok **liblink.Prog) *liblink.Prog {
	var q *liblink.Prog
	var q1 *liblink.Prog
	var arg int
	if ctxt.Debugstack != 0 {
		// 8l -K means check not only for stack
		// overflow but stack underflow.
		// On underflow, INT 3 (breakpoint).
		// Underflow itself is rare but this also
		// catches out-of-sync stack guard info.
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_INDIR + D_CX
		p.From.Offset = 4
		p.To.Typ = D_SP
		p = liblink.Appendp(ctxt, p)
		p.As = AJCC
		p.To.Typ = D_BRANCH
		p.To.Offset = 4
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = AINT
		p.From.Typ = D_CONST
		p.From.Offset = 3
		p = liblink.Appendp(ctxt, p)
		p.As = ANOP
		q1.Pcond = p
	}
	q1 = nil
	if framesize <= liblink.StackSmall {
		// small stack: SP <= stackguard
		//	CMPL SP, stackguard
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_SP
		p.To.Typ = D_INDIR + D_CX
	} else if framesize <= liblink.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAL -(framesize-StackSmall)(SP), AX
		//	CMPL AX, stackguard
		p = liblink.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Typ = D_INDIR + D_SP
		p.From.Offset = -(framesize - liblink.StackSmall)
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_AX
		p.To.Typ = D_INDIR + D_CX
	} else {
		// Such a large stack we need to protect against wraparound
		// if SP is close to zero.
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVL	stackguard, CX
		//	CMPL	CX, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAL	StackGuard(SP), AX
		//	SUBL	stackguard, AX
		//	CMPL	AX, $(framesize+(StackGuard-StackSmall))
		p = liblink.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Typ = D_INDIR + D_CX
		p.From.Offset = 0
		p.To.Typ = D_SI
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_SI
		p.To.Typ = D_CONST
		p.To.Offset = int64(uint32(liblink.StackPreempt & 0xFFFFFFFF))
		p = liblink.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Typ = D_BRANCH
		q1 = p
		p = liblink.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Typ = D_INDIR + D_SP
		p.From.Offset = liblink.StackGuard
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ASUBL
		p.From.Typ = D_SI
		p.From.Offset = 0
		p.To.Typ = D_AX
		p = liblink.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Typ = D_AX
		p.To.Typ = D_CONST
		p.To.Offset = framesize + (liblink.StackGuard - liblink.StackSmall)
	}
	// common
	p = liblink.Appendp(ctxt, p)
	p.As = AJHI
	p.To.Typ = D_BRANCH
	p.To.Offset = 4
	q = p
	p = liblink.Appendp(ctxt, p) // save frame size in DI
	p.As = AMOVL
	p.To.Typ = D_DI
	p.From.Typ = D_CONST
	// If we ask for more stack, we'll get a minimum of StackMin bytes.
	// We need a stack frame large enough to hold the top-of-stack data,
	// the function arguments+results, our caller's PC, our frame,
	// a word for the return PC of the next call, and then the StackLimit bytes
	// that must be available on entry to any function called from a function
	// that did a stack check.  If StackMin is enough, don't ask for a specific
	// amount: then we can use the custom functions and save a few
	// instructions.
	if int64(liblink.StackTop+ctxt.Cursym.Text.To.Offset2)+ctxt.Arch.Ptrsize+framesize+ctxt.Arch.Ptrsize+liblink.StackLimit >= liblink.StackMin {
		p.From.Offset = (framesize + 7) &^ 7
	}
	arg = ctxt.Cursym.Text.To.Offset2
	if arg == 1 { // special marker for known 0
		arg = 0
	}
	if arg&3 != 0 {
		ctxt.Diag("misaligned argument size in stack split")
	}
	p = liblink.Appendp(ctxt, p) // save arg size in AX
	p.As = AMOVL
	p.To.Typ = D_AX
	p.From.Typ = D_CONST
	p.From.Offset = int64(arg)
	p = liblink.Appendp(ctxt, p)
	p.As = ACALL
	p.To.Typ = D_BRANCH
	p.To.Sym = ctxt.Symmorestack[noctxt]
	p = liblink.Appendp(ctxt, p)
	p.As = AJMP
	p.To.Typ = D_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link
	if q != nil {
		q.Pcond = p.Link
	}
	if q1 != nil {
		q1.Pcond = q.Link
	}
	*jmpok = q
	return p
}
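When the frame is big enough to need an explicit size, it is rounded up to a multiple of 8 with (framesize + 7) &^ 7: add 7, then clear the low three bits. The idiom in isolation:

package main

import "fmt"

// roundUp8 is the (framesize + 7) &^ 7 idiom from stacksplit: adding 7
// then clearing the low three bits rounds up to a multiple of 8.
func roundUp8(n int64) int64 {
	return (n + 7) &^ 7
}

func main() {
	for _, n := range []int64{0, 1, 7, 8, 9, 20} {
		fmt.Printf("%d -> %d\n", n, roundUp8(n)) // 0->0, 1->8, 7->8, 8->8, 9->16, 20->24
	}
}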
Example #20
func softfloat(ctxt *liblink.Link, cursym *liblink.LSym) {
	var p *liblink.Prog
	var next *liblink.Prog
	var symsfloat *liblink.LSym
	var wasfloat int
	if ctxt.Goarm > 5 {
		return
	}
	symsfloat = liblink.Linklookup(ctxt, "_sfloat", 0)
	wasfloat = 0
	for p = cursym.Text; p != nil; p = p.Link {
		if p.Pcond != nil {
			p.Pcond.Mark |= LABEL
		}
	}
	for p = cursym.Text; p != nil; p = p.Link {
		switch p.As {
		case AMOVW:
			if p.To.Typ == D_FREG || p.From.Typ == D_FREG {
				goto soft
			}
			goto notsoft
		case AMOVWD,
			AMOVWF,
			AMOVDW,
			AMOVFW,
			AMOVFD,
			AMOVDF,
			AMOVF,
			AMOVD,
			ACMPF,
			ACMPD,
			AADDF,
			AADDD,
			ASUBF,
			ASUBD,
			AMULF,
			AMULD,
			ADIVF,
			ADIVD,
			ASQRTF,
			ASQRTD,
			AABSF,
			AABSD:
			goto soft
		default:
			goto notsoft
		}
	soft:
		if wasfloat == 0 || (p.Mark&LABEL != 0) {
			next = ctxt.Prg()
			*next = *p
			// BL _sfloat(SB)
			*p = zprg_obj5
			p.Ctxt = ctxt
			p.Link = next
			p.As = ABL
			p.To.Typ = D_BRANCH
			p.To.Sym = symsfloat
			p.Lineno = next.Lineno
			p = next
			wasfloat = 1
		}
		continue
	notsoft:
		wasfloat = 0
	}
}
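softfloat inserts the BL _sfloat(SB) call only at the start of each maximal run of floating-point instructions, and again after any instruction marked LABEL, since control can re-enter there; the wasfloat flag tracks whether the previous instruction was already covered. The same run-batching logic over a plain slice (the data and strings are invented, not liblink types; the LABEL case is omitted for brevity):

package main

import "fmt"

func main() {
	// true marks a "floating-point" instruction; wasfloat ensures one
	// call is emitted per maximal run, just as softfloat batches runs
	// behind a single BL _sfloat(SB).
	isFloat := []bool{false, true, true, false, true}
	wasfloat := false
	for i, f := range isFloat {
		if f && !wasfloat {
			fmt.Println("BL _sfloat(SB)") // start of a new float run
		}
		fmt.Printf("insn %d (float=%v)\n", i, f)
		wasfloat = f
	}
}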