Example #1
File: const.go Project: rsc/tmp
/*
 * Convert constant val to type t; leave the result in con,
 * for use by the back end.
 */
func Convconst(con *Node, t *Type, val *Val) {
	tt := Simsimtype(t)

	// copy the constant for conversion
	Nodconst(con, Types[TINT8], 0)

	con.Type = t
	con.Val = *val

	if Isint[tt] {
		con.Val.Ctype = CTINT
		con.Val.U.Xval = new(Mpint)
		var i int64
		switch val.Ctype {
		default:
			Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))

		case CTINT, CTRUNE:
			i = Mpgetfix(val.U.Xval)

		case CTBOOL:
			i = int64(obj.Bool2int(val.U.Bval))

		case CTNIL:
			i = 0
		}

		i = iconv(i, tt)
		Mpmovecfix(con.Val.U.Xval, i)
		return
	}

	if Isfloat[tt] {
		con.Val = toflt(con.Val)
		if con.Val.Ctype != CTFLT {
			Fatal("convconst ctype=%d %v", con.Val.Ctype, t)
		}
		if tt == TFLOAT32 {
			con.Val.U.Fval = truncfltlit(con.Val.U.Fval, t)
		}
		return
	}

	if Iscomplex[tt] {
		con.Val = tocplx(con.Val)
		if tt == TCOMPLEX64 {
			con.Val.U.Cval.Real = *truncfltlit(&con.Val.U.Cval.Real, Types[TFLOAT32])
			con.Val.U.Cval.Imag = *truncfltlit(&con.Val.U.Cval.Imag, Types[TFLOAT32])
		}

		return
	}

	Fatal("convconst %v constant", Tconv(t, obj.FmtLong))
}
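The iconv call above narrows the freshly copied constant to the width of the target integer type; its body is not shown in this excerpt. Below is a minimal, self-contained sketch of that kind of width reduction, under the assumption that the helper takes a bit width and a signedness flag (both hypothetical parameters, not the compiler's actual signature):

package main

import "fmt"

// truncate sketches what a width-reduction helper like iconv plausibly
// does: keep the low bits of an int64 constant, then sign- or
// zero-extend back to int64.
func truncate(i int64, bits uint, signed bool) int64 {
	if bits >= 64 {
		return i
	}
	shift := 64 - bits
	if signed {
		return i << shift >> shift // arithmetic shift sign-extends
	}
	return int64(uint64(i) << shift >> shift) // logical shift zero-extends
}

func main() {
	fmt.Println(truncate(300, 8, true)) // 44: 300 wrapped into int8 range
	fmt.Println(truncate(-1, 8, false)) // 255: -1 reinterpreted as uint8
	fmt.Println(truncate(-1, 16, true)) // -1: already representable
}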
Example #2
File: gsubr.go Project: rsc/tmp
func Gbranch(as int, t *Type, likely int) *obj.Prog {
	p := Prog(as)
	p.To.Type = obj.TYPE_BRANCH
	p.To.Val = nil
	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(obj.Bool2int(likely > 0))
	}

	return p
}
Example #3
File: pgen.go Project: rsc/tmp
// Sort the list of stack variables: autos after anything else;
// within autos, unused after used; within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out at decreasing addresses on the stack,
// "pointers first, zeroed things first, decreasing size" really
// means, in memory, things with pointers needing zeroing at the
// top of the stack and increasing in size from there.
// Non-autos sort on offset.
func cmpstackvar(a *Node, b *Node) int {
	if a.Class != b.Class {
		if a.Class == PAUTO {
			return +1
		}
		return -1
	}

	if a.Class != PAUTO {
		if a.Xoffset < b.Xoffset {
			return -1
		}
		if a.Xoffset > b.Xoffset {
			return +1
		}
		return 0
	}

	if a.Used != b.Used {
		return obj.Bool2int(b.Used) - obj.Bool2int(a.Used)
	}

	ap := obj.Bool2int(haspointers(a.Type))
	bp := obj.Bool2int(haspointers(b.Type))
	if ap != bp {
		return bp - ap
	}

	ap = obj.Bool2int(a.Needzero)
	bp = obj.Bool2int(b.Needzero)
	if ap != bp {
		return bp - ap
	}

	if a.Type.Width < b.Type.Width {
		return +1
	}
	if a.Type.Width > b.Type.Width {
		return -1
	}

	return stringsCompare(a.Sym.Name, b.Sym.Name)
}
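As a usage illustration, the same multi-key comparator pattern can be driven through sort.Slice. The sketch below is self-contained; stackVar and its fields are hypothetical stand-ins for the compiler's Node:

package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	used     bool
	pointers bool
	needZero bool
	width    int64
	name     string
}

// less mirrors the auto-variable ordering in cmpstackvar: used before
// unused, pointers before no pointers, needs-zero before not, then
// decreasing size, with the name as the final tiebreak.
func less(a, b stackVar) bool {
	if a.used != b.used {
		return a.used
	}
	if a.pointers != b.pointers {
		return a.pointers
	}
	if a.needZero != b.needZero {
		return a.needZero
	}
	if a.width != b.width {
		return a.width > b.width
	}
	return a.name < b.name
}

func main() {
	vars := []stackVar{
		{used: true, width: 8, name: "x"},
		{used: true, pointers: true, width: 16, name: "p"},
		{used: false, width: 4, name: "dead"},
	}
	sort.Slice(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	fmt.Println(vars) // p (has pointers) first, then x, then the unused var
}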
Example #4
File: obj6.go Project: rsc/tmp
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Tlsg == nil {
		ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
	}
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	var bpsize int
	if p.Mode == 64 && obj.Framepointer_enabled != 0 && autoffset > 0 {
		// Make room to save a base pointer.  If autoffset == 0,
		// this might do something special like a tail jump to
		// another function, so in that case we omit this.
		bpsize = ctxt.Arch.Ptrsize

		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Args = int32(textarg)
	cursym.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
	if p.Mode == 32 && cursym.Locals < 0 {
		cursym.Locals = 0
	}

	// TODO(rsc): Remove 'p.Mode == 64 &&'.
	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
		for q := p; q != nil; q = q.Link {
			if q.As == obj.ACALL {
				goto noleaf
			}
			if (q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO) && autoffset >= obj.StackSmall-8 {
				goto noleaf
			}
		}

		p.From3.Offset |= obj.NOSPLIT
	noleaf:
	}

	if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	var q *obj.Prog
	if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, int32(textarg), cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
	}

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.Regsize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = obj.Appendp(ctxt, p)

		p.As = obj.ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	if q != nil {
		q.Pcond = p
	}
	deltasp := autoffset

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)

		// Move current frame to BP
		p = obj.Appendp(ctxt, p)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.

		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_CX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ATESTL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		p1 := p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.Regsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ALEAL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = ACMPL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = ACMPL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p2 := p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R15
			p.To.Scale = 1
			p.To.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
		// 6l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset) / int64(ctxt.Arch.Regsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_CX
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AREP

		p = obj.Appendp(ctxt, p)
		p.As = ASTOSQ
		if p.Mode == 32 {
			p.As = ASTOSL
		}
	}

	var a int
	var pcsize int
	for ; p != nil; p = p.Link {
		pcsize = int(p.Mode) / 8
		a = int(p.From.Name)
		if a == obj.NAME_AUTO {
			p.From.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		a = int(p.From3.Name)
		if a == obj.NAME_AUTO {
			p.From3.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.From3.Offset += int64(deltasp) + int64(pcsize)
		}
		a = int(p.To.Name)
		if a == obj.NAME_AUTO {
			p.To.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case obj.ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(ctxt, p)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = obj.ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		a.Type = obj.TYPE_MEM
		a.Reg = REG_R15
		a.Index = REG_CX
		a.Scale = 1
		return
	}

	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
	p.As = AMOVQ
	if ctxt.Arch.Ptrsize == 4 {
		p.As = AMOVL
	}
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REG_TLS
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_CX

	next := p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == REG_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
	sub := ASUBQ

	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	var q1 *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = int16(lea)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else {
		// The stack is so large that we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ	stackguard, CX
		//	CMPQ	CX, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAQ	StackGuard(SP), AX
		//	SUBQ	CX, AX
		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))

		p = obj.Appendp(ctxt, p)

		p.As = int16(mov)
		indir_cx(ctxt, p, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = obj.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = obj.StackPreempt
		if p.Mode == 32 {
			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		q1 = p

		p = obj.Appendp(ctxt, p)
		p.As = int16(lea)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = int16(sub)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
	}

	// common
	p = obj.Appendp(ctxt, p)

	p.As = AJHI
	p.To.Type = obj.TYPE_BRANCH
	q := p

	p = obj.Appendp(ctxt, p)
	p.As = obj.ACALL
	p.To.Type = obj.TYPE_BRANCH
	if ctxt.Cursym.Cfunc != 0 {
		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
	} else {
		p.To.Sym = ctxt.Symmorestack[obj.Bool2int(noctxt)]
	}

	p = obj.Appendp(ctxt, p)
	p.As = obj.AJMP
	p.To.Type = obj.TYPE_BRANCH
	p.Pcond = ctxt.Cursym.Text.Link

	if q != nil {
		q.Pcond = p.Link
	}
	if q1 != nil {
		q1.Pcond = q.Link
	}

	*jmpok = q
	return p
}
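The three guard comparisons that stacksplit emits are easier to follow in plain arithmetic. The sketch below mirrors them directly; the constants are hypothetical stand-ins for obj.StackSmall, obj.StackBig, obj.StackGuard, and obj.StackPreempt, not their real values:

package main

import "fmt"

const (
	stackSmall   = 128
	stackBig     = 4096
	stackGuard   = 640
	stackPreempt = ^uint64(0) &^ 0xFFF // stand-in for "very large"
)

// needSplit reports whether the prologue should call morestack,
// restating the emitted CMP/LEA/SUB sequences as integer math.
func needSplit(sp, stackguard, framesize uint64) bool {
	switch {
	case framesize <= stackSmall:
		// small stack: split if SP <= stackguard
		return sp <= stackguard
	case framesize <= stackBig:
		// large stack: split if SP-(framesize-StackSmall) <= stackguard
		return sp-(framesize-stackSmall) <= stackguard
	default:
		// huge stack: check for the preempt poison value explicitly,
		// then use the wraparound-safe form from the comment above.
		if stackguard == stackPreempt {
			return true
		}
		return sp-stackguard+stackGuard <= framesize+(stackGuard-stackSmall)
	}
}

func main() {
	fmt.Println(needSplit(1<<20, 1<<20-256, 64))   // false: plenty of room
	fmt.Println(needSplit(1<<20, 1<<20+256, 64))   // true: SP below guard
	fmt.Println(needSplit(1<<20, 1<<20-256, 2048)) // true: frame digs past guard
}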

func follow(ctxt *obj.Link, s *obj.LSym) {
	ctxt.Cursym = s

	firstp := ctxt.NewProg()
	lastp := firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a int) bool {
	switch a {
	case obj.AJMP,
		obj.ARET,
		AIRETL,
		AIRETQ,
		AIRETW,
		ARETFL,
		ARETFQ,
		ARETFW,
		obj.AUNDEF:
		return true
	}

	return false
}

func pushpop(a int) bool {
	switch a {
	case APUSHL,
		APUSHFL,
		APUSHQ,
		APUSHFQ,
		APUSHW,
		APUSHFW,
		APOPL,
		APOPFL,
		APOPQ,
		APOPFQ,
		APOPW,
		APOPFW:
		return true
	}

	return false
}

func relinv(a int16) int16 {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", obj.Aconv(int(a)))
	return 0
}

func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a int

loop:
	if p == nil {
		return
	}
	if p.As == obj.AJMP {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1

			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}

	if p.Mark != 0 {
		/*
		 * p goes here, but we already used it elsewhere.
		 * Copy up to 4 instructions, or else branch to the other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = int(q.As)
			if a == obj.ANOP {
				i--
				continue
			}

			if nofollow(a) || pushpop(a) {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == obj.ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == obj.ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}

				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop
				/* */
			}
		}
		q = ctxt.NewProg()
		q.As = obj.AJMP
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark = 1

	(*last).Link = p
	*last = p
	a = int(p.As)

	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != obj.ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type == obj.TYPE_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(int16(a))

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = relinv(int16(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}
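The inversion step in the middle of xfol (relinv plus the Link/Pcond swap) is easier to see in isolation: when the fall-through block has already been laid out but the branch target has not, inverting the condition and swapping the successors lets layout continue straight through. A minimal sketch, with hypothetical block and branch types standing in for Prog:

package main

import "fmt"

type block struct {
	name   string
	placed bool
}

type branch struct {
	cond         string
	target, next *block // taken target and fall-through successor
}

// inverse plays the role of relinv for a few conditions.
var inverse = map[string]string{
	"JEQ": "JNE", "JNE": "JEQ",
	"JLT": "JGE", "JGE": "JLT",
	"JHI": "JLS", "JLS": "JHI",
}

// layout inverts and swaps when the fall-through is already placed
// but the target is not, mirroring the swap of p.Link and p.Pcond.
func layout(b *branch) {
	if b.next.placed && !b.target.placed {
		b.cond = inverse[b.cond]
		b.target, b.next = b.next, b.target
	}
}

func main() {
	done := &block{name: "done", placed: true}
	body := &block{name: "body"}
	br := &branch{cond: "JEQ", target: body, next: done}
	layout(br)
	fmt.Println(br.cond, br.target.name, br.next.name) // JNE done body
}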

var unaryDst = map[int]bool{
	ABSWAPL:    true,
	ABSWAPQ:    true,
	ACMPXCHG8B: true,
	ADECB:      true,
	ADECL:      true,
	ADECQ:      true,
	ADECW:      true,
	AINCB:      true,
	AINCL:      true,
	AINCQ:      true,
	AINCW:      true,
	ANEGB:      true,
	ANEGL:      true,
	ANEGQ:      true,
	ANEGW:      true,
	ANOTB:      true,
	ANOTL:      true,
	ANOTQ:      true,
	ANOTW:      true,
	APOPL:      true,
	APOPQ:      true,
	APOPW:      true,
	ASETCC:     true,
	ASETCS:     true,
	ASETEQ:     true,
	ASETGE:     true,
	ASETGT:     true,
	ASETHI:     true,
	ASETLE:     true,
	ASETLS:     true,
	ASETLT:     true,
	ASETMI:     true,
	ASETNE:     true,
	ASETOC:     true,
	ASETOS:     true,
	ASETPC:     true,
	ASETPL:     true,
	ASETPS:     true,
	AFFREE:     true,
	AFLDENV:    true,
	AFSAVE:     true,
	AFSTCW:     true,
	AFSTENV:    true,
	AFSTSW:     true,
	AFXSAVE:    true,
	AFXSAVE64:  true,
	ASTMXCSR:   true,
}

var Linkamd64 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    8,
	Regsize:    8,
}

var Linkamd64p32 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64p32",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    8,
}

var Link386 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "386",
	Thechar:    '8',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    4,
}
Example #5
File: reflect.go Project: rsc/tmp
func dtypesym(t *Type) *Sym {
	// Replace byte, rune aliases with real type.
	// They've been separate internally to make error messages
	// better, but we have to merge them in the reflect tables.
	if t == bytetype || t == runetype {
		t = Types[t.Etype]
	}

	if isideal(t) {
		Fatal("dtypesym %v", t)
	}

	s := typesym(t)
	if s.Flags&SymSiggen != 0 {
		return s
	}
	s.Flags |= SymSiggen

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	tbase := t

	if Isptr[t.Etype] && t.Sym == nil && t.Type.Sym != nil {
		tbase = t.Type
	}
	dupok := 0
	if tbase.Sym == nil {
		dupok = obj.DUPOK
	}

	if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
		goto ok
	}

	// named types from other files are defined only by those files
	if tbase.Sym != nil && !tbase.Local {
		return s
	}
	if isforw[tbase.Etype] {
		return s
	}

ok:
	ot := 0
	xt := 0
	switch t.Etype {
	default:
		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr

	case TARRAY:
		if t.Bound >= 0 {
			// ../../runtime/type.go:/ArrayType
			s1 := dtypesym(t.Type)

			t2 := typ(TARRAY)
			t2.Type = t.Type
			t2.Bound = -1 // slice
			s2 := dtypesym(t2)
			ot = dcommontype(s, ot, t)
			xt = ot - 3*Widthptr
			ot = dsymptr(s, ot, s1, 0)
			ot = dsymptr(s, ot, s2, 0)
			ot = duintptr(s, ot, uint64(t.Bound))
		} else {
			// ../../runtime/type.go:/SliceType
			s1 := dtypesym(t.Type)

			ot = dcommontype(s, ot, t)
			xt = ot - 3*Widthptr
			ot = dsymptr(s, ot, s1, 0)
		}

		// ../../runtime/type.go:/ChanType
	case TCHAN:
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = duintptr(s, ot, uint64(t.Chan))

	case TFUNC:
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}
		isddd := false
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			isddd = t1.Isddd
			dtypesym(t1.Type)
		}

		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = duint8(s, ot, uint8(obj.Bool2int(isddd)))

		// two slice headers: in and out.
		ot = int(Rnd(int64(ot), int64(Widthptr)))

		ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
		n := t.Thistuple + t.Intuple
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
		ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)

		// slice data
		for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}
		for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			n++
		}

	case TINTER:
		m := imethods(t)
		n := 0
		for a := m; a != nil; a = a.link {
			dtypesym(a.type_)
			n++
		}

		// ../../runtime/type.go:/InterfaceType
		ot = dcommontype(s, ot, t)

		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for a := m; a != nil; a = a.link {
			// ../../runtime/type.go:/imethod
			ot = dgostringptr(s, ot, a.name)

			ot = dgopkgpath(s, ot, a.pkg)
			ot = dsymptr(s, ot, dtypesym(a.type_), 0)
		}

		// ../../runtime/type.go:/MapType
	case TMAP:
		s1 := dtypesym(t.Down)

		s2 := dtypesym(t.Type)
		s3 := dtypesym(mapbucket(t))
		s4 := dtypesym(hmap(t))
		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)
		ot = dsymptr(s, ot, s2, 0)
		ot = dsymptr(s, ot, s3, 0)
		ot = dsymptr(s, ot, s4, 0)
		if t.Down.Width > MAXKEYSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Down.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		if t.Type.Width > MAXVALSIZE {
			ot = duint8(s, ot, uint8(Widthptr))
			ot = duint8(s, ot, 1) // indirect
		} else {
			ot = duint8(s, ot, uint8(t.Type.Width))
			ot = duint8(s, ot, 0) // not indirect
		}

		ot = duint16(s, ot, uint16(mapbucket(t).Width))
		ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))

	case TPTR32, TPTR64:
		if t.Type.Etype == TANY {
			// ../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(s, ot, t)

			break
		}

		// ../../runtime/type.go:/PtrType
		s1 := dtypesym(t.Type)

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s1, 0)

		// ../../runtime/type.go:/StructType
	// for security, only the exported fields.
	case TSTRUCT:
		n := 0

		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			dtypesym(t1.Type)
			n++
		}

		ot = dcommontype(s, ot, t)
		xt = ot - 3*Widthptr
		ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		ot = duintxx(s, ot, uint64(n), Widthint)
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			// ../../runtime/type.go:/structField
			if t1.Sym != nil && t1.Embedded == 0 {
				ot = dgostringptr(s, ot, t1.Sym.Name)
				if exportname(t1.Sym.Name) {
					ot = dgostringptr(s, ot, "")
				} else {
					ot = dgopkgpath(s, ot, t1.Sym.Pkg)
				}
			} else {
				ot = dgostringptr(s, ot, "")
				if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
					ot = dgopkgpath(s, ot, localpkg)
				} else {
					ot = dgostringptr(s, ot, "")
				}
			}

			ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
			ot = dgostrlitptr(s, ot, t1.Note)
			ot = duintptr(s, ot, uint64(t1.Width)) // field offset
		}
	}

	ot = dextratype(s, ot, t, xt)
	ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))

	// generate typelink.foo pointing at s = type.foo.
	// The linker will leave a table of all the typelinks for
	// types in the binary, so reflect can find them.
	// We only need the link for unnamed composites that
	// we want to be able to find.
	if t.Sym == nil {
		switch t.Etype {
		case TPTR32, TPTR64:
			// The ptrto field of the type data cannot be relied on when
			// dynamic linking: a type T may be defined in a module that makes
			// no use of pointers to that type, but another module can contain
			// a package that imports the first one and does use *T pointers.
			// The second module will end up defining type data for *T and a
			// type.*T symbol pointing at it. It's important that calling
			// .PtrTo() on the reflect.Type for T returns this type data and
			// not some synthesized object, so we need reflect to be able to
			// find it!
			if !Ctxt.Flag_dynlink {
				break
			}
			fallthrough
		case TARRAY, TCHAN, TFUNC, TMAP:
			slink := typelinksym(t)
			dsymptr(slink, 0, s, 0)
			ggloblsym(slink, int32(Widthptr), int16(dupok|obj.RODATA))
		}
	}

	return s
}
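Every arm of dtypesym threads the offset accumulator ot through helpers such as dcommontype, dsymptr, and duint8: each writes a field at offset ot in the symbol and returns the next free offset. A minimal self-contained sketch of that accumulator style, with a byte slice standing in for the symbol and simplified stand-ins for the d* helpers:

package main

import (
	"encoding/binary"
	"fmt"
)

type sym struct{ data []byte }

// grow pads the data so a write ending at offset n is in range.
func (s *sym) grow(n int) {
	for len(s.data) < n {
		s.data = append(s.data, 0)
	}
}

// duint8 writes one byte at ot and returns the next free offset.
func duint8(s *sym, ot int, v uint8) int {
	s.grow(ot + 1)
	s.data[ot] = v
	return ot + 1
}

// duint16 writes a little-endian uint16 at ot and returns the next offset.
func duint16(s *sym, ot int, v uint16) int {
	s.grow(ot + 2)
	binary.LittleEndian.PutUint16(s.data[ot:], v)
	return ot + 2
}

func main() {
	s := &sym{}
	ot := 0
	ot = duint8(s, ot, 0x2a)
	ot = duint16(s, ot, 0x0102)
	fmt.Println(ot, s.data) // 3 [42 2 1]
}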
Example #6
File: reflect.go Project: rsc/tmp
func haspointers(t *Type) bool {
	if t.Haspointers != 0 {
		return t.Haspointers-1 != 0
	}

	var ret bool
	switch t.Etype {
	case TINT,
		TUINT,
		TINT8,
		TUINT8,
		TINT16,
		TUINT16,
		TINT32,
		TUINT32,
		TINT64,
		TUINT64,
		TUINTPTR,
		TFLOAT32,
		TFLOAT64,
		TCOMPLEX64,
		TCOMPLEX128,
		TBOOL:
		ret = false

	case TARRAY:
		if t.Bound < 0 { // slice
			ret = true
			break
		}

		if t.Bound == 0 { // empty array
			ret = false
			break
		}

		ret = haspointers(t.Type)

	case TSTRUCT:
		ret = false
		for t1 := t.Type; t1 != nil; t1 = t1.Down {
			if haspointers(t1.Type) {
				ret = true
				break
			}
		}

	case TSTRING,
		TPTR32,
		TPTR64,
		TUNSAFEPTR,
		TINTER,
		TCHAN,
		TMAP,
		TFUNC:
		fallthrough
	default:
		ret = true
	}

	t.Haspointers = 1 + uint8(obj.Bool2int(ret))
	return ret
}
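The first two lines of haspointers rely on a three-state cache: the uint8 field holds 0 for "not yet computed", 1 for false, and 2 for true, which is why the code tests t.Haspointers-1 != 0 and stores 1 + Bool2int(ret). A minimal sketch of the same memoization, using a hypothetical node type:

package main

import "fmt"

type node struct {
	hasPtrCache uint8 // 0 = unknown, 1 = no pointers, 2 = has pointers
	leafPtr     bool
	children    []*node
}

// hasPointers computes the property once and caches it in one byte,
// mirroring the 1 + Bool2int(ret) encoding above.
func hasPointers(n *node) bool {
	if n.hasPtrCache != 0 {
		return n.hasPtrCache == 2
	}
	ret := n.leafPtr
	for _, c := range n.children {
		if hasPointers(c) {
			ret = true
			break
		}
	}
	n.hasPtrCache = 1
	if ret {
		n.hasPtrCache = 2
	}
	return ret
}

func main() {
	leaf := &node{leafPtr: true}
	root := &node{children: []*node{{}, leaf}}
	fmt.Println(hasPointers(root)) // true
	fmt.Println(root.hasPtrCache)  // 2: cached as "has pointers"
}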
Example #7
File: gsubr.go Project: rsc/tmp
// Naddr rewrites a to refer to n.
// It assumes that a is zeroed on entry.
func Naddr(a *obj.Addr, n *Node) {
	if n == nil {
		return
	}

	if n.Type != nil && n.Type.Etype != TIDEAL {
		// TODO(rsc): This is undone by the selective clearing of width below,
		// to match architectures that were not as aggressive in setting width
		// during naddr. Those widths must be cleared to avoid triggering
		// failures in gins when it detects real but heretofore latent (and one
		// hopes innocuous) type mismatches.
		// The type mismatches should be fixed and the clearing below removed.
		dowidth(n.Type)

		a.Width = n.Type.Width
	}

	switch n.Op {
	default:
		a := a // copy to let escape into Ctxt.Dconv
		Debug['h'] = 1
		Dump("naddr", n)
		Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))

	case OREGISTER:
		a.Type = obj.TYPE_REG
		a.Reg = n.Reg
		a.Sym = nil
		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

	case OINDREG:
		a.Type = obj.TYPE_MEM
		a.Reg = n.Reg
		a.Sym = Linksym(n.Sym)
		a.Offset = n.Xoffset
		if a.Offset != int64(int32(a.Offset)) {
			Yyerror("offset %d too large for OINDREG", a.Offset)
		}
		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
			a.Width = 0
		}

		// n->left is PHEAP ONAME for stack parameter.
	// compute address of actual parameter on stack.
	case OPARAM:
		a.Etype = Simtype[n.Left.Type.Etype]

		a.Width = n.Left.Type.Width
		a.Offset = n.Xoffset
		a.Sym = Linksym(n.Left.Sym)
		a.Type = obj.TYPE_MEM
		a.Name = obj.NAME_PARAM
		a.Node = n.Left.Orig

	case OCLOSUREVAR:
		if !Curfn.Func.Needctxt {
			Fatal("closurevar without needctxt")
		}
		a.Type = obj.TYPE_MEM
		a.Reg = int16(Thearch.REGCTXT)
		a.Sym = nil
		a.Offset = n.Xoffset

	case OCFUNC:
		Naddr(a, n.Left)
		a.Sym = Linksym(n.Left.Sym)

	case ONAME:
		a.Etype = 0
		if n.Type != nil {
			a.Etype = Simtype[n.Type.Etype]
		}
		a.Offset = n.Xoffset
		s := n.Sym
		a.Node = n.Orig

		//if(a->node >= (Node*)&n)
		//	fatal("stack node");
		if s == nil {
			s = Lookup(".noname")
		}
		if n.Method {
			if n.Type != nil {
				if n.Type.Sym != nil {
					if n.Type.Sym.Pkg != nil {
						s = Pkglookup(s.Name, n.Type.Sym.Pkg)
					}
				}
			}
		}

		a.Type = obj.TYPE_MEM
		switch n.Class {
		default:
			Fatal("naddr: ONAME class %v %d\n", n.Sym, n.Class)

		case PEXTERN:
			a.Name = obj.NAME_EXTERN

		case PAUTO:
			a.Name = obj.NAME_AUTO

		case PPARAM, PPARAMOUT:
			a.Name = obj.NAME_PARAM

		case PFUNC:
			a.Name = obj.NAME_EXTERN
			a.Type = obj.TYPE_ADDR
			a.Width = int64(Widthptr)
			s = funcsym(s)
		}

		a.Sym = Linksym(s)

	case OLITERAL:
		if Thearch.Thechar == '8' {
			a.Width = 0
		}
		switch n.Val.Ctype {
		default:
			Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))

		case CTFLT:
			a.Type = obj.TYPE_FCONST
			a.Val = mpgetflt(n.Val.U.Fval)

		case CTINT, CTRUNE:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = Mpgetfix(n.Val.U.Xval)

		case CTSTR:
			datagostring(n.Val.U.Sval, a)

		case CTBOOL:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = int64(obj.Bool2int(n.Val.U.Bval))

		case CTNIL:
			a.Sym = nil
			a.Type = obj.TYPE_CONST
			a.Offset = 0
		}

	case OADDR:
		Naddr(a, n.Left)
		a.Etype = uint8(Tptr)
		if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
			a.Width = int64(Widthptr)
		}
		if a.Type != obj.TYPE_MEM {
			a := a // copy to let escape into Ctxt.Dconv
			Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
		}
		a.Type = obj.TYPE_ADDR

		// itable of interface value
	case OITAB:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // itab(nil)
		}
		a.Etype = uint8(Tptr)
		a.Width = int64(Widthptr)

		// pointer in a string or slice
	case OSPTR:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // ptr(nil)
		}
		a.Etype = Simtype[Tptr]
		a.Offset += int64(Array_array)
		a.Width = int64(Widthptr)

		// len of string or slice
	case OLEN:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // len(nil)
		}
		a.Etype = Simtype[TUINT]
		a.Offset += int64(Array_nel)
		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}

		// cap of string or slice
	case OCAP:
		Naddr(a, n.Left)

		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
			break // cap(nil)
		}
		a.Etype = Simtype[TUINT]
		a.Offset += int64(Array_cap)
		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
			a.Width = int64(Widthint)
		}
	}
	return
}