Example #1
// opCheckDivideByZero traps with IntegerDivideByZero if the value in reg is zero.
func (mach X86) opCheckDivideByZero(code gen.RegCoder, t types.T, reg regs.R) {
	var end links.L

	Test.opFromReg(code, t, reg, reg)
	Jne.rel8.opStub(code)
	end.AddSite(code.Len())

	code.OpTrapCall(traps.IntegerDivideByZero)

	end.Addr = code.Len()
	mach.updateBranches8(code, &end)
}
Example #2
// opConvertUnsignedI64ToFloat converts the unsigned 64-bit integer in inputReg
// to a floating-point value of resultType in the result register.
func (mach X86) opConvertUnsignedI64ToFloat(code gen.Coder, resultType types.T, inputReg regs.R) {
	// this algorithm is copied from code generated by gcc and clang:

	var done links.L
	var huge links.L

	// TODO: allocate target reg

	Test.opFromReg(code, types.I64, inputReg, inputReg)
	Js.rel8.opStub(code)
	huge.AddSite(code.Len())

	// max. 63-bit value
	Cvtsi2sSSE.opReg(code, resultType, types.I64, regResult, inputReg)

	JmpRel.rel8.opStub(code)
	done.AddSite(code.Len())

	huge.Addr = code.Len()
	mach.updateBranches8(code, &huge)

	// 64-bit value
	Mov.opFromReg(code, types.I64, regScratch, inputReg)
	And.opImm(code, types.I64, regScratch, 1)
	ShrImm.op(code, types.I64, inputReg, 1)
	Or.opFromReg(code, types.I64, inputReg, regScratch)
	Cvtsi2sSSE.opReg(code, resultType, types.I64, regResult, inputReg)
	AddsSSE.opFromReg(code, resultType, regResult, regResult)

	done.Addr = code.Len()
	mach.updateBranches8(code, &done)
}
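For reference, the trick the emitted code performs can be written in plain Go. This is a hypothetical sketch (the helper name and the float64 result type are assumptions), not wag code:

func uint64ToFloat(v uint64) float64 {
	if v < 1<<63 {
		// The value fits in a signed 64-bit integer, so a direct conversion works.
		return float64(int64(v))
	}
	// Sign bit set: halve the value but fold the dropped low bit back in so the
	// final rounding is still correct, convert as a signed value, then double.
	half := (v >> 1) | (v & 1)
	return float64(int64(half)) * 2
}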
Example #3
File: x86.go Project: tsavola/wag
// OpCallIndirect emits an indirect call using the table index located in the
// result register.
func (mach X86) OpCallIndirect(code gen.Coder, tableLen, sigIndex int32) int32 {
	var outOfBounds links.L
	var checksOut links.L

	mach.opCompareBounds(code, regResult, tableLen)
	Jle.rel8.opStub(code) // TODO: is this the correct comparison?
	outOfBounds.AddSite(code.Len())

	Mov.opFromAddr(code, types.I64, regResult, 3, regResult, code.RODataAddr()+gen.ROTableAddr)
	Mov.opFromReg(code, types.I32, regScratch, regResult) // zero-extended function address
	ShrImm.op(code, types.I64, regResult, 32)             // signature index
	Cmp.opImm(code, types.I32, regResult, sigIndex)
	Je.rel8.opStub(code)
	checksOut.AddSite(code.Len())

	code.OpTrapCall(traps.IndirectCallSignature)

	outOfBounds.Addr = code.Len()
	mach.updateBranches8(code, &outOfBounds)

	code.OpTrapCall(traps.IndirectCallIndex)

	checksOut.Addr = code.Len()
	mach.updateBranches8(code, &checksOut)

	Add.opFromReg(code, types.I64, regScratch, regTextBase)
	Call.opReg(code, regScratch)
	return code.Len()
}
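In plain Go terms the emitted sequence performs roughly the following checks. This is a hypothetical sketch; the table-entry layout (function address in the low 32 bits, signature index in the high 32 bits) is inferred from the loads above, and the intended bounds-check semantics are shown even though the TODO questions the exact jump condition:

func checkIndirectCall(table []uint64, index, sigIndex int32) (funcAddr uint32) {
	if index < 0 || index >= int32(len(table)) {
		panic("IndirectCallIndex trap")
	}
	entry := table[index]
	if uint32(entry>>32) != uint32(sigIndex) {
		panic("IndirectCallSignature trap")
	}
	return uint32(entry) // the real code adds the text base and calls it
}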
Example #4
File: x86.go Project: tsavola/wag
// OpTrapIfStackExhausted emits a stack-limit check which traps with
// CallStackExhausted.  The returned stackCheckAddr locates the 32-bit
// displacement reserved by the Lea, to be patched later.
func (mach X86) OpTrapIfStackExhausted(code gen.Coder) (stackCheckAddr int32) {
	var checked links.L

	Lea.opFromStack(code, types.I64, regScratch, -0x80000000) // reserve 32-bit displacement
	stackCheckAddr = code.Len()

	Cmp.opFromReg(code, types.I64, regScratch, regStackLimit)

	Jge.rel8.opStub(code)
	checked.AddSite(code.Len())

	code.OpTrapCall(traps.CallStackExhausted)

	checked.Addr = code.Len()
	mach.updateBranches8(code, &checked)
	return
}
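Once the reserved 32-bit displacement has been patched (presumably with the negated frame size of the function being compiled), the emitted check amounts to the following hypothetical sketch:

func checkStack(stackPtr, frameSize, stackLimit uintptr) {
	if stackPtr-frameSize < stackLimit {
		panic("CallStackExhausted trap")
	}
}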
Example #5
File: x86.go Project: tsavola/wag
// OpInit emits the init sequence: the stack reserve is released, a non-zero
// result register causes an immediate return (resuming from a snapshot), and
// otherwise the start function at startAddr is called.  The return value is
// the code position following the start call.
func (mach X86) OpInit(code gen.OpCoder, startAddr int32) (retAddr int32) {
	if code.Len() == 0 || code.Len() > functionAlignment {
		panic("inconsistency")
	}
	code.Align(functionAlignment, paddingByte)
	Add.opImm(code, types.I64, regStackLimit, gen.StackReserve)

	var notResume links.L

	Test.opFromReg(code, types.I64, regResult, regResult)
	Je.rel8.opStub(code)
	notResume.AddSite(code.Len())
	Ret.op(code) // simulate return from snapshot function call

	notResume.Addr = code.Len()
	mach.updateBranches8(code, &notResume)

	CallRel.op(code, startAddr)
	return code.Len()
}
Example #6
File: x86.go Project: tsavola/wag
// OpBranchIf branches to addr when x's truth value matches yes.  The returned
// sites are the code positions immediately after the emitted branch
// instructions, for later retargeting.
func (mach X86) OpBranchIf(code gen.Coder, x values.Operand, yes bool, addr int32) (sites []int32) {
	var cond values.Condition

	if x.Storage == values.ConditionFlags {
		cond = x.Condition()
	} else {
		reg, _, own := mach.opBorrowMaybeScratchReg(code, x, false)
		if own {
			defer code.FreeReg(types.I32, reg)
		}

		Test.opFromReg(code, types.I32, reg, reg)
		cond = values.Ne
	}

	if !yes {
		cond = values.InvertedConditions[cond]
	}

	var end links.L

	switch {
	case cond >= values.MinUnorderedOrCondition:
		Jp.op(code, addr)
		sites = append(sites, code.Len())

	case cond >= values.MinOrderedAndCondition:
		Jp.rel8.opStub(code)
		end.AddSite(code.Len())
	}

	conditionInsns[cond].jcc.op(code, addr)
	sites = append(sites, code.Len())

	end.Addr = code.Len()
	mach.updateBranches8(code, &end)
	return
}
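The two extra cases handle NaN: x86 floating-point comparisons set the parity flag when the operands are unordered, so "unordered-or" conditions (such as not-equal) take the branch directly on Jp, while "ordered-and" conditions (such as less-than) use Jp to skip the conditional jump. The same asymmetry in plain Go (hypothetical sketch):

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan < 1.0)  // false: "ordered and" conditions fail when unordered
	fmt.Println(nan != 1.0) // true:  "unordered or" conditions hold when unordered
}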
Example #7
File: x86.go Project: tsavola/wag
// UpdateCalls modifies CallRel instructions, possibly while they are being
// executed.
func (mach X86) UpdateCalls(code gen.OpCoder, l *links.L) {
	funcAddr := l.FinalAddr()
	for _, retAddr := range l.Sites {
		mach.PutUint32(code.Bytes()[retAddr-4:retAddr], uint32(funcAddr-retAddr))
	}
}
Example #8
File: x86.go Project: tsavola/wag
// updateBranches8 modifies 8-bit relocations of Jmp and Jcc instructions.
func (mach X86) updateBranches8(code gen.OpCoder, l *links.L) {
	labelAddr := l.FinalAddr()
	for _, retAddr := range l.Sites {
		mach.updateAddr8(code, retAddr, labelAddr-retAddr)
	}
}
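UpdateCalls and updateBranches8 both patch displacements relative to the recorded site, which is the code position just after the instruction. updateAddr8 itself is not shown here; by analogy with UpdateCalls it presumably writes a single signed byte immediately before the site. A hypothetical sketch operating directly on the text bytes:

func updateAddr8(text []byte, site, disp int32) {
	if disp < -0x80 || disp > 0x7f {
		panic("branch displacement does not fit in rel8")
	}
	// The site is the address right after the branch instruction, so the
	// rel8 byte is the last byte of that instruction.
	text[site-1] = byte(int8(disp))
}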
Example #9
File: x86.go Project: tsavola/wag
// OpSelect picks a when condOperand is true and b otherwise, returning the
// result as an operand.
func (mach X86) OpSelect(code gen.RegCoder, a, b, condOperand values.Operand) values.Operand {
	defer code.Consumed(condOperand)

	var cond values.Condition

	switch condOperand.Storage {
	case values.VarMem:
		Cmp.opImmToStack(code, types.I32, condOperand.VarMemOffset(), 0)
		cond = values.Ne

	case values.VarReg, values.TempReg:
		reg := condOperand.Reg()
		Test.opFromReg(code, types.I32, reg, reg)
		cond = values.Ne

	case values.Stack:
		mach.OpAddImmToStackPtr(code, 8) // do before cmp to avoid overwriting flags
		Cmp.opImmToStack(code, types.I32, -8, 0)
		cond = values.Ne

	case values.ConditionFlags:
		cond = condOperand.Condition()

	case values.Imm:
		if condOperand.ImmValue() != 0 {
			code.Consumed(b)
			return a
		} else {
			code.Consumed(a)
			return b
		}

	default:
		panic(condOperand)
	}

	t := a.Type
	targetReg, _ := mach.opMaybeResultReg(code, b, true)

	switch t.Category() {
	case types.Int:
		cmov := conditionInsns[cond].cmov

		switch a.Storage {
		case values.VarMem:
			cmov.opFromStack(code, t, targetReg, a.VarMemOffset())

		default:
			aReg, _, own := mach.opBorrowMaybeScratchReg(code, a, true)
			if own {
				defer code.FreeReg(t, aReg)
			}

			cmov.opFromReg(code, t, targetReg, aReg)
		}

	case types.Float:
		var moveIt links.L
		var end links.L

		cond = values.InvertedConditions[cond]
		notCondJump := conditionInsns[cond].jcc

		switch {
		case cond >= values.MinUnorderedOrCondition:
			Jp.rel8.opStub(code) // move it if unordered
			moveIt.AddSite(code.Len())

			notCondJump.rel8.opStub(code) // break if not cond
			end.AddSite(code.Len())

		case cond >= values.MinOrderedAndCondition:
			Jp.rel8.opStub(code) // break if unordered
			end.AddSite(code.Len())

			notCondJump.rel8.opStub(code) // break if not cond
			end.AddSite(code.Len())

		default:
			notCondJump.rel8.opStub(code) // break if not cond
			end.AddSite(code.Len())
		}

		moveIt.Addr = code.Len()
		mach.updateBranches8(code, &moveIt)

		mach.OpMove(code, targetReg, a, false)

		end.Addr = code.Len()
		mach.updateBranches8(code, &end)

	default:
		panic(t)
	}

	// cmov zero-extends the target unconditionally
	return values.TempRegOperand(t, targetReg, true)
}
Example #10
File: x86.go Project: tsavola/wag
// OpMove must not update CPU's condition flags if preserveFlags is set.
//
// X86 implementation note: must not blindly rely on regScratch or regResult in
// this function because we may be moving to one of them.
func (mach X86) OpMove(code gen.Coder, targetReg regs.R, x values.Operand, preserveFlags bool) (zeroExt bool) {
	switch x.Type.Category() {
	case types.Int:
		switch x.Storage {
		case values.Imm:
			if value := x.ImmValue(); value == 0 && !preserveFlags {
				Xor.opFromReg(code, types.I32, targetReg, targetReg)
			} else {
				MovImm64.op(code, x.Type, targetReg, value)
			}
			zeroExt = true

		case values.VarMem:
			Mov.opFromStack(code, x.Type, targetReg, x.VarMemOffset())
			zeroExt = true

		case values.VarReg:
			if sourceReg := x.Reg(); sourceReg != targetReg {
				Mov.opFromReg(code, x.Type, targetReg, sourceReg)
				zeroExt = true
			}

		case values.TempReg:
			if sourceReg := x.Reg(); sourceReg != targetReg {
				Mov.opFromReg(code, x.Type, targetReg, sourceReg)
				zeroExt = true
			} else if targetReg == regResult {
				zeroExt = x.RegZeroExt()
			} else {
				panic("moving temporary integer register to itself")
			}

		case values.Stack:
			Pop.op(code, targetReg)

		case values.ConditionFlags:
			if x.Type != types.I32 {
				panic(x)
			}

			var end links.L

			cond := x.Condition()
			setcc := conditionInsns[cond].setcc

			switch {
			case cond >= values.MinUnorderedOrCondition:
				MovImm.opImm(code, x.Type, targetReg, 1) // true
				Jp.rel8.opStub(code)                     // if unordered, else
				end.AddSite(code.Len())                  //
				setcc.opReg(code, targetReg)             // cond

			case cond >= values.MinOrderedAndCondition:
				MovImm.opImm(code, x.Type, targetReg, 0) // false
				Jp.rel8.opStub(code)                     // if unordered, else
				end.AddSite(code.Len())                  //
				setcc.opReg(code, targetReg)             // cond

			default:
				setcc.opReg(code, targetReg)
				Movzx8.opFromReg(code, x.Type, targetReg, targetReg)
			}

			end.Addr = code.Len()
			mach.updateBranches8(code, &end)

			zeroExt = true

		default:
			panic(x)
		}

	case types.Float:
		switch x.Storage {
		case values.Imm:
			if value := x.ImmValue(); value == 0 {
				PxorSSE.opFromReg(code, x.Type, targetReg, targetReg)
			} else {
				MovImm64.op(code, x.Type, regScratch, value) // integer scratch register
				MovSSE.opFromReg(code, x.Type, targetReg, regScratch)
			}

		case values.VarMem:
			MovsSSE.opFromStack(code, x.Type, targetReg, x.VarMemOffset())

		case values.VarReg:
			if sourceReg := x.Reg(); sourceReg != targetReg {
				MovsSSE.opFromReg(code, x.Type, targetReg, sourceReg)
			}

		case values.TempReg:
			if sourceReg := x.Reg(); sourceReg != targetReg {
				MovsSSE.opFromReg(code, x.Type, targetReg, sourceReg)
			} else if targetReg != regResult {
				panic("moving temporary float register to itself")
			}

		case values.Stack:
			popFloatOp(code, x.Type, targetReg)

		default:
			panic(x)
		}

	default:
		panic(x)
	}

	code.Consumed(x)

	return
}
Example #11
// binaryDivmulOp emits an integer division, remainder or multiplication
// operation, applying strength reduction for suitable immediate operands and
// emitting the necessary divide-by-zero and overflow checks.
func (mach X86) binaryDivmulOp(code gen.RegCoder, index uint8, a, b values.Operand) values.Operand {
	insn := binaryDivmulInsns[index]
	t := a.Type

	if b.Storage == values.Imm {
		value := b.ImmValue()

		switch {
		case value == -1:
			reg, _ := mach.opMaybeResultReg(code, a, false)
			Neg.opReg(code, t, reg)
			return values.TempRegOperand(t, reg, true)

		case insn.shiftImm.defined() && value > 0 && isPowerOfTwo(uint64(value)):
			reg, _ := mach.opMaybeResultReg(code, a, false)
			insn.shiftImm.op(code, t, reg, log2(uint64(value)))
			return values.TempRegOperand(t, reg, true)
		}
	}

	division := (index & opers.DivmulMul) == 0
	checkZero := true
	checkOverflow := true

	if b.Storage.IsReg() {
		if b.Reg() == regResult {
			newReg := regScratch

			if division {
				var ok bool

				// can't use scratch reg as divisor since it contains the dividend high bits
				newReg, ok = code.TryAllocReg(t)
				if !ok {
					// borrow a register which we don't need in this function
					MovMMX.opFromReg(code, types.I64, regScratchMMX, regTextBase)
					defer MovMMX.opToReg(code, types.I64, regTextBase, regScratchMMX)

					newReg = regTextBase
				}
			}

			Mov.opFromReg(code, t, newReg, regResult)
			b = values.RegOperand(true, t, newReg)
		}
	} else {
		if division && b.Storage == values.Imm {
			value := b.ImmValue()
			if value != 0 {
				checkZero = false
			}
			if value != -1 {
				checkOverflow = false
			}
		}

		reg, ok := code.TryAllocReg(t)
		if !ok {
			// borrow a register which we don't need in this function
			MovMMX.opFromReg(code, types.I64, regScratchMMX, regTextBase)
			defer MovMMX.opToReg(code, types.I64, regTextBase, regScratchMMX)

			reg = regTextBase
		}

		mach.OpMove(code, reg, b, true)
		b = values.RegOperand(true, t, reg)
	}

	mach.OpMove(code, regResult, a, false)

	remainder := (index & opers.DivmulRem) != 0

	var doNot links.L

	if division {
		if checkZero {
			mach.opCheckDivideByZero(code, t, b.Reg())
		}

		if a.Storage == values.Imm {
			value := a.ImmValue()
			if t.Size() == types.Size32 {
				if value != -0x80000000 {
					checkOverflow = false
				}
			} else {
				if value != -0x8000000000000000 {
					checkOverflow = false
				}
			}
		}

		signed := (index & opers.DivmulSign) != 0

		if signed && checkOverflow {
			var do links.L

			if remainder {
				Xor.opFromReg(code, types.I32, regScratch, regScratch) // moved to result at the end

				Cmp.opImm(code, t, b.Reg(), -1)
				Je.rel8.opStub(code)
				doNot.AddSite(code.Len())
			} else {
				switch t.Size() {
				case types.Size32:
					Cmp.opImm(code, t, regResult, -0x80000000)

				case types.Size64:
					MovImm64.op(code, t, regScratch, -0x8000000000000000)
					Cmp.opFromReg(code, t, regResult, regScratch)

				default:
					panic(a)
				}

				Jne.rel8.opStub(code)
				do.AddSite(code.Len())

				Cmp.opImm(code, t, b.Reg(), -1)
				Jne.rel8.opStub(code)
				do.AddSite(code.Len())

				code.OpTrapCall(traps.IntegerOverflow)
			}

			do.Addr = code.Len()
			mach.updateBranches8(code, &do)
		}

		if signed {
			// sign-extend dividend low bits to high bits
			CdqCqo.op(code, t)
		} else {
			// zero-extend dividend high bits
			Xor.opFromReg(code, types.I32, regScratch, regScratch)
		}
	}

	insn.opReg(code, t, b.Reg())
	code.Consumed(b)

	doNot.Addr = code.Len()
	mach.updateBranches8(code, &doNot)

	if remainder {
		Mov.opFromReg(code, t, regResult, regScratch)
	}

	return values.TempRegOperand(t, regResult, true)
}
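The guards emitted above reject the divisions that have no representable result: any division by zero, and the signed division of the minimum value by -1. A minimal Go illustration of the same rules (hypothetical helper, 32-bit case only):

package main

import "math"

func divide(a, b int32) int32 {
	if b == 0 {
		panic("IntegerDivideByZero trap")
	}
	if a == math.MinInt32 && b == -1 {
		// The mathematically correct quotient, +2^31, is not representable in int32.
		panic("IntegerOverflow trap")
	}
	return a / b
}

func main() {
	_ = divide(math.MinInt32, -1) // panics with the overflow trap
}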
Example #12
// StoreOp makes sure that index gets zero-extended if it's a VarReg operand.
func (mach X86) StoreOp(code gen.RegCoder, oper uint16, index, x values.Operand, offset uint32) {
	size := oper >> 8

	baseReg, indexReg, ownIndexReg, disp := mach.opMemoryAddress(code, size, index, offset)
	if ownIndexReg {
		defer code.FreeReg(types.I64, indexReg)
	}

	store := memoryStores[uint8(oper)]

	insnType := store.insnType
	if insnType == 0 {
		insnType = x.Type
	}

	if x.Storage == values.Imm {
		value := x.ImmValue()
		value32 := int32(value)

		switch {
		case size == 1:
			value32 = int32(int8(value32))

		case size == 2:
			value32 = int32(int16(value32))

		case size == 4 || (value >= -0x80000000 && value < 0x80000000):

		default:
			goto large
		}

		store.insn.opImmToIndirect(code, insnType, 0, indexReg, baseReg, disp, value32)
		return

	large:
	}

	valueReg, _, own := mach.opBorrowMaybeResultReg(code, x, false)
	if own {
		defer code.FreeReg(x.Type, valueReg)
	}

	store.insn.opToIndirect(code, insnType, valueReg, 0, indexReg, baseReg, disp)
}

// opMemoryAddress may return the scratch register as the base.
func (mach X86) opMemoryAddress(code gen.RegCoder, size uint16, index values.Operand, offset uint32) (baseReg, indexReg regs.R, ownIndexReg bool, disp int32) {
	sizeReach := uint64(size - 1)
	reachOffset := uint64(offset) + sizeReach

	if reachOffset >= 0x80000000 {
		code.OpTrapCall(traps.MemoryOutOfBounds)
		return
	}

	alreadyChecked := reachOffset < uint64(index.Bounds.Upper)

	switch index.Storage {
	case values.Imm:
		value := uint64(index.ImmValue())

		if value >= 0x80000000 {
			code.OpTrapCall(traps.MemoryOutOfBounds)
			return
		}

		addr := value + uint64(offset)
		reachAddr := addr + sizeReach

		if reachAddr >= 0x80000000 {
			code.OpTrapCall(traps.MemoryOutOfBounds)
			return
		}

		if reachAddr < uint64(code.MinMemorySize()) || alreadyChecked {
			baseReg = regMemoryBase
			indexReg = NoIndex
			disp = int32(addr)
			return
		}

		Lea.opFromIndirect(code, types.I64, regScratch, 0, NoIndex, regMemoryBase, int32(reachAddr))

	default:
		reg, zeroExt, own := mach.opBorrowMaybeScratchReg(code, index, true)

		if !zeroExt {
			Mov.opFromReg(code, types.I32, reg, reg) // zero-extend index
		}

		if alreadyChecked {
			baseReg = regMemoryBase
			indexReg = reg
			ownIndexReg = own
			disp = int32(offset)
			return
		}

		Lea.opFromIndirect(code, types.I64, regScratch, 0, reg, regMemoryBase, int32(reachOffset))

		if own {
			code.FreeReg(types.I32, reg)
		}
	}

	Cmp.opFromReg(code, types.I64, regScratch, regMemoryLimit)

	if addr := code.TrapTrampolineAddr(traps.MemoryOutOfBounds); addr != 0 {
		Jge.op(code, addr)
	} else {
		var checked links.L

		Jl.rel8.opStub(code)
		checked.AddSite(code.Len())

		code.OpTrapCall(traps.MemoryOutOfBounds)

		checked.Addr = code.Len()
		mach.updateBranches8(code, &checked)
	}

	baseReg = regScratch
	indexReg = NoIndex
	disp = -int32(sizeReach)
	return
}
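
The Lea/Cmp pair above implements a single bounds check on the last byte the access will touch. In plain Go terms (hypothetical sketch, ignoring the out-of-range immediate cases trapped separately above):

func accessInBounds(index, offset, size, memorySize uint64) bool {
	// reachOffset mirrors the computation at the top of opMemoryAddress.
	reachOffset := offset + size - 1
	return index+reachOffset < memorySize
}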

// OpCurrentMemory produces the current linear memory size in wasm pages.
func (mach X86) OpCurrentMemory(code gen.RegCoder) values.Operand {
	Mov.opFromReg(code, types.I64, regResult, regMemoryLimit)
	Sub.opFromReg(code, types.I64, regResult, regMemoryBase)
	ShrImm.op(code, types.I64, regResult, wasm.PageBits)

	return values.TempRegOperand(types.I32, regResult, true)
}

// OpGrowMemory grows linear memory by x pages, producing the old size in
// pages on success or -1 on failure.
func (mach X86) OpGrowMemory(code gen.RegCoder, x values.Operand) values.Operand {
	var out links.L
	var fail links.L

	MovMMX.opToReg(code, types.I64, regScratch, regMemoryGrowLimitMMX)

	targetReg, zeroExt := mach.opMaybeResultReg(code, x, false)
	if !zeroExt {
		Mov.opFromReg(code, types.I32, targetReg, targetReg)
	}

	ShlImm.op(code, types.I64, targetReg, wasm.PageBits)
	Add.opFromReg(code, types.I64, targetReg, regMemoryLimit) // new memory limit
	Cmp.opFromReg(code, types.I64, targetReg, regScratch)

	Jg.rel8.opStub(code)
	fail.AddSite(code.Len())

	Mov.opFromReg(code, types.I64, regScratch, regMemoryLimit)
	Mov.opFromReg(code, types.I64, regMemoryLimit, targetReg)
	Sub.opFromReg(code, types.I64, regScratch, regMemoryBase)
	ShrImm.op(code, types.I64, regScratch, wasm.PageBits) // value on success
	Mov.opFromReg(code, types.I32, targetReg, regScratch)

	JmpRel.rel8.opStub(code)
	out.AddSite(code.Len())

	fail.Addr = code.Len()
	mach.updateBranches8(code, &fail)

	MovImm.opImm(code, types.I32, targetReg, -1) // value on failure

	out.Addr = code.Len()
	mach.updateBranches8(code, &out)

	return values.TempRegOperand(types.I32, targetReg, true)
}
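In plain Go terms the grow sequence does roughly the following. A hypothetical sketch with sizes in 64 KiB wasm pages:

const pageBits = 16 // 64 KiB wasm pages

// growMemory returns the old size in pages on success, or -1 if growing
// past growLimit.
func growMemory(memBase, memLimit, growLimit, pages uintptr) int32 {
	newLimit := memLimit + pages<<pageBits
	if newLimit > growLimit {
		return -1 // value on failure; the limit is left unchanged
	}
	// The real code installs newLimit in the memory-limit register here.
	return int32((memLimit - memBase) >> pageBits) // old size in pages
}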