Example #1
func addAux2(a *Addr, v *ssa.Value, offset int64) {
	if a.Type != TYPE_MEM {
		v.Fatalf("bad addAux addr %v", a)
	}
	// add integer offset
	a.Offset += offset

	// If no additional symbol offset, we're done.
	if v.Aux == nil {
		return
	}
	// Add symbol's offset from its base register.
	switch sym := v.Aux.(type) {
	case *ssa.ExternSymbol:
		a.Name = NAME_EXTERN
		//a.Sym = Linksym(sym.Sym.(*Sym))
	case *ssa.ArgSymbol:
		n := sym.Node.(ssaVar)
		a.Name = NAME_PARAM
		a.Node = n
		a.Sym = &LSym{} //Linksym(n.Orig.Sym)
		a.Sym.Name = n.Name()
		a.Offset += n.Xoffset() // TODO: why do I have to add this here?  I don't for auto variables.
	case *ssa.AutoSymbol:
		n := sym.Node.(ssaVar)
		a.Name = NAME_AUTO
		a.Node = n
		//a.Sym = Linksym(n.Sym)
	default:
		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
	}
}
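Example #3 below calls an addAux helper alongside addAux2. In the upstream compiler, addAux is a thin wrapper that forwards v.AuxInt as the integer offset; assuming this port keeps the same split, the wrapper would look roughly like this (a sketch, not the port's actual code):

// addAux folds v's offset and symbol information into the address a.
// Sketch only: assumes the integer offset comes straight from v.AuxInt,
// as in the upstream addAux/addAux2 pair.
func addAux(a *Addr, v *ssa.Value) {
	addAux2(a, v, v.AuxInt)
}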
Example #2
// regnum returns the register (in cmd/internal/obj numbering) to
// which v has been allocated.  Panics if v is not assigned to a
// register.
// TODO: Make this panic again once it stops happening routinely.
func regnum(v *ssa.Value) int16 {
	reg := v.Block.Func.RegAlloc[v.ID]
	if reg == nil {
		v.Fatalf("nil regnum for value: %s\n%s\n", v.LongString(), v.Block.Func)
		return 0
	}
	return ssaRegToReg[reg.(*ssa.Register).Num()]
}
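regnum indexes the ssaRegToReg table, which maps the SSA allocator's dense register numbers to cmd/internal/obj register constants. The table itself is not part of these examples; a sketch of what it presumably contains, assuming the port keeps the upstream AMD64 ordering (integer registers AX through R15, followed by the X0–X15 floating-point registers):

// ssaRegToReg maps SSA register numbers to obj/x86 register constants.
// Sketch only: the ordering below is assumed to match the upstream AMD64
// register list and must agree with the SSA opcode definitions in use.
var ssaRegToReg = [...]int16{
	x86.REG_AX, x86.REG_CX, x86.REG_DX, x86.REG_BX,
	x86.REG_SP, x86.REG_BP, x86.REG_SI, x86.REG_DI,
	x86.REG_R8, x86.REG_R9, x86.REG_R10, x86.REG_R11,
	x86.REG_R12, x86.REG_R13, x86.REG_R14, x86.REG_R15,
	x86.REG_X0, x86.REG_X1, x86.REG_X2, x86.REG_X3,
	x86.REG_X4, x86.REG_X5, x86.REG_X6, x86.REG_X7,
	x86.REG_X8, x86.REG_X9, x86.REG_X10, x86.REG_X11,
	x86.REG_X12, x86.REG_X13, x86.REG_X14, x86.REG_X15,
}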
Example #3
func (s *genState) genValue(v *ssa.Value) []*Prog {
	var progs []*Prog
	var p *Prog
	lineno = v.Line
	switch v.Op {
	case ssa.OpAMD64ADDQ:
		// TODO: use addq instead of leaq if target is in the right register.
		p = CreateProg(x86.ALEAQ)
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		p.From.Scale = 1
		p.From.Index = regnum(v.Args[1])
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64ADDL:
		p = CreateProg(x86.ALEAL)
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		p.From.Scale = 1
		p.From.Index = regnum(v.Args[1])
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	// 2-address opcode arithmetic, symmetric
	case ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD,
		ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
		ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
		ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
		ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
		ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64PXOR:
		r := regnum(v)
		x := regnum(v.Args[0])
		y := regnum(v.Args[1])
		if x != r && y != r {
			opregreg(regMoveByTypeAMD64(v.Type), r, x)
			x = r
		}
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.To.Type = TYPE_REG
		p.To.Reg = r
		if x == r {
			p.From.Reg = y
		} else {
			p.From.Reg = x
		}
		progs = append(progs, p)
	// 2-address opcode arithmetic, not symmetric
	case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL:
		r := regnum(v)
		x := regnum(v.Args[0])
		y := regnum(v.Args[1])
		var neg bool
		if y == r {
			// compute -(y-x) instead
			x, y = y, x
			neg = true
		}
		if x != r {
			opregreg(regMoveByTypeAMD64(v.Type), r, x)
		}
		opregreg(int(v.Op.Asm()), r, y)

		if neg {
			p = CreateProg(x86.ANEGQ) // TODO: use correct size?  This is mostly a hack until regalloc does 2-address correctly
			p.To.Type = TYPE_REG
			p.To.Reg = r
			progs = append(progs, p)
		}
	case ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD:
		r := regnum(v)
		x := regnum(v.Args[0])
		y := regnum(v.Args[1])
		if y == r && x != r {
			// r/y := x op r/y, need to preserve x and rewrite to
			// r/y := r/y op x15
			x15 := int16(x86.REG_X15)
			// register move y to x15
			// register move x to y
			// rename y with x15
			opregreg(regMoveByTypeAMD64(v.Type), x15, y)
			opregreg(regMoveByTypeAMD64(v.Type), r, x)
			y = x15
		} else if x != r {
			opregreg(regMoveByTypeAMD64(v.Type), r, x)
		}
		opregreg(int(v.Op.Asm()), r, y)

	case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW,
		ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:

		// Arg[0] is already in AX as it's the only register we allow
		// and AX is the only output
		x := regnum(v.Args[1])

		// CPU faults upon signed overflow, which occurs when the most
		// negative int is divided by -1.
		var j *Prog
		if v.Op == ssa.OpAMD64DIVQ || v.Op == ssa.OpAMD64DIVL ||
			v.Op == ssa.OpAMD64DIVW {

			var c *Prog
			switch v.Op {
			case ssa.OpAMD64DIVQ:
				c = CreateProg(x86.ACMPQ)
				j = CreateProg(x86.AJEQ)
				// go ahead and sign extend to save doing it later
				CreateProg(x86.ACQO)

			case ssa.OpAMD64DIVL:
				c = CreateProg(x86.ACMPL)
				j = CreateProg(x86.AJEQ)
				CreateProg(x86.ACDQ)

			case ssa.OpAMD64DIVW:
				c = CreateProg(x86.ACMPW)
				j = CreateProg(x86.AJEQ)
				CreateProg(x86.ACWD)
			}
			c.From.Type = TYPE_REG
			c.From.Reg = x
			c.To.Type = TYPE_CONST
			c.To.Offset = -1

			j.To.Type = TYPE_BRANCH

		}

		// For unsigned ints, we zero-extend by setting DX = 0;
		// signed ints were sign-extended above.
		if v.Op == ssa.OpAMD64DIVQU ||
			v.Op == ssa.OpAMD64DIVLU ||
			v.Op == ssa.OpAMD64DIVWU {
			c := CreateProg(x86.AXORQ)
			c.From.Type = TYPE_REG
			c.From.Reg = x86.REG_DX
			c.To.Type = TYPE_REG
			c.To.Reg = x86.REG_DX
		}

		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = x

		// signed division, rest of the check for -1 case
		if j != nil {
			j2 := CreateProg(obj.AJMP)
			j2.To.Type = TYPE_BRANCH

			var n *Prog
			if v.Op == ssa.OpAMD64DIVQ || v.Op == ssa.OpAMD64DIVL ||
				v.Op == ssa.OpAMD64DIVW {
				// n * -1 = -n
				n = CreateProg(x86.ANEGQ)
				n.To.Type = TYPE_REG
				n.To.Reg = x86.REG_AX
			} else {
				// n % -1 == 0
				n = CreateProg(x86.AXORQ)
				n.From.Type = TYPE_REG
				n.From.Reg = x86.REG_DX
				n.To.Type = TYPE_REG
				n.To.Reg = x86.REG_DX
			}

			j.To.Val = n
			panic("TODO")
			//j2.To.Val = Pc
		}
		progs = append(progs, p)
	case ssa.OpAMD64HMULL, ssa.OpAMD64HMULW, ssa.OpAMD64HMULB,
		ssa.OpAMD64HMULLU, ssa.OpAMD64HMULWU, ssa.OpAMD64HMULBU:
		// the frontend rewrites constant division by 8/16/32 bit integers into
		// HMUL by a constant

		// Arg[0] is already in AX as it's the only register we allow
		// and DX is the only output we care about (the high bits)
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[1])

		// IMULB puts the high portion in AH instead of DL,
		// so move it to DL for consistency
		if v.Type.Size() == 1 {
			m := CreateProg(x86.AMOVB)
			m.From.Type = TYPE_REG
			m.From.Reg = x86.REG_AH
			m.To.Type = TYPE_REG
			m.To.Reg = x86.REG_DX
		}
		progs = append(progs, p)
	case ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
		ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL,
		ssa.OpAMD64SARQ, ssa.OpAMD64SARL:
		x := regnum(v.Args[0])
		r := regnum(v)
		if x != r {
			if r == x86.REG_CX {
				v.Fatalf("can't implement %s, target and shift both in CX", v.LongString())
			}
			p = CreateProg(regMoveAMD64(v.Type.Size()))
			p.From.Type = TYPE_REG
			p.From.Reg = x
			p.To.Type = TYPE_REG
			p.To.Reg = r
		}
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[1]) // should be CX
		p.To.Type = TYPE_REG
		p.To.Reg = r
		progs = append(progs, p)
	case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
		// TODO: use addq instead of leaq if target is in the right register.
		var asm int
		switch v.Op {
		case ssa.OpAMD64ADDQconst:
			asm = x86.ALEAQ
		case ssa.OpAMD64ADDLconst:
			asm = x86.ALEAL
		}
		p = CreateProg(asm)
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		p.From.Offset = v.AuxInt
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
		r := regnum(v)
		x := regnum(v.Args[0])
		if r != x {
			p = CreateProg(regMoveAMD64(v.Type.Size()))
			p.From.Type = TYPE_REG
			p.From.Reg = x
			p.To.Type = TYPE_REG
			p.To.Reg = r
		}
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = TYPE_REG
		p.To.Reg = r
		// TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2
		// instead of using the MOVQ above.
		//p.From3 = new(obj.Addr)
		//p.From3.Type = TYPE_REG
		//p.From3.Reg = regnum(v.Args[0])
		progs = append(progs, p)
	case
		ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst,
		ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
		ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
		ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
		ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
		ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst,
		ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst,
		ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst:
		// This code compensates for the fact that the register allocator
		// doesn't understand 2-address instructions yet.  TODO: fix that.
		x := regnum(v.Args[0])
		r := regnum(v)
		if x != r {
			p = CreateProg(regMoveAMD64(v.Type.Size()))
			p.From.Type = TYPE_REG
			p.From.Reg = x
			p.To.Type = TYPE_REG
			p.To.Reg = r
		}
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = TYPE_REG
		p.To.Reg = r
		progs = append(progs, p)
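	// SBBQcarrymask/SBBLcarrymask: subtracting a register from itself with borrow yields 0 or -1 from the carry flag.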
	case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
		r := regnum(v)
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = r
		p.To.Type = TYPE_REG
		p.To.Reg = r
		progs = append(progs, p)
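	// Indexed LEAQ: Args[0] is the base register, Args[1] the index; the scale is implied by the opcode.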
	case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
		p = CreateProg(x86.ALEAQ)
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		switch v.Op {
		case ssa.OpAMD64LEAQ1:
			p.From.Scale = 1
		case ssa.OpAMD64LEAQ2:
			p.From.Scale = 2
		case ssa.OpAMD64LEAQ4:
			p.From.Scale = 4
		case ssa.OpAMD64LEAQ8:
			p.From.Scale = 8
		}
		p.From.Index = regnum(v.Args[1])
		addAux(&p.From, v)
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64LEAQ:
		p = CreateProg(x86.ALEAQ)
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		addAux(&p.From, v)
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
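	// Flag-producing compares and tests: no output register, so only the two source operands are emitted.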
	case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
		ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB:
		opregreg(int(v.Op.Asm()), regnum(v.Args[1]), regnum(v.Args[0]))
	case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
		// Go assembler has swapped operands for UCOMISx relative to CMP,
		// must account for that right here.
		opregreg(int(v.Op.Asm()), regnum(v.Args[0]), regnum(v.Args[1]))
	case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst,
		ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[0])
		p.To.Type = TYPE_CONST
		p.To.Offset = v.AuxInt
		progs = append(progs, p)
	case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
		x := regnum(v)
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_CONST
		var i int64
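		// Canonicalize the immediate: MOVL keeps only the low 32 bits of AuxInt, sign-extended to 64 bits.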
		switch v.Op {
		case ssa.OpAMD64MOVLconst:
			i = int64(int32(v.AuxInt))
		case ssa.OpAMD64MOVQconst:
			i = v.AuxInt
		}
		p.From.Offset = i
		p.To.Type = TYPE_REG
		p.To.Reg = x
		progs = append(progs, p)
	case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
		x := regnum(v)
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = TYPE_REG
		p.To.Reg = x
		progs = append(progs, p)
	case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVOload:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		addAux(&p.From, v)
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		addAux(&p.From, v)
		p.From.Scale = 8
		p.From.Index = regnum(v.Args[1])
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64MOVSSloadidx4:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_MEM
		p.From.Reg = regnum(v.Args[0])
		addAux(&p.From, v)
		p.From.Scale = 4
		p.From.Index = regnum(v.Args[1])
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[1])
		p.To.Type = TYPE_MEM
		p.To.Reg = regnum(v.Args[0])
		addAux(&p.To, v)
		progs = append(progs, p)
	case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[2])
		p.To.Type = TYPE_MEM
		p.To.Reg = regnum(v.Args[0])
		p.To.Scale = 8
		p.To.Index = regnum(v.Args[1])
		addAux(&p.To, v)
		progs = append(progs, p)
	case ssa.OpAMD64MOVSSstoreidx4:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[2])
		p.To.Type = TYPE_MEM
		p.To.Reg = regnum(v.Args[0])
		p.To.Scale = 4
		p.To.Index = regnum(v.Args[1])
		addAux(&p.To, v)
		progs = append(progs, p)
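	// For the store-constant ops, AuxInt packs both the value to store and the address offset as an ssa.ValAndOff.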
	case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_CONST
		sc := ssa.ValAndOff(v.AuxInt)
		i := sc.Val()
		switch v.Op {
		case ssa.OpAMD64MOVBstoreconst:
			i = int64(int8(i))
		case ssa.OpAMD64MOVWstoreconst:
			i = int64(int16(i))
		case ssa.OpAMD64MOVLstoreconst:
			i = int64(int32(i))
		case ssa.OpAMD64MOVQstoreconst:
		}
		p.From.Offset = i
		p.To.Type = TYPE_MEM
		p.To.Reg = regnum(v.Args[0])
		fmt.Println("P.TO.REG:", Rconv(int(p.To.Reg)))
		addAux2(&p.To, v, sc.Off())
		progs = append(progs, p)
	case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
		ssa.OpAMD64CVTSL2SS, ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSQ2SD,
		ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
		ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
		opregreg(int(v.Op.Asm()), regnum(v), regnum(v.Args[0]))
	case ssa.OpAMD64DUFFZERO:
		p = CreateProg(obj.ADUFFZERO)
		p.To.Type = TYPE_ADDR
		//p.To.Sym = Linksym(Pkglookup("duffzero", Runtimepkg))
		p.To.Offset = v.AuxInt
		progs = append(progs, p)
	case ssa.OpAMD64MOVOconst:
		if v.AuxInt != 0 {
			v.Fatalf("MOVOconst can only do constant=0")
		}
		r := regnum(v)
		opregreg(x86.AXORPS, r, r)
	case ssa.OpAMD64DUFFCOPY:
		p = CreateProg(obj.ADUFFCOPY)
		p.To.Type = TYPE_ADDR
		//p.To.Sym = Linksym(Pkglookup("duffcopy", Runtimepkg))
		p.To.Offset = v.AuxInt
		progs = append(progs, p)
	case ssa.OpCopy: // TODO: lower to MOVQ earlier?
		if v.Type.IsMemory() {
			panic("unimplementedf")
			//return
		}
		x := regnum(v.Args[0])
		y := regnum(v)
		if x != y {
			opregreg(regMoveByTypeAMD64(v.Type), y, x)
		}
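	// OpLoadReg/OpStoreReg move values between registers and their stack slots; autoVar reports the node and offset assigned by stackalloc.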
	case ssa.OpLoadReg:
		if v.Type.IsFlags() {
			v.Fatalf("load flags not implemented: %v", v.LongString())
			panic("unimplementedf")
			//return
		}
		p = CreateProg(movSizeByType(v.Type))
		n, off := autoVar(v.Args[0])
		p.From.Type = TYPE_MEM
		p.From.Node = n
		//p.From.Sym = Linksym(n.Sym)
		p.From.Offset = off
		if n.Class() == PPARAM {
			p.From.Name = NAME_PARAM
			p.From.Offset += n.Xoffset()
		} else {
			p.From.Name = NAME_AUTO
		}
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpStoreReg:
		if v.Type.IsFlags() {
			v.Fatalf("store flags not implemented: %v", v.LongString())
			panic("unimplementedf")
			//return
		}
		p = CreateProg(movSizeByType(v.Type))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[0])
		n, off := autoVar(v)
		p.To.Type = TYPE_MEM
		p.To.Node = n
		//p.To.Sym = Linksym(n.Sym)
		p.To.Offset = off
		if n.Class() == PPARAM {
			p.To.Name = NAME_PARAM
			p.To.Offset += n.Xoffset()
		} else {
			p.To.Name = NAME_AUTO
		}
		progs = append(progs, p)
	case ssa.OpPhi:
		// just check to make sure regalloc and stackalloc did it right
		if v.Type.IsMemory() {
			panic("unimplementedf")
			//return
		}
		f := v.Block.Func
		loc := f.RegAlloc[v.ID]
		for _, a := range v.Args {
			if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
				v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
			}
		}
	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64, ssa.OpConstString, ssa.OpConstNil, ssa.OpConstBool,
		ssa.OpConst32F, ssa.OpConst64F:
		fmt.Println("v.ID:", v.ID)
		f := v.Block.Func
		fmt.Println("f.RegAlloc:", f.RegAlloc)
		fmt.Println("len(f.RegAlloc):", len(f.RegAlloc))
		if v.Block.Func.RegAlloc[v.ID] != nil {
			v.Fatalf("const value %v shouldn't have a location", v)
		}

	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpAMD64LoweredGetClosurePtr:
		// Output is hardwired to DX only,
		// and DX contains the closure pointer on
		// closure entry, and this "instruction"
		// is scheduled to the very beginning
		// of the entry block.
	case ssa.OpAMD64LoweredGetG:
		panic("unimplementedf")
	case ssa.OpAMD64CALLstatic:
		panic("unimplementedf")
	case ssa.OpAMD64CALLclosure:
		panic("unimplementedf")
	case ssa.OpAMD64CALLdefer:
		panic("unimplementedf")
	case ssa.OpAMD64CALLgo:
		panic("unimplementedf")
	case ssa.OpAMD64CALLinter:
		p = CreateProg(obj.ACALL)
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v.Args[0])
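		// AuxInt holds the size of the call's argument area; track the maximum so the frame is laid out large enough.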
		if Maxarg < v.AuxInt {
			Maxarg = v.AuxInt
		}
		progs = append(progs, p)
	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
		ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
		x := regnum(v.Args[0])
		r := regnum(v)
		if x != r {
			p = CreateProg(regMoveAMD64(v.Type.Size()))
			p.From.Type = TYPE_REG
			p.From.Reg = x
			p.To.Type = TYPE_REG
			p.To.Reg = r
		}
		p = CreateProg(int(v.Op.Asm()))
		p.To.Type = TYPE_REG
		p.To.Reg = r
		progs = append(progs, p)
	case ssa.OpAMD64SQRTSD:
		p = CreateProg(int(v.Op.Asm()))
		p.From.Type = TYPE_REG
		p.From.Reg = regnum(v.Args[0])
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpSP, ssa.OpSB:
		// nothing to do
	case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
		ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
		ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
		ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
		ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
		ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
		ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
		p = CreateProg(int(v.Op.Asm()))
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		progs = append(progs, p)
	case ssa.OpAMD64SETNEF:
		p = CreateProg(int(v.Op.Asm()))
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		q := CreateProg(x86.ASETPS)
		q.To.Type = TYPE_REG
		q.To.Reg = x86.REG_AX
		// TODO AORQ copied from old code generator, why not AORB?
		opregreg(x86.AORQ, regnum(v), x86.REG_AX)
		progs = append(progs, p)
	case ssa.OpAMD64SETEQF:
		p = CreateProg(int(v.Op.Asm()))
		p.To.Type = TYPE_REG
		p.To.Reg = regnum(v)
		q := CreateProg(x86.ASETPC)
		q.To.Type = TYPE_REG
		q.To.Reg = x86.REG_AX
		// TODO AANDQ copied from old code generator, why not AANDB?
		opregreg(x86.AANDQ, regnum(v), x86.REG_AX)
		progs = append(progs, p)
	case ssa.OpAMD64InvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v)
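	// REP-prefixed string ops are emitted as two Progs: the REP prefix followed by the string instruction.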
	case ssa.OpAMD64REPSTOSQ:
		p := CreateProg(x86.AREP)
		q := CreateProg(x86.ASTOSQ)
		progs = append(progs, p)
		progs = append(progs, q)
	case ssa.OpAMD64REPMOVSQ:
		p := CreateProg(x86.AREP)
		q := CreateProg(x86.AMOVSQ)
		progs = append(progs, p)
		progs = append(progs, q)
	case ssa.OpVarDef:
		panic("unimplementedf")
		//Gvardef(v.Aux.(*Node))
	case ssa.OpVarKill:
		panic("unimplementedf")
		//gvarkill(v.Aux.(*Node))
	case ssa.OpAMD64LoweredNilCheck:
		// Optimization - if the subsequent block has a load or store
		// at the same address, we don't need to issue this instruction.
		for _, w := range v.Block.Succs[0].Block().Values {
			if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
				// w doesn't use a store - can't be a memory op.
				continue
			}
			if w.Args[len(w.Args)-1] != v.Args[1] {
				v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
			}
			switch w.Op {
			case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload,
				ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore:
				if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
					panic("unimplementedf")
					//return
				}
			case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
				off := ssa.ValAndOff(v.AuxInt).Off()
				if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
					panic("unimplementedf")
					//return
				}
			}
			if w.Type.IsMemory() {
				// We can't delay the nil check past the next store.
				break
			}
		}
		// Issue a load which will fault if the input is nil.
		// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
		// Should we use the 3-byte TESTB $0, (reg) instead?  It is larger
		// but it doesn't have false dependency on AX.
		// Or maybe allocate an output register and use MOVL (reg),reg2 ?
		// That trades clobbering flags for clobbering a register.
		p = CreateProg(x86.ATESTB)
		p.From.Type = TYPE_REG
		p.From.Reg = x86.REG_AX
		p.To.Type = TYPE_MEM
		p.To.Reg = regnum(v.Args[0])
		addAux(&p.To, v)
		if Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			Warnl(int(v.Line), "generated nil check")
		}
		progs = append(progs, p)
	default:
		fmt.Println("unimplemented OP:", v.Op.String())
		v.Fatalf("genValue not implemented: %s", v.LongString())
		panic("unimplementedf")

	}
	return progs
}
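genValue leans heavily on an opregreg helper that is not shown in these examples. In the upstream amd64 backend, opregreg emits dest := dest op src as a single two-operand instruction and returns the Prog so the caller can adjust it; assuming this port exposes the same helper built on CreateProg, it would look roughly like this (the exact signature is an assumption):

// opregreg emits dest := dest op src as one two-operand instruction and
// returns the created Prog for further adjustment.
// Sketch only: the real helper in this port may record the Prog differently.
func opregreg(op int, dest, src int16) *Prog {
	p := CreateProg(op)
	p.From.Type = TYPE_REG
	p.From.Reg = src
	p.To.Type = TYPE_REG
	p.To.Reg = dest
	return p
}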