func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { case ssa.OpInitMem: // memory arg needs no code case ssa.OpArg: // input args need no code case ssa.OpSP, ssa.OpSB, ssa.OpGetG: // nothing to do case ssa.OpCopy, ssa.OpMIPS64MOVVconvert, ssa.OpMIPS64MOVVreg: if v.Type.IsMemory() { return } x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) if x == y { return } as := mips.AMOVV if v.Type.IsFloat() { switch v.Type.Size() { case 4: as = mips.AMOVF case 8: as = mips.AMOVD default: panic("bad float size") } } p := gc.Prog(as) p.From.Type = obj.TYPE_REG p.From.Reg = x p.To.Type = obj.TYPE_REG p.To.Reg = y case ssa.OpMIPS64MOVVnop: if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not in same register %s", v.LongString()) } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { v.Unimplementedf("load flags not implemented: %v", v.LongString()) return } r := gc.SSARegNum(v) p := gc.Prog(loadByType(v.Type, r)) n, off := gc.AutoVar(v.Args[0]) p.From.Type = obj.TYPE_MEM p.From.Node = n p.From.Sym = gc.Linksym(n.Sym) p.From.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.From.Name = obj.NAME_PARAM p.From.Offset += n.Xoffset } else { p.From.Name = obj.NAME_AUTO } p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpPhi: gc.CheckLoweredPhi(v) case ssa.OpStoreReg: if v.Type.IsFlags() { v.Unimplementedf("store flags not implemented: %v", v.LongString()) return } r := gc.SSARegNum(v.Args[0]) p := gc.Prog(storeByType(v.Type, r)) p.From.Type = obj.TYPE_REG p.From.Reg = r n, off := gc.AutoVar(v) p.To.Type = obj.TYPE_MEM p.To.Node = n p.To.Sym = gc.Linksym(n.Sym) p.To.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.To.Name = obj.NAME_PARAM p.To.Offset += n.Xoffset } else { p.To.Name = obj.NAME_AUTO } case ssa.OpMIPS64ADDV, ssa.OpMIPS64SUBV, ssa.OpMIPS64AND, ssa.OpMIPS64OR, ssa.OpMIPS64XOR, ssa.OpMIPS64NOR, ssa.OpMIPS64SLLV, ssa.OpMIPS64SRLV, ssa.OpMIPS64SRAV, ssa.OpMIPS64ADDF, ssa.OpMIPS64ADDD, ssa.OpMIPS64SUBF, ssa.OpMIPS64SUBD, ssa.OpMIPS64MULF, ssa.OpMIPS64MULD, ssa.OpMIPS64DIVF, ssa.OpMIPS64DIVD: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64SGT, ssa.OpMIPS64SGTU: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64ADDVconst, ssa.OpMIPS64SUBVconst, ssa.OpMIPS64ANDconst, ssa.OpMIPS64ORconst, ssa.OpMIPS64XORconst, ssa.OpMIPS64NORconst, ssa.OpMIPS64SLLVconst, ssa.OpMIPS64SRLVconst, ssa.OpMIPS64SRAVconst, ssa.OpMIPS64SGTconst, ssa.OpMIPS64SGTUconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64MULV, ssa.OpMIPS64MULVU, ssa.OpMIPS64DIVV, ssa.OpMIPS64DIVVU: // result in hi,lo p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpMIPS64MOVVconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64MOVFconst, ssa.OpMIPS64MOVDconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64CMPEQF, ssa.OpMIPS64CMPEQD, 
ssa.OpMIPS64CMPGEF, ssa.OpMIPS64CMPGED, ssa.OpMIPS64CMPGTF, ssa.OpMIPS64CMPGTD: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.Reg = gc.SSARegNum(v.Args[1]) case ssa.OpMIPS64MOVVaddr: p := gc.Prog(mips.AMOVV) p.From.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) var wantreg string // MOVV $sym+off(base), R // the assembler expands it as the following: // - base is SP: add constant offset to SP (R29) // when constant is large, tmp register (R23) may be used // - base is SB: load external address with relocation switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) case *ssa.ExternSymbol: wantreg = "SB" gc.AddAux(&p.From, v) case *ssa.ArgSymbol, *ssa.AutoSymbol: wantreg = "SP" gc.AddAux(&p.From, v) case nil: // No sym, just MOVV $off(SP), R wantreg = "SP" p.From.Reg = mips.REGSP p.From.Offset = v.AuxInt } if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg { v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg) } case ssa.OpMIPS64MOVBload, ssa.OpMIPS64MOVBUload, ssa.OpMIPS64MOVHload, ssa.OpMIPS64MOVHUload, ssa.OpMIPS64MOVWload, ssa.OpMIPS64MOVWUload, ssa.OpMIPS64MOVVload, ssa.OpMIPS64MOVFload, ssa.OpMIPS64MOVDload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64MOVBstore, ssa.OpMIPS64MOVHstore, ssa.OpMIPS64MOVWstore, ssa.OpMIPS64MOVVstore, ssa.OpMIPS64MOVFstore, ssa.OpMIPS64MOVDstore: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpMIPS64MOVBstorezero, ssa.OpMIPS64MOVHstorezero, ssa.OpMIPS64MOVWstorezero, ssa.OpMIPS64MOVVstorezero: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = mips.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpMIPS64MOVBreg, ssa.OpMIPS64MOVBUreg, ssa.OpMIPS64MOVHreg, ssa.OpMIPS64MOVHUreg, ssa.OpMIPS64MOVWreg, ssa.OpMIPS64MOVWUreg: // TODO: remove extension if after proper load fallthrough case ssa.OpMIPS64MOVWF, ssa.OpMIPS64MOVWD, ssa.OpMIPS64MOVFW, ssa.OpMIPS64MOVDW, ssa.OpMIPS64MOVVF, ssa.OpMIPS64MOVVD, ssa.OpMIPS64MOVFV, ssa.OpMIPS64MOVDV, ssa.OpMIPS64MOVFD, ssa.OpMIPS64MOVDF, ssa.OpMIPS64NEGF, ssa.OpMIPS64NEGD: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64NEGV: // SUB from REGZERO p := gc.Prog(mips.ASUBVU) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.Reg = mips.REGZERO p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpMIPS64CALLstatic: if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. // However, the stack trace code will show the line // of the instruction byte before the return PC. // To avoid that being an unrelated instruction, // insert an actual hardware NOP that will have the right line number. // This is different from obj.ANOP, which is a virtual no-op // that doesn't make it into the instruction stream. 
ginsnop() } p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym)) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpMIPS64CALLclosure: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = gc.SSARegNum(v.Args[0]) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpMIPS64CALLdefer: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Deferproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpMIPS64CALLgo: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Newproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpMIPS64CALLinter: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = gc.SSARegNum(v.Args[0]) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpMIPS64LoweredNilCheck: // TODO: optimization // Issue a load which will fault if arg is nil. p := gc.Prog(mips.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = mips.REGZERO if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers gc.Warnl(v.Line, "generated nil check") } case ssa.OpVarDef: gc.Gvardef(v.Aux.(*gc.Node)) case ssa.OpVarKill: gc.Gvarkill(v.Aux.(*gc.Node)) case ssa.OpVarLive: gc.Gvarlive(v.Aux.(*gc.Node)) case ssa.OpKeepAlive: if !v.Args[0].Type.IsPtrShaped() { v.Fatalf("keeping non-pointer alive %v", v.Args[0]) } n, off := gc.AutoVar(v.Args[0]) if n == nil { v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0]) } if off != 0 { v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off) } gc.Gvarlive(n) case ssa.OpMIPS64FPFlagTrue, ssa.OpMIPS64FPFlagFalse: // MOVV $0, r // BFPF 2(PC) // MOVV $1, r branch := mips.ABFPF if v.Op == ssa.OpMIPS64FPFlagFalse { branch = mips.ABFPT } p := gc.Prog(mips.AMOVV) p.From.Type = obj.TYPE_REG p.From.Reg = mips.REGZERO p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) p2 := gc.Prog(branch) p2.To.Type = obj.TYPE_BRANCH p3 := gc.Prog(mips.AMOVV) p3.From.Type = obj.TYPE_CONST p3.From.Offset = 1 p3.To.Type = obj.TYPE_REG p3.To.Reg = gc.SSARegNum(v) p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land gc.Patch(p2, p4) case ssa.OpSelect0, ssa.OpSelect1: // nothing to do case ssa.OpMIPS64LoweredGetClosurePtr: // Closure pointer is R22 (mips.REGCTXT). gc.CheckLoweredGetClosurePtr(v) default: v.Unimplementedf("genValue not implemented: %s", v.LongString()) } }
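// For reference: the loadByType/storeByType helpers used by the MIPS64
// OpLoadReg/OpStoreReg cases above pick a MOVx mnemonic from the value's type
// and the register class of the target register. A minimal sketch, assuming an
// isFPreg helper; the real definitions live elsewhere in the package and may
// differ in detail:
//
//	func loadByType(t ssa.Type, r int16) obj.As {
//		if isFPreg(r) {
//			if t.Size() == 4 {
//				return mips.AMOVF
//			}
//			return mips.AMOVD
//		}
//		switch t.Size() {
//		case 1:
//			if t.IsSigned() {
//				return mips.AMOVB
//			}
//			return mips.AMOVBU
//		case 2:
//			if t.IsSigned() {
//				return mips.AMOVH
//			}
//			return mips.AMOVHU
//		case 4:
//			if t.IsSigned() {
//				return mips.AMOVW
//			}
//			return mips.AMOVWU
//		case 8:
//			return mips.AMOVV
//		}
//		panic("bad load type")
//	}
//
// storeByType has the same shape but ignores signedness, since stores do not
// sign- or zero-extend.
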
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { case ssa.OpInitMem: // memory arg needs no code case ssa.OpArg: // input args need no code case ssa.OpSP, ssa.OpSB, ssa.OpGetG: // nothing to do case ssa.OpCopy, ssa.OpPPC64MOVDconvert: t := v.Type if t.IsMemory() { return } x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) if x != y { rt := obj.TYPE_REG op := ppc64.AMOVD if t.IsFloat() { op = ppc64.AFMOVD } p := gc.Prog(op) p.From.Type = rt p.From.Reg = x p.To.Type = rt p.To.Reg = y } case ssa.OpPPC64Xf2i64: { x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) p := gc.Prog(ppc64.AFMOVD) p.From.Type = obj.TYPE_REG p.From.Reg = x scratchFpMem(s, &p.To) p = gc.Prog(ppc64.AMOVD) p.To.Type = obj.TYPE_REG p.To.Reg = y scratchFpMem(s, &p.From) } case ssa.OpPPC64Xi2f64: { x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) p := gc.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_REG p.From.Reg = x scratchFpMem(s, &p.To) p = gc.Prog(ppc64.AFMOVD) p.To.Type = obj.TYPE_REG p.To.Reg = y scratchFpMem(s, &p.From) } case ssa.OpPPC64LoweredGetClosurePtr: // Closure pointer is R11 (already) gc.CheckLoweredGetClosurePtr(v) case ssa.OpLoadReg: loadOp := loadByType(v.Type) n, off := gc.AutoVar(v.Args[0]) p := gc.Prog(loadOp) p.From.Type = obj.TYPE_MEM p.From.Node = n p.From.Sym = gc.Linksym(n.Sym) p.From.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.From.Name = obj.NAME_PARAM p.From.Offset += n.Xoffset } else { p.From.Name = obj.NAME_AUTO } p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpStoreReg: storeOp := storeByType(v.Type) n, off := gc.AutoVar(v) p := gc.Prog(storeOp) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_MEM p.To.Node = n p.To.Sym = gc.Linksym(n.Sym) p.To.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.To.Name = obj.NAME_PARAM p.To.Offset += n.Xoffset } else { p.To.Name = obj.NAME_AUTO } case ssa.OpPPC64DIVD: // For now, // // cmp arg1, -1 // be ahead // v = arg0 / arg1 // b over // ahead: v = - arg0 // over: nop r := gc.SSARegNum(v) r0 := gc.SSARegNum(v.Args[0]) r1 := gc.SSARegNum(v.Args[1]) p := gc.Prog(ppc64.ACMP) p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.To.Type = obj.TYPE_CONST p.To.Offset = -1 pbahead := gc.Prog(ppc64.ABEQ) pbahead.To.Type = obj.TYPE_BRANCH p = gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.Reg = r0 p.To.Type = obj.TYPE_REG p.To.Reg = r pbover := gc.Prog(obj.AJMP) pbover.To.Type = obj.TYPE_BRANCH p = gc.Prog(ppc64.ANEG) p.To.Type = obj.TYPE_REG p.To.Reg = r p.From.Type = obj.TYPE_REG p.From.Reg = r0 gc.Patch(pbahead, p) p = gc.Prog(obj.ANOP) gc.Patch(pbover, p) case ssa.OpPPC64DIVW: // word-width version of above r := gc.SSARegNum(v) r0 := gc.SSARegNum(v.Args[0]) r1 := gc.SSARegNum(v.Args[1]) p := gc.Prog(ppc64.ACMPW) p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.To.Type = obj.TYPE_CONST p.To.Offset = -1 pbahead := gc.Prog(ppc64.ABEQ) pbahead.To.Type = obj.TYPE_BRANCH p = gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.Reg = r0 p.To.Type = obj.TYPE_REG p.To.Reg = r pbover := gc.Prog(obj.AJMP) pbover.To.Type = obj.TYPE_BRANCH p = gc.Prog(ppc64.ANEG) p.To.Type = obj.TYPE_REG p.To.Reg = r p.From.Type = obj.TYPE_REG p.From.Reg = r0 gc.Patch(pbahead, p) p = gc.Prog(obj.ANOP) gc.Patch(pbover, p) case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS, ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU, ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, 
ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW, ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU, ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64XOR, ssa.OpPPC64EQV: r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpPPC64MaskIfNotCarry: r := gc.SSARegNum(v) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = ppc64.REGZERO p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpPPC64ADDconstForCarry: r1 := gc.SSARegNum(v.Args[0]) p := gc.Prog(v.Op.Asm()) p.Reg = r1 p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect. case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP: r := gc.SSARegNum(v) p := gc.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG p.To.Reg = r p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst: p := gc.Prog(v.Op.Asm()) p.Reg = gc.SSARegNum(v.Args[0]) if v.Aux != nil { p.From.Type = obj.TYPE_CONST p.From.Offset = gc.AuxOffset(v) } else { p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt } p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPPC64MOVDaddr: p := gc.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) var wantreg string // Suspect comment, copied from ARM code // MOVD $sym+off(base), R // the assembler expands it as the following: // - base is SP: add constant offset to SP // when constant is large, tmp register (R11) may be used // - base is SB: load external address from constant pool (use relocation) switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) case *ssa.ExternSymbol: wantreg = "SB" gc.AddAux(&p.From, v) case *ssa.ArgSymbol, *ssa.AutoSymbol: wantreg = "SP" gc.AddAux(&p.From, v) case nil: // No sym, just MOVD $off(SP), R wantreg = "SP" p.From.Reg = ppc64.REGSP p.From.Offset = v.AuxInt } if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg { v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg) } case ssa.OpPPC64MOVDconst, ssa.OpPPC64MOVWconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v.Args[1]) case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_CONST p.To.Offset = v.AuxInt case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg: 
// Shift in register to required size p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Reg = gc.SSARegNum(v) p.To.Type = obj.TYPE_REG case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVBload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = ppc64.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpPPC64Equal, ssa.OpPPC64NotEqual, ssa.OpPPC64LessThan, ssa.OpPPC64FLessThan, ssa.OpPPC64LessEqual, ssa.OpPPC64GreaterThan, ssa.OpPPC64FGreaterThan, ssa.OpPPC64GreaterEqual: // On Power7 or later, can use isel instruction: // for a < b, a > b, a = b: // rt := 1 // isel rt,rt,r0,cond // for a >= b, a <= b, a != b: // rt := 1 // isel rt,0,rt,!cond // However, PPCbe support is for older machines than that, // and isel (which looks a lot like fsel) isn't recognized // yet by the Go assembler. So for now, use the old instruction // sequence, which we'll need anyway. // TODO: add support for isel on PPCle and use it. // generate boolean values // use conditional move p := gc.Prog(ppc64.AMOVW) p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) pb := gc.Prog(condOps[v.Op]) pb.To.Type = obj.TYPE_BRANCH p = gc.Prog(ppc64.AMOVW) p.From.Type = obj.TYPE_CONST p.From.Offset = 0 p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) p = gc.Prog(obj.ANOP) gc.Patch(pb, p) case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion ssa.OpPPC64FGreaterEqual: p := gc.Prog(ppc64.AMOVW) p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) pb0 := gc.Prog(condOps[v.Op]) pb0.To.Type = obj.TYPE_BRANCH pb1 := gc.Prog(ppc64.ABEQ) pb1.To.Type = obj.TYPE_BRANCH p = gc.Prog(ppc64.AMOVW) p.From.Type = obj.TYPE_CONST p.From.Offset = 0 p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) p = gc.Prog(obj.ANOP) gc.Patch(pb0, p) gc.Patch(pb1, p) case ssa.OpPPC64LoweredZero: // Similar to how this is done on ARM, // except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off // not store-and-increment. // Therefore R3 should be dest-align // and arg1 should be dest+size-align // HOWEVER, the input dest address cannot be dest-align because // that does not necessarily address valid memory and it's not // known how that might be optimized. 
Therefore, correct it in // in the expansion: // // ADD -8,R3,R3 // MOVDU R0, 8(R3) // CMP R3, Rarg1 // BL -2(PC) // arg1 is the address of the last element to zero // auxint is alignment var sz int64 var movu obj.As switch { case v.AuxInt%8 == 0: sz = 8 movu = ppc64.AMOVDU case v.AuxInt%4 == 0: sz = 4 movu = ppc64.AMOVWZU // MOVWU instruction not implemented case v.AuxInt%2 == 0: sz = 2 movu = ppc64.AMOVHU default: sz = 1 movu = ppc64.AMOVBU } p := gc.Prog(ppc64.AADD) p.Reg = gc.SSARegNum(v.Args[0]) p.From.Type = obj.TYPE_CONST p.From.Offset = -sz p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v.Args[0]) p = gc.Prog(movu) p.From.Type = obj.TYPE_REG p.From.Reg = ppc64.REG_R0 p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) p.To.Offset = sz p2 := gc.Prog(ppc64.ACMPU) p2.From.Type = obj.TYPE_REG p2.From.Reg = gc.SSARegNum(v.Args[0]) p2.To.Reg = gc.SSARegNum(v.Args[1]) p2.To.Type = obj.TYPE_REG p3 := gc.Prog(ppc64.ABLT) p3.To.Type = obj.TYPE_BRANCH gc.Patch(p3, p) case ssa.OpPPC64LoweredMove: // Similar to how this is done on ARM, // except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off, // not store-and-increment. // Inputs must be valid pointers to memory, // so adjust arg0 and arg1 as part of the expansion. // arg2 should be src+size-align, // // ADD -8,R3,R3 // ADD -8,R4,R4 // MOVDU 8(R4), Rtmp // MOVDU Rtmp, 8(R3) // CMP R4, Rarg2 // BL -3(PC) // arg2 is the address of the last element of src // auxint is alignment var sz int64 var movu obj.As switch { case v.AuxInt%8 == 0: sz = 8 movu = ppc64.AMOVDU case v.AuxInt%4 == 0: sz = 4 movu = ppc64.AMOVWZU // MOVWU instruction not implemented case v.AuxInt%2 == 0: sz = 2 movu = ppc64.AMOVHU default: sz = 1 movu = ppc64.AMOVBU } p := gc.Prog(ppc64.AADD) p.Reg = gc.SSARegNum(v.Args[0]) p.From.Type = obj.TYPE_CONST p.From.Offset = -sz p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v.Args[0]) p = gc.Prog(ppc64.AADD) p.Reg = gc.SSARegNum(v.Args[1]) p.From.Type = obj.TYPE_CONST p.From.Offset = -sz p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v.Args[1]) p = gc.Prog(movu) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[1]) p.From.Offset = sz p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REGTMP p2 := gc.Prog(movu) p2.From.Type = obj.TYPE_REG p2.From.Reg = ppc64.REGTMP p2.To.Type = obj.TYPE_MEM p2.To.Reg = gc.SSARegNum(v.Args[0]) p2.To.Offset = sz p3 := gc.Prog(ppc64.ACMPU) p3.From.Reg = gc.SSARegNum(v.Args[1]) p3.From.Type = obj.TYPE_REG p3.To.Reg = gc.SSARegNum(v.Args[2]) p3.To.Type = obj.TYPE_REG p4 := gc.Prog(ppc64.ABLT) p4.To.Type = obj.TYPE_BRANCH gc.Patch(p4, p) case ssa.OpPPC64CALLstatic: if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. // However, the stack trace code will show the line // of the instruction byte before the return PC. // To avoid that being an unrelated instruction, // insert two actual hardware NOPs that will have the right line number. // This is different from obj.ANOP, which is a virtual no-op // that doesn't make it into the instruction stream. // PPC64 is unusual because TWO nops are required // (see gc/cgen.go, gc/plive.go -- copy of comment below) // // On ppc64, when compiling Go into position // independent code on ppc64le we insert an // instruction to reload the TOC pointer from the // stack as well. See the long comment near // jmpdefer in runtime/asm_ppc64.s for why. 
// If the MOVD is not needed, insert a hardware NOP // so that the same number of instructions are used // on ppc64 in both shared and non-shared modes. ginsnop() if gc.Ctxt.Flag_shared { p := gc.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_MEM p.From.Offset = 24 p.From.Reg = ppc64.REGSP p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REG_R2 } else { ginsnop() } } p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym)) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter: p := gc.Prog(ppc64.AMOVD) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REG_CTR if gc.Ctxt.Flag_shared && p.From.Reg != ppc64.REG_R12 { // Make sure function pointer is in R12 as well when // compiling Go into PIC. // TODO(mwhudson): it would obviously be better to // change the register allocation to put the value in // R12 already, but I don't know how to do that. // TODO: We have the technology now to implement TODO above. q := gc.Prog(ppc64.AMOVD) q.From = p.From q.To.Type = obj.TYPE_REG q.To.Reg = ppc64.REG_R12 } pp := gc.Prog(obj.ACALL) pp.To.Type = obj.TYPE_REG pp.To.Reg = ppc64.REG_CTR if gc.Ctxt.Flag_shared { // When compiling Go into PIC, the function we just // called via pointer might have been implemented in // a separate module and so overwritten the TOC // pointer in R2; reload it. q := gc.Prog(ppc64.AMOVD) q.From.Type = obj.TYPE_MEM q.From.Offset = 24 q.From.Reg = ppc64.REGSP q.To.Type = obj.TYPE_REG q.To.Reg = ppc64.REG_R2 } if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpPPC64CALLdefer: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Deferproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpPPC64CALLgo: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Newproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpVarDef: gc.Gvardef(v.Aux.(*gc.Node)) case ssa.OpVarKill: gc.Gvarkill(v.Aux.(*gc.Node)) case ssa.OpVarLive: gc.Gvarlive(v.Aux.(*gc.Node)) case ssa.OpKeepAlive: if !v.Args[0].Type.IsPtrShaped() { v.Fatalf("keeping non-pointer alive %v", v.Args[0]) } n, off := gc.AutoVar(v.Args[0]) if n == nil { v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0]) } if off != 0 { v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off) } gc.Gvarlive(n) case ssa.OpPhi: // just check to make sure regalloc and stackalloc did it right if v.Type.IsMemory() { return } f := v.Block.Func loc := f.RegAlloc[v.ID] for _, a := range v.Args { if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) } } case ssa.OpPPC64LoweredNilCheck: // Optimization - if the subsequent block has a load or store // at the same address, we don't need to issue this instruction. // mem := v.Args[1] // for _, w := range v.Block.Succs[0].Block().Values { // if w.Op == ssa.OpPhi { // if w.Type.IsMemory() { // mem = w // } // continue // } // if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() { // // w doesn't use a store - can't be a memory op. 
// continue // } // if w.Args[len(w.Args)-1] != mem { // v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w) // } // switch w.Op { // case ssa.OpPPC64MOVBload, ssa.OpPPC64MOVBUload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVHUload, // ssa.OpPPC64MOVWload, ssa.OpPPC64MOVFload, ssa.OpPPC64MOVDload, // ssa.OpPPC64MOVBstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVWstore, // ssa.OpPPC64MOVFstore, ssa.OpPPC64MOVDstore: // // arg0 is ptr, auxint is offset // if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage { // if gc.Debug_checknil != 0 && int(v.Line) > 1 { // gc.Warnl(v.Line, "removed nil check") // } // return // } // case ssa.OpPPC64DUFFZERO, ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroU: // // arg0 is ptr // if w.Args[0] == v.Args[0] { // if gc.Debug_checknil != 0 && int(v.Line) > 1 { // gc.Warnl(v.Line, "removed nil check") // } // return // } // case ssa.OpPPC64DUFFCOPY, ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveU: // // arg0 is dst ptr, arg1 is src ptr // if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] { // if gc.Debug_checknil != 0 && int(v.Line) > 1 { // gc.Warnl(v.Line, "removed nil check") // } // return // } // default: // } // if w.Type.IsMemory() { // if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive { // // these ops are OK // mem = w // continue // } // // We can't delay the nil check past the next store. // break // } // } // Issue a load which will fault if arg is nil. p := gc.Prog(ppc64.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REGTMP if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers gc.Warnl(v.Line, "generated nil check") } case ssa.OpPPC64InvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT: v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) default: v.Unimplementedf("genValue not implemented: %s", v.LongString()) } }
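// ssaGenValue emits the machine instructions for a single SSA value on ARM.
// Most lowered ops map to a single Prog; software division/modulo,
// LoweredZero, LoweredMove, and nil checks expand into short
// multi-instruction sequences.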
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { case ssa.OpInitMem: // memory arg needs no code case ssa.OpArg: // input args need no code case ssa.OpSP, ssa.OpSB, ssa.OpGetG: // nothing to do case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg: if v.Type.IsMemory() { return } x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) if x == y { return } as := arm.AMOVW if v.Type.IsFloat() { switch v.Type.Size() { case 4: as = arm.AMOVF case 8: as = arm.AMOVD default: panic("bad float size") } } p := gc.Prog(as) p.From.Type = obj.TYPE_REG p.From.Reg = x p.To.Type = obj.TYPE_REG p.To.Reg = y case ssa.OpARMMOVWnop: if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not in same register %s", v.LongString()) } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { v.Unimplementedf("load flags not implemented: %v", v.LongString()) return } p := gc.Prog(loadByType(v.Type)) n, off := gc.AutoVar(v.Args[0]) p.From.Type = obj.TYPE_MEM p.From.Node = n p.From.Sym = gc.Linksym(n.Sym) p.From.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.From.Name = obj.NAME_PARAM p.From.Offset += n.Xoffset } else { p.From.Name = obj.NAME_AUTO } p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPhi: gc.CheckLoweredPhi(v) case ssa.OpStoreReg: if v.Type.IsFlags() { v.Unimplementedf("store flags not implemented: %v", v.LongString()) return } p := gc.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) n, off := gc.AutoVar(v) p.To.Type = obj.TYPE_MEM p.To.Node = n p.To.Sym = gc.Linksym(n.Sym) p.To.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.To.Name = obj.NAME_PARAM p.To.Offset += n.Xoffset } else { p.To.Name = obj.NAME_AUTO } case ssa.OpARMDIV, ssa.OpARMDIVU, ssa.OpARMMOD, ssa.OpARMMODU: // Note: for software division the assembler rewrites these // instructions into a sequence of instructions: // - it puts the numerator in R11 and the denominator in g.m.divmod // and calls (say) _udiv // - _udiv saves R0-R3 on the stack and calls udiv, restoring R0-R3 // before returning // - udiv does the actual work // TODO: set appropriate regmasks and call udiv directly? // need to be careful about the negative case // Or, as soft div is already expensive, we don't care?
fallthrough case ssa.OpARMADD, ssa.OpARMADC, ssa.OpARMSUB, ssa.OpARMSBC, ssa.OpARMRSB, ssa.OpARMAND, ssa.OpARMOR, ssa.OpARMXOR, ssa.OpARMBIC, ssa.OpARMMUL, ssa.OpARMADDF, ssa.OpARMADDD, ssa.OpARMSUBF, ssa.OpARMSUBD, ssa.OpARMMULF, ssa.OpARMMULD, ssa.OpARMDIVF, ssa.OpARMDIVD: r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMADDS, ssa.OpARMSUBS: r := gc.SSARegNum1(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) p := gc.Prog(v.Op.Asm()) p.Scond = arm.C_SBIT p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMSLL, ssa.OpARMSRL, ssa.OpARMSRA: r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMSRAcond: // ARM shift instructions uses only the low-order byte of the shift amount // generate conditional instructions to deal with large shifts // flag is already set // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit // SRA.LO Rarg1, Rarg0, Rdst r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) p := gc.Prog(arm.ASRA) p.Scond = arm.C_SCOND_HS p.From.Type = obj.TYPE_CONST p.From.Offset = 31 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r p = gc.Prog(arm.ASRA) p.Scond = arm.C_SCOND_LO p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARMADDconst, ssa.OpARMADCconst, ssa.OpARMSUBconst, ssa.OpARMSBCconst, ssa.OpARMRSBconst, ssa.OpARMRSCconst, ssa.OpARMANDconst, ssa.OpARMORconst, ssa.OpARMXORconst, ssa.OpARMBICconst, ssa.OpARMSLLconst, ssa.OpARMSRLconst, ssa.OpARMSRAconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMADDSconst, ssa.OpARMSUBSconst, ssa.OpARMRSBSconst: p := gc.Prog(v.Op.Asm()) p.Scond = arm.C_SBIT p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum1(v) case ssa.OpARMSRRconst: genshift(arm.AMOVW, 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_RR, v.AuxInt) case ssa.OpARMADDshiftLL, ssa.OpARMADCshiftLL, ssa.OpARMSUBshiftLL, ssa.OpARMSBCshiftLL, ssa.OpARMRSBshiftLL, ssa.OpARMRSCshiftLL, ssa.OpARMANDshiftLL, ssa.OpARMORshiftLL, ssa.OpARMXORshiftLL, ssa.OpARMBICshiftLL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt) case ssa.OpARMADDSshiftLL, ssa.OpARMSUBSshiftLL, ssa.OpARMRSBSshiftLL: p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_LL, v.AuxInt) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRL, ssa.OpARMADCshiftRL, ssa.OpARMSUBshiftRL, ssa.OpARMSBCshiftRL, ssa.OpARMRSBshiftRL, ssa.OpARMRSCshiftRL, ssa.OpARMANDshiftRL, ssa.OpARMORshiftRL, ssa.OpARMXORshiftRL, ssa.OpARMBICshiftRL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt) case ssa.OpARMADDSshiftRL, ssa.OpARMSUBSshiftRL, ssa.OpARMRSBSshiftRL: p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_LR, v.AuxInt) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRA, ssa.OpARMADCshiftRA, ssa.OpARMSUBshiftRA, ssa.OpARMSBCshiftRA, 
ssa.OpARMRSBshiftRA, ssa.OpARMRSCshiftRA, ssa.OpARMANDshiftRA, ssa.OpARMORshiftRA, ssa.OpARMXORshiftRA, ssa.OpARMBICshiftRA: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt) case ssa.OpARMADDSshiftRA, ssa.OpARMSUBSshiftRA, ssa.OpARMRSBSshiftRA: p := genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum1(v), arm.SHIFT_AR, v.AuxInt) p.Scond = arm.C_SBIT case ssa.OpARMMVNshiftLL: genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt) case ssa.OpARMMVNshiftRL: genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt) case ssa.OpARMMVNshiftRA: genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt) case ssa.OpARMMVNshiftLLreg: genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL) case ssa.OpARMMVNshiftRLreg: genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR) case ssa.OpARMMVNshiftRAreg: genregshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR) case ssa.OpARMADDshiftLLreg, ssa.OpARMADCshiftLLreg, ssa.OpARMSUBshiftLLreg, ssa.OpARMSBCshiftLLreg, ssa.OpARMRSBshiftLLreg, ssa.OpARMRSCshiftLLreg, ssa.OpARMANDshiftLLreg, ssa.OpARMORshiftLLreg, ssa.OpARMXORshiftLLreg, ssa.OpARMBICshiftLLreg: genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LL) case ssa.OpARMADDSshiftLLreg, ssa.OpARMSUBSshiftLLreg, ssa.OpARMRSBSshiftLLreg: p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_LL) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRLreg, ssa.OpARMADCshiftRLreg, ssa.OpARMSUBshiftRLreg, ssa.OpARMSBCshiftRLreg, ssa.OpARMRSBshiftRLreg, ssa.OpARMRSCshiftRLreg, ssa.OpARMANDshiftRLreg, ssa.OpARMORshiftRLreg, ssa.OpARMXORshiftRLreg, ssa.OpARMBICshiftRLreg: genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_LR) case ssa.OpARMADDSshiftRLreg, ssa.OpARMSUBSshiftRLreg, ssa.OpARMRSBSshiftRLreg: p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_LR) p.Scond = arm.C_SBIT case ssa.OpARMADDshiftRAreg, ssa.OpARMADCshiftRAreg, ssa.OpARMSUBshiftRAreg, ssa.OpARMSBCshiftRAreg, ssa.OpARMRSBshiftRAreg, ssa.OpARMRSCshiftRAreg, ssa.OpARMANDshiftRAreg, ssa.OpARMORshiftRAreg, ssa.OpARMXORshiftRAreg, ssa.OpARMBICshiftRAreg: genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum(v), arm.SHIFT_AR) case ssa.OpARMADDSshiftRAreg, ssa.OpARMSUBSshiftRAreg, ssa.OpARMRSBSshiftRAreg: p := genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), gc.SSARegNum1(v), arm.SHIFT_AR) p.Scond = arm.C_SBIT case ssa.OpARMHMUL, ssa.OpARMHMULU: // 32-bit high multiplication p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_REGREG p.To.Reg = gc.SSARegNum(v) p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register case ssa.OpARMMULLU: // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1 p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.Reg = 
gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_REGREG p.To.Reg = gc.SSARegNum0(v) // high 32-bit p.To.Offset = int64(gc.SSARegNum1(v)) // low 32-bit case ssa.OpARMMULA: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_REGREG2 p.To.Reg = gc.SSARegNum(v) // result p.To.Offset = int64(gc.SSARegNum(v.Args[2])) // addend case ssa.OpARMMOVWconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMMOVFconst, ssa.OpARMMOVDconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMCMP, ssa.OpARMCMN, ssa.OpARMTST, ssa.OpARMTEQ, ssa.OpARMCMPF, ssa.OpARMCMPD: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // Special layout in ARM assembly // Comparing to x86, the operands of ARM's CMP are reversed. p.From.Reg = gc.SSARegNum(v.Args[1]) p.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMCMPconst, ssa.OpARMCMNconst, ssa.OpARMTSTconst, ssa.OpARMTEQconst: // Special layout in ARM assembly p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMCMPF0, ssa.OpARMCMPD0: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMCMPshiftLL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LL, v.AuxInt) case ssa.OpARMCMPshiftRL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_LR, v.AuxInt) case ssa.OpARMCMPshiftRA: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm.SHIFT_AR, v.AuxInt) case ssa.OpARMCMPshiftLLreg: genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LL) case ssa.OpARMCMPshiftRLreg: genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_LR) case ssa.OpARMCMPshiftRAreg: genregshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v.Args[2]), 0, arm.SHIFT_AR) case ssa.OpARMMOVWaddr: p := gc.Prog(arm.AMOVW) p.From.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) var wantreg string // MOVW $sym+off(base), R // the assembler expands it as the following: // - base is SP: add constant offset to SP (R13) // when constant is large, tmp register (R11) may be used // - base is SB: load external address from constant pool (use relocation) switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) case *ssa.ExternSymbol: wantreg = "SB" gc.AddAux(&p.From, v) case *ssa.ArgSymbol, *ssa.AutoSymbol: wantreg = "SP" gc.AddAux(&p.From, v) case nil: // No sym, just MOVW $off(SP), R wantreg = "SP" p.From.Reg = arm.REGSP p.From.Offset = v.AuxInt } if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg { v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg) } case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload, ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore, ssa.OpARMMOVFstore, ssa.OpARMMOVDstore: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG 
p.From.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpARMMOVWloadidx: // this is just shift 0 bits fallthrough case ssa.OpARMMOVWloadshiftLL: p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LL, v.AuxInt) p.From.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMMOVWloadshiftRL: p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_LR, v.AuxInt) p.From.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMMOVWloadshiftRA: p := genshift(v.Op.Asm(), 0, gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm.SHIFT_AR, v.AuxInt) p.From.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMMOVWstoreidx: // this is just shift 0 bits fallthrough case ssa.OpARMMOVWstoreshiftLL: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[2]) p.To.Type = obj.TYPE_SHIFT p.To.Reg = gc.SSARegNum(v.Args[0]) p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LL, v.AuxInt)) case ssa.OpARMMOVWstoreshiftRL: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[2]) p.To.Type = obj.TYPE_SHIFT p.To.Reg = gc.SSARegNum(v.Args[0]) p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_LR, v.AuxInt)) case ssa.OpARMMOVWstoreshiftRA: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[2]) p.To.Type = obj.TYPE_SHIFT p.To.Reg = gc.SSARegNum(v.Args[0]) p.To.Offset = int64(makeshift(gc.SSARegNum(v.Args[1]), arm.SHIFT_AR, v.AuxInt)) case ssa.OpARMMOVBreg, ssa.OpARMMOVBUreg, ssa.OpARMMOVHreg, ssa.OpARMMOVHUreg: a := v.Args[0] for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop { a = a.Args[0] } if a.Op == ssa.OpLoadReg { t := a.Type switch { case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(), v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(), v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(), v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned(): // arg is a proper-typed load, already zero/sign-extended, don't extend again if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) { return } p := gc.Prog(arm.AMOVW) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) return default: } } fallthrough case ssa.OpARMMVN, ssa.OpARMSQRTD, ssa.OpARMNEGF, ssa.OpARMNEGD, ssa.OpARMMOVWF, ssa.OpARMMOVWD, ssa.OpARMMOVFW, ssa.OpARMMOVDW, ssa.OpARMMOVFD, ssa.OpARMMOVDF: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMMOVWUF, ssa.OpARMMOVWUD, ssa.OpARMMOVFWU, ssa.OpARMMOVDWU: p := gc.Prog(v.Op.Asm()) p.Scond = arm.C_UBIT p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMCMOVWHSconst: p := gc.Prog(arm.AMOVW) p.Scond = arm.C_SCOND_HS p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMCMOVWLSconst: p := gc.Prog(arm.AMOVW) p.Scond = arm.C_SCOND_LS p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARMCALLstatic: if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. // However, the stack trace code will show the line // of the instruction byte before the return PC. 
// To avoid that being an unrelated instruction, // insert an actual hardware NOP that will have the right line number. // This is different from obj.ANOP, which is a virtual no-op // that doesn't make it into the instruction stream. ginsnop() } p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym)) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLclosure: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = gc.SSARegNum(v.Args[0]) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLdefer: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Deferproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLgo: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Newproc.Sym) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMCALLinter: p := gc.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Offset = 0 p.To.Reg = gc.SSARegNum(v.Args[0]) if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } case ssa.OpARMDUFFZERO: p := gc.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) p.To.Offset = v.AuxInt case ssa.OpARMDUFFCOPY: p := gc.Prog(obj.ADUFFCOPY) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg)) p.To.Offset = v.AuxInt case ssa.OpARMLoweredNilCheck: // Optimization - if the subsequent block has a load or store // at the same address, we don't need to issue this instruction. mem := v.Args[1] for _, w := range v.Block.Succs[0].Block().Values { if w.Op == ssa.OpPhi { if w.Type.IsMemory() { mem = w } continue } if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() { // w doesn't use a store - can't be a memory op. continue } if w.Args[len(w.Args)-1] != mem { v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w) } switch w.Op { case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload, ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload, ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore, ssa.OpARMMOVFstore, ssa.OpARMMOVDstore: // arg0 is ptr, auxint is offset if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage { if gc.Debug_checknil != 0 && int(v.Line) > 1 { gc.Warnl(v.Line, "removed nil check") } return } case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero: // arg0 is ptr if w.Args[0] == v.Args[0] { if gc.Debug_checknil != 0 && int(v.Line) > 1 { gc.Warnl(v.Line, "removed nil check") } return } case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove: // arg0 is dst ptr, arg1 is src ptr if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] { if gc.Debug_checknil != 0 && int(v.Line) > 1 { gc.Warnl(v.Line, "removed nil check") } return } default: } if w.Type.IsMemory() { if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive { // these ops are OK mem = w continue } // We can't delay the nil check past the next store. break } } // Issue a load which will fault if arg is nil. 
p := gc.Prog(arm.AMOVB) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = arm.REGTMP if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers gc.Warnl(v.Line, "generated nil check") } case ssa.OpARMLoweredZero: // MOVW.P Rarg2, 4(R1) // CMP Rarg1, R1 // BLE -2(PC) // arg1 is the address of the last element to zero // arg2 is known to be zero // auxint is alignment var sz int64 var mov obj.As switch { case v.AuxInt%4 == 0: sz = 4 mov = arm.AMOVW case v.AuxInt%2 == 0: sz = 2 mov = arm.AMOVH default: sz = 1 mov = arm.AMOVB } p := gc.Prog(mov) p.Scond = arm.C_PBIT p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[2]) p.To.Type = obj.TYPE_MEM p.To.Reg = arm.REG_R1 p.To.Offset = sz p2 := gc.Prog(arm.ACMP) p2.From.Type = obj.TYPE_REG p2.From.Reg = gc.SSARegNum(v.Args[1]) p2.Reg = arm.REG_R1 p3 := gc.Prog(arm.ABLE) p3.To.Type = obj.TYPE_BRANCH gc.Patch(p3, p) case ssa.OpARMLoweredMove: // MOVW.P 4(R1), Rtmp // MOVW.P Rtmp, 4(R2) // CMP Rarg2, R1 // BLE -3(PC) // arg2 is the address of the last element of src // auxint is alignment var sz int64 var mov obj.As switch { case v.AuxInt%4 == 0: sz = 4 mov = arm.AMOVW case v.AuxInt%2 == 0: sz = 2 mov = arm.AMOVH default: sz = 1 mov = arm.AMOVB } p := gc.Prog(mov) p.Scond = arm.C_PBIT p.From.Type = obj.TYPE_MEM p.From.Reg = arm.REG_R1 p.From.Offset = sz p.To.Type = obj.TYPE_REG p.To.Reg = arm.REGTMP p2 := gc.Prog(mov) p2.Scond = arm.C_PBIT p2.From.Type = obj.TYPE_REG p2.From.Reg = arm.REGTMP p2.To.Type = obj.TYPE_MEM p2.To.Reg = arm.REG_R2 p2.To.Offset = sz p3 := gc.Prog(arm.ACMP) p3.From.Type = obj.TYPE_REG p3.From.Reg = gc.SSARegNum(v.Args[2]) p3.Reg = arm.REG_R1 p4 := gc.Prog(arm.ABLE) p4.To.Type = obj.TYPE_BRANCH gc.Patch(p4, p) case ssa.OpVarDef: gc.Gvardef(v.Aux.(*gc.Node)) case ssa.OpVarKill: gc.Gvarkill(v.Aux.(*gc.Node)) case ssa.OpVarLive: gc.Gvarlive(v.Aux.(*gc.Node)) case ssa.OpKeepAlive: if !v.Args[0].Type.IsPtrShaped() { v.Fatalf("keeping non-pointer alive %v", v.Args[0]) } n, off := gc.AutoVar(v.Args[0]) if n == nil { v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0]) } if off != 0 { v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off) } gc.Gvarlive(n) case ssa.OpARMEqual, ssa.OpARMNotEqual, ssa.OpARMLessThan, ssa.OpARMLessEqual, ssa.OpARMGreaterThan, ssa.OpARMGreaterEqual, ssa.OpARMLessThanU, ssa.OpARMLessEqualU, ssa.OpARMGreaterThanU, ssa.OpARMGreaterEqualU: // generate boolean values // use conditional move p := gc.Prog(arm.AMOVW) p.From.Type = obj.TYPE_CONST p.From.Offset = 0 p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) p = gc.Prog(arm.AMOVW) p.Scond = condBits[v.Op] p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpSelect0, ssa.OpSelect1: // nothing to do case ssa.OpARMLoweredGetClosurePtr: // Closure pointer is R7 (arm.REGCTXT). gc.CheckLoweredGetClosurePtr(v) case ssa.OpARMFlagEQ, ssa.OpARMFlagLT_ULT, ssa.OpARMFlagLT_UGT, ssa.OpARMFlagGT_ULT, ssa.OpARMFlagGT_UGT: v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) case ssa.OpARMInvertFlags: v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) default: v.Unimplementedf("genValue not implemented: %s", v.LongString()) } }
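// For reference: the genshift and genregshift helpers used by the ARM
// shifted-operand cases above fold ARM's shifted-register addressing into a
// single Prog. A minimal sketch, assuming this operand encoding; the actual
// helpers are defined elsewhere in this file and may differ in detail:
//
//	// makeshift encodes register reg shifted by constant s with shift type typ.
//	func makeshift(reg int16, typ int64, s int64) int64 {
//		return int64(reg&0xf) | typ | (s&31)<<7
//	}
//
//	// genshift generates a Prog for r2 = r0 op (r1 shifted by s).
//	func genshift(as obj.As, r0, r1, r2 int16, typ int64, s int64) *obj.Prog {
//		p := gc.Prog(as)
//		p.From.Type = obj.TYPE_SHIFT
//		p.From.Offset = makeshift(r1, typ, s)
//		p.Reg = r0
//		if r2 != 0 {
//			p.To.Type = obj.TYPE_REG
//			p.To.Reg = r2
//		}
//		return p
//	}
//
// genregshift is analogous but encodes a register-held shift amount rather
// than a constant.
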
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { case ssa.OpInitMem: // memory arg needs no code case ssa.OpArg: // input args need no code case ssa.OpSP, ssa.OpSB, ssa.OpGetG: // nothing to do case ssa.OpCopy, ssa.OpARM64MOVDconvert, ssa.OpARM64MOVDreg: if v.Type.IsMemory() { return } x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) if x == y { return } as := arm64.AMOVD if v.Type.IsFloat() { switch v.Type.Size() { case 4: as = arm64.AFMOVS case 8: as = arm64.AFMOVD default: panic("bad float size") } } p := gc.Prog(as) p.From.Type = obj.TYPE_REG p.From.Reg = x p.To.Type = obj.TYPE_REG p.To.Reg = y case ssa.OpARM64MOVDnop: if gc.SSARegNum(v) != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not in same register %s", v.LongString()) } // nothing to do case ssa.OpLoadReg: if v.Type.IsFlags() { v.Unimplementedf("load flags not implemented: %v", v.LongString()) return } p := gc.Prog(loadByType(v.Type)) n, off := gc.AutoVar(v.Args[0]) p.From.Type = obj.TYPE_MEM p.From.Node = n p.From.Sym = gc.Linksym(n.Sym) p.From.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.From.Name = obj.NAME_PARAM p.From.Offset += n.Xoffset } else { p.From.Name = obj.NAME_AUTO } p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpPhi: gc.CheckLoweredPhi(v) case ssa.OpStoreReg: if v.Type.IsFlags() { v.Unimplementedf("store flags not implemented: %v", v.LongString()) return } p := gc.Prog(storeByType(v.Type)) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) n, off := gc.AutoVar(v) p.To.Type = obj.TYPE_MEM p.To.Node = n p.To.Sym = gc.Linksym(n.Sym) p.To.Offset = off if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT { p.To.Name = obj.NAME_PARAM p.To.Offset += n.Xoffset } else { p.To.Name = obj.NAME_AUTO } case ssa.OpARM64ADD, ssa.OpARM64SUB, ssa.OpARM64AND, ssa.OpARM64OR, ssa.OpARM64XOR, ssa.OpARM64BIC, ssa.OpARM64MUL, ssa.OpARM64MULW, ssa.OpARM64MULH, ssa.OpARM64UMULH, ssa.OpARM64MULL, ssa.OpARM64UMULL, ssa.OpARM64DIV, ssa.OpARM64UDIV, ssa.OpARM64DIVW, ssa.OpARM64UDIVW, ssa.OpARM64MOD, ssa.OpARM64UMOD, ssa.OpARM64MODW, ssa.OpARM64UMODW, ssa.OpARM64SLL, ssa.OpARM64SRL, ssa.OpARM64SRA, ssa.OpARM64FADDS, ssa.OpARM64FADDD, ssa.OpARM64FSUBS, ssa.OpARM64FSUBD, ssa.OpARM64FMULS, ssa.OpARM64FMULD, ssa.OpARM64FDIVS, ssa.OpARM64FDIVD: r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = r2 p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r case ssa.OpARM64ADDconst, ssa.OpARM64SUBconst, ssa.OpARM64ANDconst, ssa.OpARM64ORconst, ssa.OpARM64XORconst, ssa.OpARM64BICconst, ssa.OpARM64SLLconst, ssa.OpARM64SRLconst, ssa.OpARM64SRAconst, ssa.OpARM64RORconst, ssa.OpARM64RORWconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARM64ADDshiftLL, ssa.OpARM64SUBshiftLL, ssa.OpARM64ANDshiftLL, ssa.OpARM64ORshiftLL, ssa.OpARM64XORshiftLL, ssa.OpARM64BICshiftLL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_LL, v.AuxInt) case ssa.OpARM64ADDshiftRL, ssa.OpARM64SUBshiftRL, ssa.OpARM64ANDshiftRL, ssa.OpARM64ORshiftRL, ssa.OpARM64XORshiftRL, ssa.OpARM64BICshiftRL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_LR, v.AuxInt) case ssa.OpARM64ADDshiftRA, ssa.OpARM64SUBshiftRA, ssa.OpARM64ANDshiftRA, ssa.OpARM64ORshiftRA, ssa.OpARM64XORshiftRA, 
ssa.OpARM64BICshiftRA: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), gc.SSARegNum(v), arm64.SHIFT_AR, v.AuxInt) case ssa.OpARM64MOVDconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARM64FMOVSconst, ssa.OpARM64FMOVDconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_FCONST p.From.Val = math.Float64frombits(uint64(v.AuxInt)) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARM64CMP, ssa.OpARM64CMPW, ssa.OpARM64CMN, ssa.OpARM64CMNW, ssa.OpARM64FCMPS, ssa.OpARM64FCMPD: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARM64CMPconst, ssa.OpARM64CMPWconst, ssa.OpARM64CMNconst, ssa.OpARM64CMNWconst: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARM64CMPshiftLL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_LL, v.AuxInt) case ssa.OpARM64CMPshiftRL: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_LR, v.AuxInt) case ssa.OpARM64CMPshiftRA: genshift(v.Op.Asm(), gc.SSARegNum(v.Args[0]), gc.SSARegNum(v.Args[1]), 0, arm64.SHIFT_AR, v.AuxInt) case ssa.OpARM64MOVDaddr: p := gc.Prog(arm64.AMOVD) p.From.Type = obj.TYPE_ADDR p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) var wantreg string // MOVD $sym+off(base), R // the assembler expands it as the following: // - base is SP: add constant offset to SP // when the constant is large, a tmp register may be used // - base is SB: load external address from constant pool (use relocation) switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) case *ssa.ExternSymbol: wantreg = "SB" gc.AddAux(&p.From, v) case *ssa.ArgSymbol, *ssa.AutoSymbol: wantreg = "SP" gc.AddAux(&p.From, v) case nil: // No sym, just MOVD $off(SP), R wantreg = "SP" p.From.Reg = arm64.REGSP p.From.Offset = v.AuxInt } if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg { v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg) } case ssa.OpARM64MOVBload, ssa.OpARM64MOVBUload, ssa.OpARM64MOVHload, ssa.OpARM64MOVHUload, ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload, ssa.OpARM64MOVDload, ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore, ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore, ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[1]) p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpARM64MOVBstorezero, ssa.OpARM64MOVHstorezero, ssa.OpARM64MOVWstorezero, ssa.OpARM64MOVDstorezero: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = arm64.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = gc.SSARegNum(v.Args[0]) gc.AddAux(&p.To, v) case ssa.OpARM64MOVBreg, ssa.OpARM64MOVBUreg, ssa.OpARM64MOVHreg, ssa.OpARM64MOVHUreg, ssa.OpARM64MOVWreg, ssa.OpARM64MOVWUreg: a := v.Args[0] for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg { a = a.Args[0] } if a.Op == ssa.OpLoadReg { t := a.Type switch { case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(), v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(), v.Op == ssa.OpARM64MOVHreg && t.Size() ==
2 && t.IsSigned(), v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(), v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(), v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned(): // arg is a proper-typed load, already zero/sign-extended, don't extend again if gc.SSARegNum(v) == gc.SSARegNum(v.Args[0]) { return } p := gc.Prog(arm64.AMOVD) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) return default: } } fallthrough case ssa.OpARM64MVN, ssa.OpARM64NEG, ssa.OpARM64FNEGS, ssa.OpARM64FNEGD, ssa.OpARM64FSQRTD, ssa.OpARM64FCVTZSSW, ssa.OpARM64FCVTZSDW, ssa.OpARM64FCVTZUSW, ssa.OpARM64FCVTZUDW, ssa.OpARM64FCVTZSS, ssa.OpARM64FCVTZSD, ssa.OpARM64FCVTZUS, ssa.OpARM64FCVTZUD, ssa.OpARM64SCVTFWS, ssa.OpARM64SCVTFWD, ssa.OpARM64SCVTFS, ssa.OpARM64SCVTFD, ssa.OpARM64UCVTFWS, ssa.OpARM64UCVTFWD, ssa.OpARM64UCVTFS, ssa.OpARM64UCVTFD, ssa.OpARM64FCVTSD, ssa.OpARM64FCVTDS, ssa.OpARM64REV, ssa.OpARM64REVW, ssa.OpARM64REV16W: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARM64CSELULT, ssa.OpARM64CSELULT0: r1 := int16(arm64.REGZERO) if v.Op == ssa.OpARM64CSELULT { r1 = gc.SSARegNum(v.Args[1]) } p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p.From.Reg = arm64.COND_LO p.Reg = gc.SSARegNum(v.Args[0]) p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1} p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v) case ssa.OpARM64DUFFZERO: // runtime.duffzero expects start address - 8 in R16 p := gc.Prog(arm64.ASUB) p.From.Type = obj.TYPE_CONST p.From.Offset = 8 p.Reg = gc.SSARegNum(v.Args[0]) p.To.Type = obj.TYPE_REG p.To.Reg = arm64.REG_R16 p = gc.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg)) p.To.Offset = v.AuxInt case ssa.OpARM64LoweredZero: // MOVD.P ZR, 8(R16) // CMP Rarg1, R16 // BLE -2(PC) // arg1 is the address of the last element to zero p := gc.Prog(arm64.AMOVD) p.Scond = arm64.C_XPOST p.From.Type = obj.TYPE_REG p.From.Reg = arm64.REGZERO p.To.Type = obj.TYPE_MEM p.To.Reg = arm64.REG_R16 p.To.Offset = 8 p2 := gc.Prog(arm64.ACMP) p2.From.Type = obj.TYPE_REG p2.From.Reg = gc.SSARegNum(v.Args[1]) p2.Reg = arm64.REG_R16 p3 := gc.Prog(arm64.ABLE) p3.To.Type = obj.TYPE_BRANCH gc.Patch(p3, p) case ssa.OpARM64LoweredMove: // MOVD.P 8(R16), Rtmp // MOVD.P Rtmp, 8(R17) // CMP Rarg2, R16 // BLE -3(PC) // arg2 is the address of the last element of src p := gc.Prog(arm64.AMOVD) p.Scond = arm64.C_XPOST p.From.Type = obj.TYPE_MEM p.From.Reg = arm64.REG_R16 p.From.Offset = 8 p.To.Type = obj.TYPE_REG p.To.Reg = arm64.REGTMP p2 := gc.Prog(arm64.AMOVD) p2.Scond = arm64.C_XPOST p2.From.Type = obj.TYPE_REG p2.From.Reg = arm64.REGTMP p2.To.Type = obj.TYPE_MEM p2.To.Reg = arm64.REG_R17 p2.To.Offset = 8 p3 := gc.Prog(arm64.ACMP) p3.From.Type = obj.TYPE_REG p3.From.Reg = gc.SSARegNum(v.Args[2]) p3.Reg = arm64.REG_R16 p4 := gc.Prog(arm64.ABLE) p4.To.Type = obj.TYPE_BRANCH gc.Patch(p4, p) case ssa.OpARM64CALLstatic: if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. // However, the stack trace code will show the line // of the instruction byte before the return PC. // To avoid that being an unrelated instruction, // insert an actual hardware NOP that will have the right line number. 
			// This is different from obj.ANOP, which is a virtual no-op
			// that doesn't make it into the instruction stream.
			ginsnop()
		}
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLclosure:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64CALLinter:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 0
		p.To.Reg = gc.SSARegNum(v.Args[0])
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpARM64LoweredNilCheck:
		// Optimization - if the subsequent block has a load or store
		// at the same address, we don't need to issue this instruction.
		mem := v.Args[1]
		for _, w := range v.Block.Succs[0].Block().Values {
			if w.Op == ssa.OpPhi {
				if w.Type.IsMemory() {
					mem = w
				}
				continue
			}
			if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
				// w doesn't use a store - can't be a memory op.
				continue
			}
			if w.Args[len(w.Args)-1] != mem {
				v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
			}
			switch w.Op {
			case ssa.OpARM64MOVBload, ssa.OpARM64MOVBUload,
				ssa.OpARM64MOVHload, ssa.OpARM64MOVHUload,
				ssa.OpARM64MOVWload, ssa.OpARM64MOVWUload,
				ssa.OpARM64MOVDload,
				ssa.OpARM64FMOVSload, ssa.OpARM64FMOVDload,
				ssa.OpARM64MOVBstore, ssa.OpARM64MOVHstore,
				ssa.OpARM64MOVWstore, ssa.OpARM64MOVDstore,
				ssa.OpARM64FMOVSstore, ssa.OpARM64FMOVDstore:
				// arg0 is ptr, auxint is offset
				if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
						gc.Warnl(v.Line, "removed nil check")
					}
					return
				}
			case ssa.OpARM64DUFFZERO, ssa.OpARM64LoweredZero:
				// arg0 is ptr
				if w.Args[0] == v.Args[0] {
					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
						gc.Warnl(v.Line, "removed nil check")
					}
					return
				}
			case ssa.OpARM64LoweredMove:
				// arg0 is dst ptr, arg1 is src ptr
				if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
					if gc.Debug_checknil != 0 && int(v.Line) > 1 {
						gc.Warnl(v.Line, "removed nil check")
					}
					return
				}
			default:
			}
			if w.Type.IsMemory() {
				if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
					// these ops are OK
					mem = w
					continue
				}
				// We can't delay the nil check past the next store.
				break
			}
		}
		// Issue a load which will fault if arg is nil.
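		// The load below targets REGTMP and its result is never used; only the
		// memory access matters. If the pointer is nil, the access falls in the
		// low, unmapped part of the address space (see minZeroPage above), the
		// hardware faults, and the runtime converts the fault into the usual
		// nil-pointer panic.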
		p := gc.Prog(arm64.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = arm64.REGTMP
		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			gc.Warnl(v.Line, "generated nil check")
		}
	case ssa.OpVarDef:
		gc.Gvardef(v.Aux.(*gc.Node))
	case ssa.OpVarKill:
		gc.Gvarkill(v.Aux.(*gc.Node))
	case ssa.OpVarLive:
		gc.Gvarlive(v.Aux.(*gc.Node))
	case ssa.OpKeepAlive:
		if !v.Args[0].Type.IsPtrShaped() {
			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
		}
		n, off := gc.AutoVar(v.Args[0])
		if n == nil {
			v.Fatalf("KeepAlive with non-spilled value %s %s", v, v.Args[0])
		}
		if off != 0 {
			v.Fatalf("KeepAlive with non-zero offset spill location %s:%d", n, off)
		}
		gc.Gvarlive(n)
	case ssa.OpARM64Equal, ssa.OpARM64NotEqual,
		ssa.OpARM64LessThan, ssa.OpARM64LessEqual,
		ssa.OpARM64GreaterThan, ssa.OpARM64GreaterEqual,
		ssa.OpARM64LessThanU, ssa.OpARM64LessEqualU,
		ssa.OpARM64GreaterThanU, ssa.OpARM64GreaterEqualU:
		// generate boolean values using CSET
		p := gc.Prog(arm64.ACSET)
		p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
		p.From.Reg = condBits[v.Op]
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)
	case ssa.OpSelect0, ssa.OpSelect1:
		// nothing to do
	case ssa.OpARM64LoweredGetClosurePtr:
		// Closure pointer is R26 (arm64.REGCTXT).
		gc.CheckLoweredGetClosurePtr(v)
	case ssa.OpARM64FlagEQ,
		ssa.OpARM64FlagLT_ULT, ssa.OpARM64FlagLT_UGT,
		ssa.OpARM64FlagGT_ULT, ssa.OpARM64FlagGT_UGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
	case ssa.OpARM64InvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	default:
		v.Unimplementedf("genValue not implemented: %s", v.LongString())
	}
}
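// The CSET and CSELULT cases above index condBits[v.Op], which is defined
// elsewhere in this file and not shown in this excerpt. As an illustration
// only (a sketch, not the verbatim definition), such a table maps each
// comparison-result pseudo-op to the ARM64 condition code the assembler
// expects in the Reg field, roughly:
//
//	var condBits = map[ssa.Op]int16{
//		ssa.OpARM64Equal:         arm64.COND_EQ,
//		ssa.OpARM64NotEqual:      arm64.COND_NE,
//		ssa.OpARM64LessThan:      arm64.COND_LT,
//		ssa.OpARM64LessThanU:     arm64.COND_LO,
//		ssa.OpARM64LessEqual:     arm64.COND_LE,
//		ssa.OpARM64LessEqualU:    arm64.COND_LS,
//		ssa.OpARM64GreaterThan:   arm64.COND_GT,
//		ssa.OpARM64GreaterThanU:  arm64.COND_HI,
//		ssa.OpARM64GreaterEqual:  arm64.COND_GE,
//		ssa.OpARM64GreaterEqualU: arm64.COND_HS,
//	}
//
// With such a mapping, CSET materializes the flag result of a preceding
// CMP/CMN/FCMP as a 0/1 value in the destination register.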