func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
	if oprange[AOR&obj.AMask].start == nil {
		buildop(ctxt)
	}
	a1 := int(p.Optab)
	if a1 != 0 {
		return &optab[a1-1:][0]
	}
	a1 = int(p.From.Class)
	if a1 == 0 {
		a1 = aclass(ctxt, &p.From) + 1
		p.From.Class = int8(a1)
	}
	a1--
	a3 := int(p.To.Class)
	if a3 == 0 {
		a3 = aclass(ctxt, &p.To) + 1
		p.To.Class = int8(a3)
	}
	a3--
	a2 := C_NONE
	if p.Reg != 0 {
		a2 = C_REG
	}

	//print("oplook %P %d %d %d\n", p, a1, a2, a3);

	r0 := p.As & obj.AMask
	o := oprange[r0].start
	if o == nil {
		o = oprange[r0].stop /* just generate an error */
	}
	e := oprange[r0].stop
	c1 := xcmp[a1][:]
	c3 := xcmp[a3][:]
	for ; -cap(o) < -cap(e); o = o[1:] {
		if int(o[0].a2) == a2 {
			if c1[o[0].a1] != 0 {
				if c3[o[0].a3] != 0 {
					p.Optab = uint16((-cap(o) + cap(optab)) + 1)
					return &o[0]
				}
			}
		}
	}

	ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
	prasm(p)
	if o == nil {
		o = optab
	}
	return &o[0]
}
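// oplook caches its classification work on the Prog itself: p.Optab
// stores the matched optab index plus one, and p.From.Class/p.To.Class
// store the operand class plus one, so a zero value in any of these
// fields means "not yet computed" on the next call.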
// as2variant returns the variant (V_*) flags of instruction as.
func as2variant(as int) int {
	initvariants()
	for i := int(0); i < len(varianttable[as]); i++ {
		if varianttable[as][i] == as {
			return i
		}
	}
	gc.Fatalf("as2variant: instruction %v is not a variant of itself", obj.Aconv(as))
	return 0
}
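// Illustration (table contents invented for the example): if
// initvariants built varianttable[AADDCC] = {AADD, AADDCC, AADDV,
// AADDVCC}, then as2variant(AADDCC) would return 1, the position of
// AADDCC within its own variant row, which callers interpret as the
// V_* flag combination of that form.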
/*
 * shortprop eliminates redundant zero/sign extensions.
 *
 *   MOVBS x, R
 *   <no use R>
 *   MOVBS R, R'
 *
 * changed to
 *
 *   MOVBS x, R
 *   ...
 *   MOVB  R, R' (compiled to mov)
 *
 * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
 */
func shortprop(r *gc.Flow) bool {
	p := (*obj.Prog)(r.Prog)
	r1 := (*gc.Flow)(findpre(r, &p.From))
	if r1 == nil {
		return false
	}

	p1 := (*obj.Prog)(r1.Prog)
	if p1.As == p.As {
		// Two consecutive extensions.
		goto gotit
	}

	if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
		// Loaded an immediate.
		goto gotit
	}

	return false

gotit:
	if gc.Debug['P'] != 0 {
		fmt.Printf("shortprop\n%v\n%v", p1, p)
	}
	switch p.As {
	case arm.AMOVBS, arm.AMOVBU:
		p.As = arm.AMOVB

	case arm.AMOVHS, arm.AMOVHU:
		p.As = arm.AMOVH
	}

	if gc.Debug['P'] != 0 {
		fmt.Printf(" => %v\n", obj.Aconv(int(p.As)))
	}
	return true
}
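// The immediate case above is worth spelling out (example ours):
//	MOVW  $100, R1
//	MOVBS R1, R2
// can drop the extension because any constant in [0, 128) is already
// both sign- and zero-extended as a byte, so the MOVBS is rewritten to
// a plain MOVB.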
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copy returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		mips.AMOVV, mips.AMOVF, mips.AMOVD, mips.AMOVH, mips.AMOVHU,
		mips.AMOVB, mips.AMOVBU, mips.AMOVW, mips.AMOVWU,
		mips.AMOVFD, mips.AMOVDF, mips.AMOVDW, mips.AMOVWD,
		mips.AMOVFW, mips.AMOVWF, mips.AMOVDV, mips.AMOVVD,
		mips.AMOVFV, mips.AMOVVF,
		mips.ATRUNCFV, mips.ATRUNCDV, mips.ATRUNCFW, mips.ATRUNCDW:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			// Fix up implicit from
			if p.From.Type == obj.TYPE_NONE {
				p.From = p.To
			}
			if copyau(&p.From, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau(&p.To, v) {
			// p->to only indirectly uses v
			return 1
		}
		return 0

	case mips.ASGT, /* read p->from, read p->reg, write p->to */
		mips.ASGTU,
		mips.AADD, mips.AADDU, mips.ASUB, mips.ASUBU,
		mips.ASLL, mips.ASRL, mips.ASRA,
		mips.AOR, mips.ANOR, mips.AAND, mips.AXOR,
		mips.AADDV, mips.AADDVU, mips.ASUBV, mips.ASUBVU,
		mips.ASLLV, mips.ASRLV, mips.ASRAV,
		mips.AADDF, mips.AADDD, mips.ASUBF, mips.ASUBD,
		mips.AMULF, mips.AMULD, mips.ADIVF, mips.ADIVD:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ACHECKNIL, /* read p->from */
		mips.ABEQ, /* read p->from, read p->reg */
		mips.ABNE, mips.ABGTZ, mips.ABGEZ, mips.ABLTZ, mips.ABLEZ,
		mips.ACMPEQD, mips.ACMPEQF, mips.ACMPGED, mips.ACMPGEF,
		mips.ACMPGTD, mips.ACMPGTF, mips.ABFPF, mips.ABFPT,
		mips.AMUL, mips.AMULU, mips.ADIV, mips.ADIVU,
		mips.AMULV, mips.AMULVU, mips.ADIVV, mips.ADIVVU:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub1(p, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case mips.AJMP: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case mips.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case mips.AJAL: /* funny */
		if v.Type == obj.TYPE_REG {
			// TODO(rsc): REG_R0 and REG_F0 used to be
			// (when register numbers started at 0) exregoffset and exfregoffset,
			// which are unset entirely.
			// It's strange that this handles R0 and F0 differently from the other
			// registers. Possible failure to optimize?
			if mips.REG_R0 < v.Reg && v.Reg <= mips.REG_R31 {
				return 2
			}
			if v.Reg == mips.REGARG {
				return 2
			}
			if mips.REG_F0 < v.Reg && v.Reg <= mips.REG_F31 {
				return 2
			}
		}

		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R0 is zero, used by DUFFZERO, cannot be substituted.
	// R1 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 0 {
				return 1
			}
			if v.Reg == 1 {
				return 2
			}
		}
		return 0

	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
	// R3 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 1 || v.Reg == 2 {
				return 2
			}
			if v.Reg == 3 {
				return 3
			}
		}
		return 0

	case obj.ATEXT: /* funny */
		if v.Type == obj.TYPE_REG {
			if v.Reg == mips.REGARG {
				return 3
			}
		}
		return 0

	case obj.APCDATA, obj.AFUNCDATA, obj.AVARDEF, obj.AVARKILL, obj.AVARLIVE, obj.AUSEFIELD:
		return 0
	}
}
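// A minimal sketch (not part of the original pass) of how a caller such
// as a copy-propagation loop typically consumes copyu's return codes;
// the helper name substitutionBlocked is ours.
func substitutionBlocked(p *obj.Prog, v *obj.Addr) bool {
	switch copyu(p, v, nil) {
	case 1:
		// v is only read here; substitution may continue past p.
		return false
	case 2:
		// read-alter-rewrite: v cannot be rewritten on either side.
		return true
	case 3, 4:
		// v is set here, so the value being propagated dies at p.
		return true
	}
	// 0: p does not touch v.
	return false
}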
/*
 * return
 * 1 if v only used (and substitute),
 * 2 if read-alter-rewrite
 * 3 if set
 * 4 if set and used
 * 0 otherwise (not touched)
 */
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case arm.AMOVM:
		if v.Type != obj.TYPE_REG {
			return 0
		}
		if p.From.Type == obj.TYPE_CONST { /* read reglist, read/rar */
			if s != nil {
				if p.From.Offset&(1<<uint(v.Reg)) != 0 {
					return 1
				}
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.To, v) {
				if p.Scond&arm.C_WBIT != 0 {
					return 2
				}
				return 1
			}

			if p.From.Offset&(1<<uint(v.Reg)) != 0 {
				return 1 /* read/rar, write reglist */
			}
		} else {
			if s != nil {
				if p.To.Offset&(1<<uint(v.Reg)) != 0 {
					return 1
				}
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				if p.Scond&arm.C_WBIT != 0 {
					return 2
				}
				if p.To.Offset&(1<<uint(v.Reg)) != 0 {
					return 4
				}
				return 1
			}

			if p.To.Offset&(1<<uint(v.Reg)) != 0 {
				return 3
			}
		}
		return 0

	case obj.ANOP, /* read,, write */
		arm.ASQRTD,
		arm.AMOVW, arm.AMOVF, arm.AMOVD,
		arm.AMOVH, arm.AMOVHS, arm.AMOVHU,
		arm.AMOVB, arm.AMOVBS, arm.AMOVBU,
		arm.AMOVFW, arm.AMOVWF, arm.AMOVDW,
		arm.AMOVWD, arm.AMOVFD, arm.AMOVDF:
		if p.Scond&(arm.C_WBIT|arm.C_PBIT) != 0 {
			if v.Type == obj.TYPE_REG {
				if p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_SHIFT {
					if p.From.Reg == v.Reg {
						return 2
					}
				} else {
					if p.To.Reg == v.Reg {
						return 2
					}
				}
			}
		}

		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Scond != arm.C_SCOND_NONE {
				return 2
			}
			if copyau(&p.From, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm.AMULLU, /* read, read, write, write */
		arm.AMULL, arm.AMULA, arm.AMVN:
		return 2

	case arm.AADD, /* read, read, write */
		arm.AADC, arm.ASUB, arm.ASBC, arm.ARSB,
		arm.ASLL, arm.ASRL, arm.ASRA,
		arm.AORR, arm.AAND, arm.AEOR,
		arm.AMUL, arm.AMULU, arm.ADIV, arm.ADIVU,
		arm.AMOD, arm.AMODU,
		arm.AADDF, arm.AADDD, arm.ASUBF, arm.ASUBD,
		arm.AMULF, arm.AMULD, arm.ADIVF, arm.ADIVD,
		obj.ACHECKNIL, /* read */
		arm.ACMPF,     /* read, read, */
		arm.ACMPD, arm.ACMP, arm.ACMN, arm.ATST: /* read,, */
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Scond != arm.C_SCOND_NONE {
				return 2
			}
			if p.Reg == 0 {
				p.Reg = p.To.Reg
			}
			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm.ABEQ, /* read, read */
		arm.ABNE, arm.ABCS, arm.ABHS, arm.ABCC, arm.ABLO,
		arm.ABMI, arm.ABPL, arm.ABVS, arm.ABVC, arm.ABHI,
		arm.ABLS, arm.ABGE, arm.ABLT, arm.ABGT, arm.ABLE:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub1(p, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case arm.AB: /* funny */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 1
		}
		return 3

	case arm.ABL: /* funny */
		if v.Type == obj.TYPE_REG {
			// TODO(rsc): REG_R0 and REG_F0 used to be
			// (when register numbers started at 0) exregoffset and exfregoffset,
			// which are unset entirely.
			// It's strange that this handles R0 and F0 differently from the other
			// registers. Possible failure to optimize?
			if arm.REG_R0 < v.Reg && v.Reg <= arm.REGEXT {
				return 2
			}
			if v.Reg == arm.REGARG {
				return 2
			}
			if arm.REG_F0 < v.Reg && v.Reg <= arm.FREGEXT {
				return 2
			}
		}

		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R0 is zero, used by DUFFZERO, cannot be substituted.
	// R1 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm.REG_R0 {
				return 1
			}
			if v.Reg == arm.REG_R0+1 {
				return 2
			}
		}
		return 0

	// R0 is scratch, set by DUFFCOPY, cannot be substituted.
	// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm.REG_R0 {
				return 3
			}
			if v.Reg == arm.REG_R0+1 || v.Reg == arm.REG_R0+2 {
				return 2
			}
		}
		return 0

	case obj.ATEXT: /* funny */
		if v.Type == obj.TYPE_REG {
			if v.Reg == arm.REGARG {
				return 3
			}
		}
		return 0

	case obj.APCDATA, obj.AFUNCDATA, obj.AVARDEF, obj.AVARKILL, obj.AVARLIVE, obj.AUSEFIELD:
		return 0
	}
}
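// Illustrative AMOVM operand encoding (example ours): the register list
// of a load/store-multiple is a bitmask carried in the constant operand,
// so for the load form
//	MOVM.IA.W (R1), [R0,R2]
// p.To.Offset is 1<<0|1<<2 = 0x5, which is what the
// p.To.Offset&(1<<uint(v.Reg)) tests above probe, and the .W write-back
// bit is what the p.Scond&arm.C_WBIT checks detect.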
func peep(firstp *obj.Prog) {
	g := (*gc.Graph)(gc.Flowstart(firstp, nil))
	if g == nil {
		return
	}
	gactive = 0

	var p *obj.Prog
	var r *gc.Flow
	var t int
loop1:
	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
		gc.Dumpit("loop1", g.Start, 0)
	}

	t = 0
	for r = g.Start; r != nil; r = r.Link {
		p = r.Prog

		// TODO(minux) Handle smaller moves. arm and amd64
		// distinguish between moves that *must* sign/zero
		// extend and moves that don't care so they
		// can eliminate moves that don't care without
		// breaking moves that do care. This might let us
		// simplify or remove the next peep loop, too.
		if p.As == arm64.AMOVD || p.As == arm64.AFMOVD {
			if regtyp(&p.To) {
				// Try to eliminate reg->reg moves
				if regtyp(&p.From) {
					if p.From.Type == p.To.Type {
						if copyprop(r) {
							excise(r)
							t++
						} else if subprop(r) && copyprop(r) {
							excise(r)
							t++
						}
					}
				}
			}
		}
	}

	if t != 0 {
		goto loop1
	}

	/*
	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
	 */
	var p1 *obj.Prog
	var r1 *gc.Flow
	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVH, arm64.AMOVHU,
			arm64.AMOVB, arm64.AMOVBU,
			arm64.AMOVW, arm64.AMOVWU:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != p.As {
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
			continue
		}
		excise(r1)
	}

	if gc.Debug['D'] > 1 {
		goto ret /* allow following code improvement to be suppressed */
	}

	// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
	for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
		p = r.Prog
		switch p.As {
		default:
			continue

		case arm64.AMOVD:
			if p.To.Type != obj.TYPE_REG {
				continue
			}
			if p.From.Type != obj.TYPE_CONST {
				continue
			}
			if p.From.Offset < 0 || 4096 <= p.From.Offset {
				continue
			}
		}

		r1 = r.Link
		if r1 == nil {
			continue
		}
		p1 = r1.Prog
		if p1.As != arm64.AADD && p1.As != arm64.ASUB {
			// TODO(aram): also logical after we have bimm.
			continue
		}
		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
			continue
		}
		if p1.To.Type != obj.TYPE_REG {
			continue
		}
		if gc.Debug['P'] != 0 {
			fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(int(p1.As)), p, p1)
		}
		p1.From.Type = obj.TYPE_CONST
		p1.From = p.From
		excise(r)
	}

	/* TODO(minux):
	 * look for OP x,y,R; CMP R, $0 -> OP.S x,y,R
	 * when OP can set condition codes correctly
	 */

ret:
	gc.Flowend(g)
}
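// A concrete instance of the constant-folding rewrite above (example
// ours):
//	MOVD $40, R3
//	ADD  R3, R5
// becomes
//	ADD  $40, R5
// with the MOVD excised (assuming R3 is not otherwise used, per the
// "R' unused" note above), legal because $40 lies in the checked
// immediate range [0, 4096).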
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copy returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// 7g never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}
	if p.RegTo2 != obj.REG_NONE {
		// 7g never generates a to2
		fmt.Printf("copyu: RegTo2 (%v) not implemented\n", obj.Rconv(int(p.RegTo2)))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		arm64.ANEG, arm64.AFNEGD, arm64.AFNEGS, arm64.AFSQRTD,
		arm64.AFCVTZSD, arm64.AFCVTZSS, arm64.AFCVTZSDW, arm64.AFCVTZSSW,
		arm64.AFCVTZUD, arm64.AFCVTZUS, arm64.AFCVTZUDW, arm64.AFCVTZUSW,
		arm64.AFCVTSD, arm64.AFCVTDS,
		arm64.ASCVTFD, arm64.ASCVTFS, arm64.ASCVTFWD, arm64.ASCVTFWS,
		arm64.AUCVTFD, arm64.AUCVTFS, arm64.AUCVTFWD, arm64.AUCVTFWS,
		arm64.AMOVB, arm64.AMOVBU, arm64.AMOVH, arm64.AMOVHU,
		arm64.AMOVW, arm64.AMOVWU, arm64.AMOVD,
		arm64.AFMOVS, arm64.AFMOVD:
		if p.Scond == 0 {
			if s != nil {
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}

				// Update only indirect uses of v in p->to
				if !copyas(&p.To, v) {
					if copysub(&p.To, v, s, 1) != 0 {
						return 1
					}
				}
				return 0
			}

			if copyas(&p.To, v) {
				// Fix up implicit from
				if p.From.Type == obj.TYPE_NONE {
					p.From = p.To
				}
				if copyau(&p.From, v) {
					return 4
				}
				return 3
			}

			if copyau(&p.From, v) {
				return 1
			}
			if copyau(&p.To, v) {
				// p->to only indirectly uses v
				return 1
			}
			return 0
		}

		/* rar p->from, write p->to or read p->from, rar p->to */
		if p.From.Type == obj.TYPE_MEM {
			if copyas(&p.From, v) {
				// No s!=nil check; need to fail
				// anyway in that case
				return 2
			}

			if s != nil {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyas(&p.To, v) {
				return 3
			}
		} else if p.To.Type == obj.TYPE_MEM {
			if copyas(&p.To, v) {
				return 2
			}
			if s != nil {
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				return 1
			}
		} else {
			fmt.Printf("copyu: bad %v\n", p)
		}

		return 0

	case arm64.AADD, /* read p->from, read p->reg, write p->to */
		arm64.ASUB, arm64.AAND, arm64.AORR, arm64.AEOR,
		arm64.AMUL, arm64.ASMULL, arm64.AUMULL,
		arm64.ASMULH, arm64.AUMULH, arm64.ASDIV, arm64.AUDIV,
		arm64.ALSL, arm64.ALSR, arm64.AASR,
		arm64.AFADDD, arm64.AFADDS, arm64.AFSUBD, arm64.AFSUBS,
		arm64.AFMULD, arm64.AFMULS, arm64.AFDIVD, arm64.AFDIVS:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case arm64.ABEQ, arm64.ABNE,
		arm64.ABGE, arm64.ABLT, arm64.ABGT, arm64.ABLE,
		arm64.ABLO, arm64.ABLS, arm64.ABHI, arm64.ABHS:
		return 0

	case obj.ACHECKNIL, /* read p->from */
		arm64.ACMP, /* read p->from, read p->reg */
		arm64.AFCMPD, arm64.AFCMPS:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub1(p, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		return 0

	case arm64.AB: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case arm64.ABL: /* funny */
		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R31 is zero, used by DUFFZERO, cannot be substituted.
	// R16 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 31 {
				return 1
			}
			if v.Reg == 16 {
				return 2
			}
		}
		return 0

	// R16, R17 are ptr to src, dst, used and set, cannot be substituted.
	// R27 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 16 || v.Reg == 17 {
				return 2
			}
			if v.Reg == 27 {
				return 3
			}
		}
		return 0

	case arm64.AHINT,
		obj.ATEXT, obj.APCDATA, obj.AFUNCDATA,
		obj.AVARDEF, obj.AVARKILL, obj.AVARLIVE, obj.AUSEFIELD:
		return 0
	}
}
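// Note on the DUFFZERO/DUFFCOPY cases above: v.Reg is compared against
// the raw numbers 31, 16, 17 and 27 rather than against the arm64.REG_*
// constants used elsewhere. Whether those raw values still correspond
// to ZR, R16, R17 and R27 under the current register numbering (which,
// per the TODO in the other back ends, no longer starts at 0) is not
// verified here.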
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	var bpsize int
	if p.Mode == 64 && obj.Framepointer_enabled != 0 && autoffset > 0 {
		// Make room to save a base pointer. If autoffset == 0,
		// this might do something special like a tail jump to
		// another function, so in that case we omit this.
		bpsize = ctxt.Arch.Ptrsize

		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Args = int32(textarg)
	cursym.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
	if p.Mode == 32 && cursym.Locals < 0 {
		cursym.Locals = 0
	}

	// TODO(rsc): Remove 'p.Mode == 64 &&'.
	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
		for q := p; q != nil; q = q.Link {
			if q.As == obj.ACALL {
				goto noleaf
			}
			if (q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO) && autoffset >= obj.StackSmall-8 {
				goto noleaf
			}
		}

		p.From3.Offset |= obj.NOSPLIT
	noleaf:
	}

	if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
	}

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.Regsize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {
		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = obj.Appendp(ctxt, p)

		p.As = obj.ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	deltasp := autoffset

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)

		// Move current frame to BP
		p = obj.Appendp(ctxt, p)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.

		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_CX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ATESTL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		p1 := p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.Regsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ALEAL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = ACMPL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = ACMPL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p2 := p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R15
			p.To.Scale = 1
			p.To.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	var a int
	var pcsize int
	for ; p != nil; p = p.Link {
		pcsize = int(p.Mode) / 8
		a = int(p.From.Name)
		if a == obj.NAME_AUTO {
			p.From.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		if p.From3 != nil {
			a = int(p.From3.Name)
			if a == obj.NAME_AUTO {
				p.From3.Offset += int64(deltasp) - int64(bpsize)
			}
			if a == obj.NAME_PARAM {
				p.From3.Offset += int64(deltasp) + int64(pcsize)
			}
		}
		a = int(p.To.Name)
		if a == obj.NAME_AUTO {
			p.To.Offset += int64(deltasp) - int64(bpsize)
		}
		if a == obj.NAME_PARAM {
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case obj.ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(ctxt, p)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = obj.ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		a.Type = obj.TYPE_MEM
		a.Reg = REG_R15
		a.Index = REG_CX
		a.Scale = 1
		return
	}

	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
	p.As = AMOVQ
	if ctxt.Arch.Ptrsize == 4 {
		p.As = AMOVL
	}
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REG_TLS
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_CX

	next := p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == REG_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
	sub := ASUBQ

	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	var q1 *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = int16(lea)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ stackguard, CX
		//	CMPQ CX, $StackPreempt
		//	JEQ label-of-call-to-morestack
		//	LEAQ StackGuard(SP), AX
		//	SUBQ CX, AX
		//	CMPQ AX, $(framesize+(StackGuard-StackSmall))
		p = obj.Appendp(ctxt, p)

		p.As = int16(mov)
		indir_cx(ctxt, p, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
		if ctxt.Cursym.Cfunc != 0 {
			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = obj.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = obj.StackPreempt
		if p.Mode == 32 {
			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		q1 = p

		p = obj.Appendp(ctxt, p)
		p.As = int16(lea)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = int16(sub)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = int16(cmp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
	}

	// common
	jls := obj.Appendp(ctxt, p)
	jls.As = AJLS
	jls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	call := obj.Appendp(ctxt, spfix)
	call.Lineno = ctxt.Cursym.Text.Lineno
	call.Mode = ctxt.Cursym.Text.Mode
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.Cfunc != 0:
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)

	jmp := obj.Appendp(ctxt, call)
	jmp.As = obj.AJMP
	jmp.To.Type = obj.TYPE_BRANCH
	jmp.Pcond = ctxt.Cursym.Text.Link
	jmp.Spadj = +framesize

	jls.Pcond = call
	if q1 != nil {
		q1.Pcond = call
	}

	return jls
}

func follow(ctxt *obj.Link, s *obj.LSym) {
	ctxt.Cursym = s

	firstp := ctxt.NewProg()
	lastp := firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a int) bool {
	switch a {
	case obj.AJMP, obj.ARET, AIRETL, AIRETQ, AIRETW, ARETFL, ARETFQ, ARETFW, obj.AUNDEF:
		return true
	}
	return false
}

func pushpop(a int) bool {
	switch a {
	case APUSHL, APUSHFL, APUSHQ, APUSHFQ, APUSHW, APUSHFW,
		APOPL, APOPFL, APOPQ, APOPFQ, APOPW, APOPFW:
		return true
	}
	return false
}

func relinv(a int16) int16 {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", obj.Aconv(int(a)))
	return 0
}

func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a int

loop:
	if p == nil {
		return
	}
	if p.As == obj.AJMP {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1

			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}

	if p.Mark != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = int(q.As)
			if a == obj.ANOP {
				i--
				continue
			}

			if nofollow(a) || pushpop(a) {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == obj.ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == obj.ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}

				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop
				/* */
			}
		}

		q = ctxt.NewProg()
		q.As = obj.AJMP
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark = 1

	(*last).Link = p
	*last = p
	a = int(p.As)

	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != obj.ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type == obj.TYPE_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(int16(a))

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = relinv(int16(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}

var unaryDst = map[int]bool{
	ABSWAPL:    true,
	ABSWAPQ:    true,
	ACMPXCHG8B: true,
	ADECB:      true,
	ADECL:      true,
	ADECQ:      true,
	ADECW:      true,
	AINCB:      true,
	AINCL:      true,
	AINCQ:      true,
	AINCW:      true,
	ANEGB:      true,
	ANEGL:      true,
	ANEGQ:      true,
	ANEGW:      true,
	ANOTB:      true,
	ANOTL:      true,
	ANOTQ:      true,
	ANOTW:      true,
	APOPL:      true,
	APOPQ:      true,
	APOPW:      true,
	ASETCC:     true,
	ASETCS:     true,
	ASETEQ:     true,
	ASETGE:     true,
	ASETGT:     true,
	ASETHI:     true,
	ASETLE:     true,
	ASETLS:     true,
	ASETLT:     true,
	ASETMI:     true,
	ASETNE:     true,
	ASETOC:     true,
	ASETOS:     true,
	ASETPC:     true,
	ASETPL:     true,
	ASETPS:     true,
	AFFREE:     true,
	AFLDENV:    true,
	AFSAVE:     true,
	AFSTCW:     true,
	AFSTENV:    true,
	AFSTSW:     true,
	AFXSAVE:    true,
	AFXSAVE64:  true,
	ASTMXCSR:   true,
}

var Linkamd64 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    8,
	Regsize:    8,
}

var Linkamd64p32 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "amd64p32",
	Thechar:    '6',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    8,
}

var Link386 = obj.LinkArch{
	ByteOrder:  binary.LittleEndian,
	Name:       "386",
	Thechar:    '8',
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
	Minlc:      1,
	Ptrsize:    4,
	Regsize:    4,
}
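// unaryDst marks opcodes whose single operand is written rather than
// read; the assembler front end consults LinkArch.UnaryDst in its
// one-operand case to decide whether that operand becomes prog.From or
// prog.To (see asmInstruction below).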
// getRegister checks that addr represents a register and returns its value.
func (p *Parser) getRegister(prog *obj.Prog, op int, addr *obj.Addr) int16 {
	if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
		p.errorf("%s: expected register; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
	}
	return addr.Reg
}
// getImmediate checks that addr represents an immediate constant and returns its value.
func (p *Parser) getImmediate(prog *obj.Prog, op int, addr *obj.Addr) int64 {
	if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
		p.errorf("%s: expected immediate constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
	}
	return addr.Offset
}
// asmInstruction assembles an instruction.
// MOVW R9, (R10)
func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
	// fmt.Printf("%s %+v\n", obj.Aconv(op), a)
	prog := &obj.Prog{
		Ctxt:   p.ctxt,
		Lineno: p.histLineNum,
		As:     int16(op),
	}
	switch len(a) {
	case 0:
		// Nothing to do.
	case 1:
		if p.arch.UnaryDst[op] {
			// prog.From is no address.
			prog.To = a[0]
		} else {
			prog.From = a[0]
			// prog.To is no address.
		}
		if p.arch.Thechar == '9' && arch.IsPPC64NEG(op) {
			// NEG: From and To are both a[0].
			prog.To = a[0]
			prog.From = a[0]
			break
		}
	case 2:
		if p.arch.Thechar == '5' {
			if arch.IsARMCMP(op) {
				prog.From = a[0]
				prog.Reg = p.getRegister(prog, op, &a[1])
				break
			}
			// Strange special cases.
			if arch.IsARMSTREX(op) {
				/*
					STREX x, (y)
						from=(y) reg=x to=x
					STREX (x), y
						from=(x) reg=y to=y
				*/
				if a[0].Type == obj.TYPE_REG && a[1].Type != obj.TYPE_REG {
					prog.From = a[1]
					prog.Reg = a[0].Reg
					prog.To = a[0]
					break
				} else if a[0].Type != obj.TYPE_REG && a[1].Type == obj.TYPE_REG {
					prog.From = a[0]
					prog.Reg = a[1].Reg
					prog.To = a[1]
					break
				}
				p.errorf("unrecognized addressing for %s", obj.Aconv(op))
				return
			}
			if arch.IsARMFloatCmp(op) {
				prog.From = a[0]
				prog.Reg = p.getRegister(prog, op, &a[1])
				break
			}
		} else if p.arch.Thechar == '7' && arch.IsARM64CMP(op) {
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			break
		} else if p.arch.Thechar == '0' {
			if arch.IsMIPS64CMP(op) || arch.IsMIPS64MUL(op) {
				prog.From = a[0]
				prog.Reg = p.getRegister(prog, op, &a[1])
				break
			}
		}
		prog.From = a[0]
		prog.To = a[1]
	case 3:
		switch p.arch.Thechar {
		case '0':
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			prog.To = a[2]
		case '5':
			// Special cases.
			if arch.IsARMSTREX(op) {
				/*
					STREX x, (y), z
						from=(y) reg=x to=z
				*/
				prog.From = a[1]
				prog.Reg = p.getRegister(prog, op, &a[0])
				prog.To = a[2]
				break
			}
			// Otherwise the 2nd operand (a[1]) must be a register.
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			prog.To = a[2]
		case '7':
			// ARM64 instructions with one input and two outputs.
			if arch.IsARM64STLXR(op) {
				prog.From = a[0]
				prog.To = a[1]
				if a[2].Type != obj.TYPE_REG {
					p.errorf("invalid addressing modes for third operand to %s instruction, must be register", obj.Aconv(op))
					return
				}
				prog.RegTo2 = a[2].Reg
				break
			}
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			prog.To = a[2]
		case '6', '8':
			prog.From = a[0]
			prog.From3 = newAddr(a[1])
			prog.To = a[2]
		case '9':
			if arch.IsPPC64CMP(op) {
				// CMPW etc.; third argument is a CR register that goes into prog.Reg.
				prog.From = a[0]
				prog.Reg = p.getRegister(prog, op, &a[2])
				prog.To = a[1]
				break
			}
			// Arithmetic. Choices are:
			// reg reg reg
			// imm reg reg
			// reg imm reg
			// If the immediate is the middle argument, use From3.
			switch a[1].Type {
			case obj.TYPE_REG:
				prog.From = a[0]
				prog.Reg = p.getRegister(prog, op, &a[1])
				prog.To = a[2]
			case obj.TYPE_CONST:
				prog.From = a[0]
				prog.From3 = newAddr(a[1])
				prog.To = a[2]
			default:
				p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
				return
			}
		default:
			p.errorf("TODO: implement three-operand instructions for this architecture")
			return
		}
	case 4:
		if p.arch.Thechar == '5' && arch.IsARMMULA(op) {
			// All must be registers.
			p.getRegister(prog, op, &a[0])
			r1 := p.getRegister(prog, op, &a[1])
			p.getRegister(prog, op, &a[2])
			r3 := p.getRegister(prog, op, &a[3])
			prog.From = a[0]
			prog.To = a[2]
			prog.To.Type = obj.TYPE_REGREG2
			prog.To.Offset = int64(r3)
			prog.Reg = r1
			break
		}
		if p.arch.Thechar == '7' {
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			prog.From3 = newAddr(a[2])
			prog.To = a[3]
			break
		}
		if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
			// 2nd operand must always be a register.
			// TODO: Do we need to guard this with the instruction type?
			// That is, are there 4-operand instructions without this property?
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			prog.From3 = newAddr(a[2])
			prog.To = a[3]
			break
		}
		p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
		return
	case 5:
		if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
			// Always reg, reg, con, con, reg. (con, con is a 'mask').
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			mask1 := p.getConstant(prog, op, &a[2])
			mask2 := p.getConstant(prog, op, &a[3])
			var mask uint32
			if mask1 < mask2 {
				mask = (^uint32(0) >> uint(mask1)) & (^uint32(0) << uint(31-mask2))
			} else {
				mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1)))
			}
			prog.From3 = &obj.Addr{
				Type:   obj.TYPE_CONST,
				Offset: int64(mask),
			}
			prog.To = a[4]
			break
		}
		p.errorf("can't handle %s instruction with 5 operands", obj.Aconv(op))
		return
	case 6:
		if p.arch.Thechar == '5' && arch.IsARMMRC(op) {
			// Strange special case: MCR, MRC.
			prog.To.Type = obj.TYPE_CONST
			x0 := p.getConstant(prog, op, &a[0])
			x1 := p.getConstant(prog, op, &a[1])
			x2 := int64(p.getRegister(prog, op, &a[2]))
			x3 := int64(p.getRegister(prog, op, &a[3]))
			x4 := int64(p.getRegister(prog, op, &a[4]))
			x5 := p.getConstant(prog, op, &a[5])
			// Cond is handled specially for this instruction.
			offset, MRC, ok := arch.ARMMRCOffset(op, cond, x0, x1, x2, x3, x4, x5)
			if !ok {
				p.errorf("unrecognized condition code .%q", cond)
			}
			prog.To.Offset = offset
			cond = ""
			prog.As = MRC // Both instructions are coded as MRC.
			break
		}
		fallthrough
	default:
		p.errorf("can't handle %s instruction with %d operands", obj.Aconv(op), len(a))
		return
	}

	p.append(prog, cond, true)
}
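// Worked example for the PPC64 rotate-mask construction above (values
// ours): with mask1 = 0 and mask2 = 7, the mask1 < mask2 branch yields
//	mask = (^uint32(0) >> 0) & (^uint32(0) << (31 - 7)) = 0xFF000000
// i.e. PPC bits 0 through 7 in big-endian bit numbering, the mask a
// rotate-and-mask instruction with MB=0, ME=7 expects.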
// asmJump assembles a jump instruction.
// JMP R1
// JMP exit
// JMP 3(PC)
func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
	var target *obj.Addr
	prog := &obj.Prog{
		Ctxt:   p.ctxt,
		Lineno: p.histLineNum,
		As:     int16(op),
	}
	switch len(a) {
	case 1:
		target = &a[0]
	case 2:
		// Special 2-operand jumps.
		target = &a[1]
		prog.From = a[0]
	case 3:
		if p.arch.Thechar == '9' {
			// Special 3-operand jumps.
			// First two must be constants; a[1] is a register number.
			target = &a[2]
			prog.From = obj.Addr{
				Type:   obj.TYPE_CONST,
				Offset: p.getConstant(prog, op, &a[0]),
			}
			reg := int16(p.getConstant(prog, op, &a[1]))
			reg, ok := p.arch.RegisterNumber("R", int16(reg))
			if !ok {
				p.errorf("bad register number %d", reg)
				return
			}
			prog.Reg = reg
			break
		}
		if p.arch.Thechar == '0' {
			// 3-operand jumps.
			// First two must be registers
			target = &a[2]
			prog.From = a[0]
			prog.Reg = p.getRegister(prog, op, &a[1])
			break
		}
		fallthrough
	default:
		p.errorf("wrong number of arguments to %s instruction", obj.Aconv(op))
		return
	}

	switch {
	case target.Type == obj.TYPE_BRANCH:
		// JMP 4(PC)
		prog.To = obj.Addr{
			Type:   obj.TYPE_BRANCH,
			Offset: p.pc + 1 + target.Offset, // +1 because p.pc is incremented in append, below.
		}
	case target.Type == obj.TYPE_REG:
		// JMP R1
		prog.To = *target
	case target.Type == obj.TYPE_MEM && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
		// JMP main·morestack(SB)
		prog.To = *target
	case target.Type == obj.TYPE_INDIR && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
		// JMP *main·morestack(SB)
		prog.To = *target
		prog.To.Type = obj.TYPE_INDIR
	case target.Type == obj.TYPE_MEM && target.Reg == 0 && target.Offset == 0:
		// JMP exit
		if target.Sym == nil {
			// Parse error left name unset.
			return
		}
		targetProg := p.labels[target.Sym.Name]
		if targetProg == nil {
			p.toPatch = append(p.toPatch, Patch{prog, target.Sym.Name})
		} else {
			p.branch(prog, targetProg)
		}
	case target.Type == obj.TYPE_MEM && target.Name == obj.NAME_NONE:
		// JMP 4(R0)
		prog.To = *target
		// On the ppc64, 9a encodes BR (CTR) as BR CTR. We do the same.
		if p.arch.Thechar == '9' && target.Offset == 0 {
			prog.To.Type = obj.TYPE_REG
		}
	case target.Type == obj.TYPE_CONST:
		// JMP $4
		prog.To = a[0]
	default:
		p.errorf("cannot assemble jump %+v", target)
		return
	}

	p.append(prog, cond, true)
}
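// Label references resolve in two phases: a jump to an already-defined
// label is bound immediately via p.branch, while a forward reference is
// queued in p.toPatch as a Patch{prog, label} pair to be bound once the
// label is known.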
func buildop(ctxt *obj.Link) {
	var n int

	for i := 0; i < C_NCLASS; i++ {
		for n = 0; n < C_NCLASS; n++ {
			if cmp(n, i) {
				xcmp[i][n] = 1
			}
		}
	}
	for n = 0; optab[n].as != obj.AXXX; n++ {
	}
	sort.Sort(ocmp(optab[:n]))
	for i := 0; i < n; i++ {
		r := optab[i].as
		r0 := r & obj.AMask
		oprange[r0].start = optab[i:]
		for optab[i].as == r {
			i++
		}
		oprange[r0].stop = optab[i:]
		i--

		switch r {
		default:
			ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
			log.Fatalf("bad code")

		case AABSF:
			opset(AMOVFD, r0)
			opset(AMOVDF, r0)
			opset(AMOVWF, r0)
			opset(AMOVFW, r0)
			opset(AMOVWD, r0)
			opset(AMOVDW, r0)
			opset(ANEGF, r0)
			opset(ANEGD, r0)
			opset(AABSD, r0)
			opset(ATRUNCDW, r0)
			opset(ATRUNCFW, r0)
			opset(ATRUNCDV, r0)
			opset(ATRUNCFV, r0)
			opset(AMOVVF, r0)
			opset(AMOVFV, r0)
			opset(AMOVVD, r0)
			opset(AMOVDV, r0)

		case AADD:
			opset(ASGT, r0)
			opset(ASGTU, r0)
			opset(AADDU, r0)
			opset(AADDV, r0)
			opset(AADDVU, r0)

		case AADDF:
			opset(ADIVF, r0)
			opset(ADIVD, r0)
			opset(AMULF, r0)
			opset(AMULD, r0)
			opset(ASUBF, r0)
			opset(ASUBD, r0)
			opset(AADDD, r0)

		case AAND:
			opset(AOR, r0)
			opset(AXOR, r0)

		case ABEQ:
			opset(ABNE, r0)

		case ABLEZ:
			opset(ABGEZ, r0)
			opset(ABGEZAL, r0)
			opset(ABLTZ, r0)
			opset(ABLTZAL, r0)
			opset(ABGTZ, r0)

		case AMOVB:
			opset(AMOVH, r0)

		case AMOVBU:
			opset(AMOVHU, r0)

		case AMUL:
			opset(AREM, r0)
			opset(AREMU, r0)
			opset(ADIVU, r0)
			opset(AMULU, r0)
			opset(ADIV, r0)
			opset(ADIVV, r0)
			opset(ADIVVU, r0)
			opset(AMULV, r0)
			opset(AMULVU, r0)
			opset(AREMV, r0)
			opset(AREMVU, r0)

		case ASLL:
			opset(ASRL, r0)
			opset(ASRA, r0)
			opset(ASLLV, r0)
			opset(ASRAV, r0)
			opset(ASRLV, r0)

		case ASUB:
			opset(ASUBU, r0)
			opset(ASUBV, r0)
			opset(ASUBVU, r0)
			opset(ANOR, r0)

		case ASYSCALL:
			opset(ATLBP, r0)
			opset(ATLBR, r0)
			opset(ATLBWI, r0)
			opset(ATLBWR, r0)

		case ACMPEQF:
			opset(ACMPGTF, r0)
			opset(ACMPGTD, r0)
			opset(ACMPGEF, r0)
			opset(ACMPGED, r0)
			opset(ACMPEQD, r0)

		case ABFPT:
			opset(ABFPF, r0)

		case AMOVWL:
			opset(AMOVWR, r0)
			opset(AMOVVR, r0)
			opset(AMOVVL, r0)

		case AMOVW, AMOVD, AMOVF, AMOVV,
			ABREAK, ARFE, AJAL, AJMP, AMOVWU, AWORD,
			obj.ANOP, obj.ATEXT, obj.AUNDEF, obj.AUSEFIELD,
			obj.AFUNCDATA, obj.APCDATA,
			obj.ADUFFZERO, obj.ADUFFCOPY:
			break
		}
	}
}
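// For example, the AADD case above calls opset for ASGT, ASGTU, AADDU,
// AADDV and AADDVU, so all five opcodes share AADD's optab range and
// differ only in the machine encoding chosen later by oprrr/opirr.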
func opirr(ctxt *obj.Link, a int) uint32 {
	switch a {
	case AADD:
		return SP(1, 0)
	case AADDU:
		return SP(1, 1)
	case ASGT:
		return SP(1, 2)
	case ASGTU:
		return SP(1, 3)
	case AAND:
		return SP(1, 4)
	case AOR:
		return SP(1, 5)
	case AXOR:
		return SP(1, 6)
	case ALAST:
		return SP(1, 7) /* lui */
	case ASLL:
		return OP(0, 0)
	case ASRL:
		return OP(0, 2)
	case ASRA:
		return OP(0, 3)
	case AADDV:
		return SP(3, 0)
	case AADDVU:
		return SP(3, 1)

	case AJMP:
		return SP(0, 2)
	case AJAL,
		obj.ADUFFZERO,
		obj.ADUFFCOPY:
		return SP(0, 3)
	case ABEQ:
		return SP(0, 4)
	case ABEQ + ALAST:
		return SP(2, 4) /* likely */
	case ABNE:
		return SP(0, 5)
	case ABNE + ALAST:
		return SP(2, 5) /* likely */
	case ABGEZ:
		return SP(0, 1) | BCOND(0, 1)
	case ABGEZ + ALAST:
		return SP(0, 1) | BCOND(0, 3) /* likely */
	case ABGEZAL:
		return SP(0, 1) | BCOND(2, 1)
	case ABGEZAL + ALAST:
		return SP(0, 1) | BCOND(2, 3) /* likely */
	case ABGTZ:
		return SP(0, 7)
	case ABGTZ + ALAST:
		return SP(2, 7) /* likely */
	case ABLEZ:
		return SP(0, 6)
	case ABLEZ + ALAST:
		return SP(2, 6) /* likely */
	case ABLTZ:
		return SP(0, 1) | BCOND(0, 0)
	case ABLTZ + ALAST:
		return SP(0, 1) | BCOND(0, 2) /* likely */
	case ABLTZAL:
		return SP(0, 1) | BCOND(2, 0)
	case ABLTZAL + ALAST:
		return SP(0, 1) | BCOND(2, 2) /* likely */
	case ABFPT:
		return SP(2, 1) | (257 << 16)
	case ABFPT + ALAST:
		return SP(2, 1) | (259 << 16) /* likely */
	case ABFPF:
		return SP(2, 1) | (256 << 16)
	case ABFPF + ALAST:
		return SP(2, 1) | (258 << 16) /* likely */

	case AMOVB,
		AMOVBU:
		return SP(5, 0)
	case AMOVH,
		AMOVHU:
		return SP(5, 1)
	case AMOVW,
		AMOVWU:
		return SP(5, 3)
	case AMOVV:
		return SP(7, 7)
	case AMOVF:
		return SP(7, 1)
	case AMOVD:
		return SP(7, 5)
	case AMOVWL:
		return SP(5, 2)
	case AMOVWR:
		return SP(5, 6)
	case AMOVVL:
		return SP(5, 4)
	case AMOVVR:
		return SP(5, 5)

	case ABREAK:
		return SP(5, 7)

	case AMOVWL + ALAST:
		return SP(4, 2)
	case AMOVWR + ALAST:
		return SP(4, 6)
	case AMOVVL + ALAST:
		return SP(3, 2)
	case AMOVVR + ALAST:
		return SP(3, 3)
	case AMOVB + ALAST:
		return SP(4, 0)
	case AMOVBU + ALAST:
		return SP(4, 4)
	case AMOVH + ALAST:
		return SP(4, 1)
	case AMOVHU + ALAST:
		return SP(4, 5)
	case AMOVW + ALAST:
		return SP(4, 3)
	case AMOVWU + ALAST:
		return SP(4, 7)
	case AMOVV + ALAST:
		return SP(6, 7)
	case AMOVF + ALAST:
		return SP(6, 1)
	case AMOVD + ALAST:
		return SP(6, 5)

	case ASLLV:
		return OP(7, 0)
	case ASRLV:
		return OP(7, 2)
	case ASRAV:
		return OP(7, 3)
	case ASLLV + ALAST:
		return OP(7, 4)
	case ASRLV + ALAST:
		return OP(7, 6)
	case ASRAV + ALAST:
		return OP(7, 7)
	}

	if a >= ALAST {
		ctxt.Diag("bad irr opcode %v+ALAST", obj.Aconv(a-ALAST))
	} else {
		ctxt.Diag("bad irr opcode %v", obj.Aconv(a))
	}
	return 0
}
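// A caller selects a paired encoding by passing a+ALAST: for the
// conditional branches that is the branch-likely form (ABEQ -> SP(0, 4)
// but ABEQ+ALAST -> SP(2, 4)), and for the moves it selects the paired
// transfer encoding (AMOVW -> SP(5, 3) but AMOVW+ALAST -> SP(4, 3)).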
func oprrr(ctxt *obj.Link, a int) uint32 {
	switch a {
	case AADD:
		return OP(4, 0)
	case AADDU:
		return OP(4, 1)
	case ASGT:
		return OP(5, 2)
	case ASGTU:
		return OP(5, 3)
	case AAND:
		return OP(4, 4)
	case AOR:
		return OP(4, 5)
	case AXOR:
		return OP(4, 6)
	case ASUB:
		return OP(4, 2)
	case ASUBU:
		return OP(4, 3)
	case ANOR:
		return OP(4, 7)
	case ASLL:
		return OP(0, 4)
	case ASRL:
		return OP(0, 6)
	case ASRA:
		return OP(0, 7)
	case ASLLV:
		return OP(2, 4)
	case ASRLV:
		return OP(2, 6)
	case ASRAV:
		return OP(2, 7)
	case AADDV:
		return OP(5, 4)
	case AADDVU:
		return OP(5, 5)
	case ASUBV:
		return OP(5, 6)
	case ASUBVU:
		return OP(5, 7)
	case AREM,
		ADIV:
		return OP(3, 2)
	case AREMU,
		ADIVU:
		return OP(3, 3)
	case AMUL:
		return OP(3, 0)
	case AMULU:
		return OP(3, 1)
	case AREMV,
		ADIVV:
		return OP(3, 6)
	case AREMVU,
		ADIVVU:
		return OP(3, 7)
	case AMULV:
		return OP(3, 4)
	case AMULVU:
		return OP(3, 5)

	case AJMP:
		return OP(1, 0)
	case AJAL:
		return OP(1, 1)

	case ABREAK:
		return OP(1, 5)
	case ASYSCALL:
		return OP(1, 4)
	case ATLBP:
		return MMU(1, 0)
	case ATLBR:
		return MMU(0, 1)
	case ATLBWI:
		return MMU(0, 2)
	case ATLBWR:
		return MMU(0, 6)
	case ARFE:
		return MMU(2, 0)

	case ADIVF:
		return FPF(0, 3)
	case ADIVD:
		return FPD(0, 3)
	case AMULF:
		return FPF(0, 2)
	case AMULD:
		return FPD(0, 2)
	case ASUBF:
		return FPF(0, 1)
	case ASUBD:
		return FPD(0, 1)
	case AADDF:
		return FPF(0, 0)
	case AADDD:
		return FPD(0, 0)
	case ATRUNCFV:
		return FPF(1, 1)
	case ATRUNCDV:
		return FPD(1, 1)
	case ATRUNCFW:
		return FPF(1, 5)
	case ATRUNCDW:
		return FPD(1, 5)
	case AMOVFV:
		return FPF(4, 5)
	case AMOVDV:
		return FPD(4, 5)
	case AMOVVF:
		return FPV(4, 0)
	case AMOVVD:
		return FPV(4, 1)
	case AMOVFW:
		return FPF(4, 4)
	case AMOVDW:
		return FPD(4, 4)
	case AMOVWF:
		return FPW(4, 0)
	case AMOVDF:
		return FPD(4, 0)
	case AMOVWD:
		return FPW(4, 1)
	case AMOVFD:
		return FPF(4, 1)
	case AABSF:
		return FPF(0, 5)
	case AABSD:
		return FPD(0, 5)
	case AMOVF:
		return FPF(0, 6)
	case AMOVD:
		return FPD(0, 6)
	case ANEGF:
		return FPF(0, 7)
	case ANEGD:
		return FPD(0, 7)
	case ACMPEQF:
		return FPF(6, 2)
	case ACMPEQD:
		return FPD(6, 2)
	case ACMPGTF:
		return FPF(7, 4)
	case ACMPGTD:
		return FPD(7, 4)
	case ACMPGEF:
		return FPF(7, 6)
	case ACMPGED:
		return FPD(7, 6)
	}

	if a >= ALAST {
		ctxt.Diag("bad rrr opcode %v+ALAST", obj.Aconv(a-ALAST))
	} else {
		ctxt.Diag("bad rrr opcode %v", obj.Aconv(a))
	}
	return 0
}
// If s==nil, copyu returns the set/use of v in p; otherwise, it
// modifies p to replace reads of v with reads of s and returns 0 for
// success or non-zero for failure.
//
// If s==nil, copy returns one of the following values:
//	1 if v only used
//	2 if v is set and used in one address (read-alter-rewrite;
//	  can't substitute)
//	3 if v is only set
//	4 if v is set in one address and used in another (so addresses
//	  can be rewritten independently)
//	0 otherwise (not touched)
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
	if p.From3Type() != obj.TYPE_NONE {
		// 9g never generates a from3
		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
	}

	switch p.As {
	default:
		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
		return 2

	case obj.ANOP, /* read p->from, write p->to */
		ppc64.AMOVH, ppc64.AMOVHZ, ppc64.AMOVB, ppc64.AMOVBZ,
		ppc64.AMOVW, ppc64.AMOVWZ, ppc64.AMOVD,
		ppc64.ANEG, ppc64.ANEGCC, ppc64.AADDME, ppc64.AADDMECC,
		ppc64.AADDZE, ppc64.AADDZECC, ppc64.ASUBME, ppc64.ASUBMECC,
		ppc64.ASUBZE, ppc64.ASUBZECC,
		ppc64.AFCTIW, ppc64.AFCTIWZ, ppc64.AFCTID, ppc64.AFCTIDZ,
		ppc64.AFCFID, ppc64.AFCFIDCC,
		ppc64.AFMOVS, ppc64.AFMOVD, ppc64.AFRSP,
		ppc64.AFNEG, ppc64.AFNEGCC:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			// Fix up implicit from
			if p.From.Type == obj.TYPE_NONE {
				p.From = p.To
			}
			if copyau(&p.From, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau(&p.To, v) {
			// p->to only indirectly uses v
			return 1
		}
		return 0

	case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
		ppc64.AMOVBZU, ppc64.AMOVHU, ppc64.AMOVHZU,
		ppc64.AMOVWZU, ppc64.AMOVDU:
		if p.From.Type == obj.TYPE_MEM {
			if copyas(&p.From, v) {
				// No s!=nil check; need to fail
				// anyway in that case
				return 2
			}

			if s != nil {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyas(&p.To, v) {
				return 3
			}
		} else if p.To.Type == obj.TYPE_MEM {
			if copyas(&p.To, v) {
				return 2
			}
			if s != nil {
				if copysub(&p.From, v, s, 1) != 0 {
					return 1
				}
				return 0
			}

			if copyau(&p.From, v) {
				return 1
			}
		} else {
			fmt.Printf("copyu: bad %v\n", p)
		}

		return 0

	case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
		ppc64.ARLWMICC:
		if copyas(&p.To, v) {
			return 2
		}
		fallthrough /* fall through */

	case ppc64.AADD, /* read p->from, read p->reg, write p->to */
		ppc64.AADDC, ppc64.AADDE, ppc64.ASUB,
		ppc64.ASLW, ppc64.ASRW, ppc64.ASRAW,
		ppc64.ASLD, ppc64.ASRD, ppc64.ASRAD,
		ppc64.AOR, ppc64.AORCC, ppc64.AORN, ppc64.AORNCC,
		ppc64.AAND, ppc64.AANDCC, ppc64.AANDN, ppc64.AANDNCC,
		ppc64.ANAND, ppc64.ANANDCC, ppc64.ANOR, ppc64.ANORCC,
		ppc64.AXOR,
		ppc64.AMULHW, ppc64.AMULHWU, ppc64.AMULLW, ppc64.AMULLD,
		ppc64.ADIVW, ppc64.ADIVD, ppc64.ADIVWU, ppc64.ADIVDU,
		ppc64.AREM, ppc64.AREMU, ppc64.AREMD, ppc64.AREMDU,
		ppc64.ARLWNM, ppc64.ARLWNMCC,
		ppc64.AFADDS, ppc64.AFADD, ppc64.AFSUBS, ppc64.AFSUB,
		ppc64.AFMULS, ppc64.AFMUL, ppc64.AFDIVS, ppc64.AFDIV:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			if copysub1(p, v, s, 1) != 0 {
				return 1
			}

			// Update only indirect uses of v in p->to
			if !copyas(&p.To, v) {
				if copysub(&p.To, v, s, 1) != 0 {
					return 1
				}
			}
			return 0
		}

		if copyas(&p.To, v) {
			if p.Reg == 0 {
				// Fix up implicit reg (e.g., ADD
				// R3,R4 -> ADD R3,R4,R4) so we can
				// update reg and to separately.
				p.Reg = p.To.Reg
			}

			if copyau(&p.From, v) {
				return 4
			}
			if copyau1(p, v) {
				return 4
			}
			return 3
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau1(p, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case ppc64.ABEQ, ppc64.ABGT, ppc64.ABGE, ppc64.ABLT,
		ppc64.ABLE, ppc64.ABNE, ppc64.ABVC, ppc64.ABVS:
		return 0

	case obj.ACHECKNIL, /* read p->from */
		ppc64.ACMP, /* read p->from, read p->to */
		ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU,
		ppc64.AFCMPO, ppc64.AFCMPU:
		if s != nil {
			if copysub(&p.From, v, s, 1) != 0 {
				return 1
			}
			return copysub(&p.To, v, s, 1)
		}

		if copyau(&p.From, v) {
			return 1
		}
		if copyau(&p.To, v) {
			return 1
		}
		return 0

	// 9g never generates a branch to a GPR (this isn't
	// even a normal instruction; liblink turns it into a
	// mov and a branch).
	case ppc64.ABR: /* read p->to */
		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 1
		}
		return 0

	case obj.ARET: /* funny */
		if s != nil {
			return 0
		}

		// All registers die at this point, so claim
		// everything is set (and not used).
		return 3

	case ppc64.ABL: /* funny */
		if v.Type == obj.TYPE_REG {
			// TODO(rsc): REG_R0 and REG_F0 used to be
			// (when register numbers started at 0) exregoffset and exfregoffset,
			// which are unset entirely.
			// It's strange that this handles R0 and F0 differently from the other
			// registers. Possible failure to optimize?
			if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
				return 2
			}
			if v.Reg == ppc64.REGARG {
				return 2
			}
			if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
				return 2
			}
		}

		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
			return 2
		}

		if s != nil {
			if copysub(&p.To, v, s, 1) != 0 {
				return 1
			}
			return 0
		}

		if copyau(&p.To, v) {
			return 4
		}
		return 3

	// R0 is zero, used by DUFFZERO, cannot be substituted.
	// R3 is ptr to memory, used and set, cannot be substituted.
	case obj.ADUFFZERO:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 0 {
				return 1
			}
			if v.Reg == 3 {
				return 2
			}
		}
		return 0

	// R3, R4 are ptr to src, dst, used and set, cannot be substituted.
	// R5 is scratch, set by DUFFCOPY, cannot be substituted.
	case obj.ADUFFCOPY:
		if v.Type == obj.TYPE_REG {
			if v.Reg == 3 || v.Reg == 4 {
				return 2
			}
			if v.Reg == 5 {
				return 3
			}
		}
		return 0

	case obj.ATEXT: /* funny */
		if v.Type == obj.TYPE_REG {
			if v.Reg == ppc64.REGARG {
				return 3
			}
		}
		return 0

	case obj.APCDATA, obj.AFUNCDATA, obj.AVARDEF, obj.AVARKILL, obj.AVARLIVE, obj.AUSEFIELD:
		return 0
	}
}