func dmethodptr(s *Sym, off int, x *Sym) int {
	duintptr(s, off, 0)
	r := obj.Addrel(Linksym(s))
	r.Off = int32(off)
	r.Siz = uint8(Widthptr)
	r.Sym = Linksym(x)
	r.Type = obj.R_METHOD
	return off + Widthptr
}
func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int {
	duintxxLSym(s, ot, 0, 4)
	r := obj.Addrel(s)
	r.Off = int32(ot)
	r.Siz = 4
	r.Sym = x
	r.Type = obj.R_METHODOFF
	return ot + 4
}
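// Both helpers above write a zeroed placeholder into the symbol and attach a
// relocation for the linker to resolve later: dmethodptr reserves a full
// pointer-sized slot (R_METHOD), while dmethodptrOffLSym reserves only a
// 4-byte offset (R_METHODOFF). A minimal, self-contained sketch of that
// placeholder-plus-relocation pattern, using toy types rather than the real
// obj package API:
package main

import "fmt"

// reloc and sym are toy stand-ins for a symbol's data and pending relocations.
type reloc struct {
	off, siz int    // byte range in data that the linker will patch
	target   string // what the relocation should resolve to
}

type sym struct {
	data   []byte
	relocs []reloc
}

// emitPlaceholder appends siz zero bytes and records a relocation covering
// them, mirroring the duintptr/duintxxLSym + obj.Addrel pattern above.
func emitPlaceholder(s *sym, siz int, target string) int {
	off := len(s.data)
	s.data = append(s.data, make([]byte, siz)...)
	s.relocs = append(s.relocs, reloc{off: off, siz: siz, target: target})
	return off + siz
}

func main() {
	var s sym
	emitPlaceholder(&s, 8, "pkg.T.Method") // like dmethodptr: pointer-sized slot
	emitPlaceholder(&s, 4, "pkg.T.Method") // like dmethodptrOffLSym: 4-byte offset
	fmt.Printf("%d data bytes, relocs: %+v\n", len(s.data), s.relocs)
}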
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { o1 := uint32(0) o2 := uint32(0) o3 := uint32(0) o4 := uint32(0) switch o.type_ { default: ctxt.Diag("unknown type %d %v", o.type_) prasm(p) case 0: /* pseudo ops */ break case 1: /* mov r1,r2 ==> OR r1,r0,r2 */ o1 = OP_RRR(oprrr(ctxt, AOR), uint32(p.From.Reg), uint32(REGZERO), uint32(p.To.Reg)) case 2: /* add/sub r1,[r2],r3 */ r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) case 3: /* mov $soreg, r ==> or/add $i,o,r */ v := regoff(ctxt, &p.From) r := int(p.From.Reg) if r == 0 { r = int(o.param) } a := AADDVU if o.a1 == C_ANDCON { a = AOR } o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg)) case 4: /* add $scon,[r1],r2 */ v := regoff(ctxt, &p.From) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg)) case 5: /* syscall */ o1 = uint32(oprrr(ctxt, int(p.As))) case 6: /* beq r1,[r2],sbra */ v := int32(0) if p.Pcond == nil { v = int32(-4) >> 2 } else { v = int32(p.Pcond.Pc-p.Pc-4) >> 2 } if (v<<16)>>16 != v { ctxt.Diag("short branch too far\n%v", p) } o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(p.From.Reg), uint32(p.Reg)) // for ABFPT and ABFPF only: always fill delay slot with 0 // see comments in func preprocess for details. o2 = 0 case 7: /* mov r, soreg ==> sw o(r) */ r := int(p.To.Reg) if r == 0 { r = int(o.param) } v := regoff(ctxt, &p.To) o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.From.Reg)) case 8: /* mov soreg, r ==> lw o(r) */ r := int(p.From.Reg) if r == 0 { r = int(o.param) } v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, int(p.As)+ALAST), uint32(v), uint32(r), uint32(p.To.Reg)) case 9: /* sll r1,[r2],r3 */ r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */ v := regoff(ctxt, &p.From) a := AOR if v < 0 { a = AADDU } o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP)) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o2 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 11: /* jmp lbra */ v := int32(0) if aclass(ctxt, &p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { // use PC-relative branch for short branches // BEQ R0, R0, sbra if p.Pcond == nil { v = int32(-4) >> 2 } else { v = int32(p.Pcond.Pc-p.Pc-4) >> 2 } if (v<<16)>>16 == v { o1 = OP_IRR(opirr(ctxt, ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) break } } if p.Pcond == nil { v = int32(p.Pc) >> 2 } else { v = int32(p.Pcond.Pc) >> 2 } o1 = OP_JMP(opirr(ctxt, int(p.As)), uint32(v)) if p.To.Sym == nil { p.To.Sym = ctxt.Cursym.Text.From.Sym p.To.Offset = p.Pcond.Pc } rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset if p.As == AJAL { rel.Type = obj.R_CALLMIPS } else { rel.Type = obj.R_JMPMIPS } case 12: /* movbs r,r */ v := 16 if p.As == AMOVB { v = 24 } o1 = OP_SRR(opirr(ctxt, ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg)) o2 = OP_SRR(opirr(ctxt, ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) case 13: /* movbu r,r */ if p.As == AMOVBU { o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg)) } else { o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xffff), uint32(p.From.Reg), uint32(p.To.Reg)) } case 14: /* movwu r,r */ o1 = OP_SRR(opirr(ctxt, ASLLV+ALAST), uint32(0), 
uint32(p.From.Reg), uint32(p.To.Reg)) if p.As == AMOVWU { o2 = OP_SRR(opirr(ctxt, ASRLV+ALAST), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) } else { o2 = OP_SRR(opirr(ctxt, ASRAV+ALAST), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) } case 16: /* sll $c,[r1],r2 */ v := regoff(ctxt, &p.From) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } /* OP_SRR will use only the low 5 bits of the shift value */ if v >= 32 && vshift(p.As) { o1 = OP_SRR(opirr(ctxt, int(p.As)+ALAST), uint32(v-32), uint32(r), uint32(p.To.Reg)) } else { o1 = OP_SRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg)) } case 18: /* jmp [r1],0(r2) */ r := int(p.Reg) if r == 0 { r = int(o.param) } o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(0), uint32(p.To.Reg), uint32(r)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 0 rel.Type = obj.R_CALLIND case 19: /* mov $lcon,r ==> lu+or */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) if p.From.Sym != nil { rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 8 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = obj.R_ADDRMIPS } case 20: /* mov lo/hi,r */ a := OP(2, 0) /* mfhi */ if p.From.Reg == REG_LO { a = OP(2, 2) /* mflo */ } o1 = OP_RRR(a, uint32(REGZERO), uint32(REGZERO), uint32(p.To.Reg)) case 21: /* mov r,lo/hi */ a := OP(2, 1) /* mthi */ if p.To.Reg == REG_LO { a = OP(2, 3) /* mtlo */ } o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO)) case 22: /* mul r1,r2 */ o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO)) case 23: /* add $lcon,r1,r2 ==> lu+or+add */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o3 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 24: /* mov $ucon,r ==> lu r */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg)) case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o2 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) r := int(p.From.Reg) if r == 0 { r = int(o.param) } o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */ v := regoff(ctxt, &p.From) r := int(p.From.Reg) if r == 0 { r = int(o.param) } a := AMOVF + ALAST if p.As == AMOVD { a = AMOVD + ALAST } switch o.size { case 16: o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) o3 = OP_RRR(opirr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP)) o4 = OP_IRR(opirr(ctxt, a), uint32(0), uint32(r), uint32(p.To.Reg)) case 4: o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg)) } case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */ v := regoff(ctxt, &p.To) r := int(p.To.Reg) if r == 
0 { r = int(o.param) } a := AMOVF if p.As == AMOVD { a = AMOVD } switch o.size { case 16: o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) o3 = OP_RRR(opirr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP)) o4 = OP_IRR(opirr(ctxt, a), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) case 4: o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.From.Reg)) } case 30: /* movw r,fr */ a := SP(2, 1) | (4 << 21) /* mtc1 */ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) case 31: /* movw fr,r */ a := SP(2, 1) | (0 << 21) /* mtc1 */ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) case 32: /* fadd fr1,[fr2],fr3 */ r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_FRRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) case 33: /* fabs fr1, fr3 */ o1 = OP_FRRR(oprrr(ctxt, int(p.As)), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */ v := regoff(ctxt, &p.From) a := AADDU if o.a1 == C_ANDCON { a = AOR } o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP)) o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */ case 35: /* mov r,lext/auto/oreg ==> sw o(r) */ v := regoff(ctxt, &p.To) r := int(p.To.Reg) if r == 0 { r = int(o.param) } o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP)) o4 = OP_IRR(opirr(ctxt, int(p.As)), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) case 36: /* mov lext/auto/oreg,r ==> lw o(r30) */ v := regoff(ctxt, &p.From) r := int(p.From.Reg) if r == 0 { r = int(o.param) } o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP)) o4 = OP_IRR(opirr(ctxt, int(p.As)+ALAST), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) case 37: /* movw r,mr */ a := SP(2, 0) | (4 << 21) /* mtc0 */ if p.As == AMOVV { a = SP(2, 0) | (5 << 21) /* dmtc0 */ } o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) case 38: /* movw mr,r */ a := SP(2, 0) | (0 << 21) /* mfc0 */ if p.As == AMOVV { a = SP(2, 0) | (1 << 21) /* dmfc0 */ } o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) case 40: /* word */ o1 = uint32(regoff(ctxt, &p.From)) case 41: /* movw f,fcr */ o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(REGZERO), uint32(0), uint32(p.To.Reg)) /* mfcc1 */ o2 = OP_RRR(SP(2, 1)|(6<<21), uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) /* mtcc1 */ case 42: /* movw fcr,r */ o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) /* mfcc1 */ case 47: /* movv r,fr */ a := SP(2, 1) | (5 << 21) /* dmtc1 */ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) case 48: /* movv fr,r */ a := SP(2, 1) | (1 << 21) /* dmtc1 */ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) case 49: /* undef */ o1 = 8 /* JMP (R0) */ /* relocation operations */ case 50: /* mov r,addr ==> lu + or + sw (REGTMP) */ o1 = OP_IRR(opirr(ctxt, ALAST), uint32(0), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 8 rel.Sym = p.To.Sym rel.Add = p.To.Offset rel.Type = 
obj.R_ADDRMIPS o3 = OP_IRR(opirr(ctxt, int(p.As)), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) case 51: /* mov addr,r ==> lu + or + lw (REGTMP) */ o1 = OP_IRR(opirr(ctxt, ALAST), uint32(0), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 8 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = obj.R_ADDRMIPS o3 = OP_IRR(opirr(ctxt, int(p.As)+ALAST), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) } out[0] = o1 out[1] = o2 out[2] = o3 out[3] = o4 return }
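// asmout builds each machine word through packing helpers (OP_RRR, OP_IRR,
// OP_SRR, OP_JMP) defined elsewhere in the package. The hypothetical versions
// below are only a sketch of the standard MIPS field layout, for reading the
// cases above; the real helpers may order their arguments differently:
//   R-type: op | rs<<21 | rt<<16 | rd<<11 | sa<<6 | funct
//   I-type: op | rs<<21 | rt<<16 | imm16
//   J-type: op | target26
package main

import "fmt"

func opRRR(op, rt, rs, rd uint32) uint32  { return op | (rs&31)<<21 | (rt&31)<<16 | (rd&31)<<11 }
func opIRR(op, imm, rs, rt uint32) uint32 { return op | (rs&31)<<21 | (rt&31)<<16 | imm&0xFFFF }
func opSRR(op, sa, rt, rd uint32) uint32  { return op | (rt&31)<<16 | (rd&31)<<11 | (sa&31)<<6 }
func opJMP(op, target uint32) uint32      { return op | target&0x3FFFFFF }

func main() {
	// ADDIU $t0, $zero, 1 (major opcode 9) encodes to 0x24080001.
	fmt.Printf("%#08x\n", opIRR(9<<26, 1, 0, 8))
}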
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { // TODO(minux): add morestack short-cuts with small fixed frame-size. ctxt.Cursym = cursym if cursym.Text == nil || cursym.Text.Link == nil { return } p := cursym.Text textstksiz := p.To.Offset if textstksiz == -8 { // Compatibility hack. p.From3.Offset |= obj.NOFRAME textstksiz = 0 } if textstksiz%8 != 0 { ctxt.Diag("frame size %d not a multiple of 8", textstksiz) } if p.From3.Offset&obj.NOFRAME != 0 { if textstksiz != 0 { ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz) } } cursym.Args = p.To.Val.(int32) cursym.Locals = int32(textstksiz) /* * find leaf subroutines * strip NOPs * expand RET * expand BECOME pseudo */ if ctxt.Debugvlog != 0 { fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime()) } ctxt.Bso.Flush() var q *obj.Prog var q1 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { switch p.As { /* too hard, just leave alone */ case obj.ATEXT: q = p p.Mark |= LABEL | LEAF | SYNC if p.Link != nil { p.Link.Mark |= LABEL } case ANOR: q = p if p.To.Type == obj.TYPE_REG { if p.To.Reg == REGZERO { p.Mark |= LABEL | SYNC } } case ALWAR, ASTWCCC, AECIWX, AECOWX, AEIEIO, AICBI, AISYNC, ATLBIE, ATLBIEL, ASLBIA, ASLBIE, ASLBMFEE, ASLBMFEV, ASLBMTE, ADCBF, ADCBI, ADCBST, ADCBT, ADCBTST, ADCBZ, ASYNC, ATLBSYNC, APTESYNC, ATW, AWORD, ARFI, ARFCI, ARFID, AHRFID: q = p p.Mark |= LABEL | SYNC continue case AMOVW, AMOVWZ, AMOVD: q = p if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL { p.Mark |= LABEL | SYNC } continue case AFABS, AFABSCC, AFADD, AFADDCC, AFCTIW, AFCTIWCC, AFCTIWZ, AFCTIWZCC, AFDIV, AFDIVCC, AFMADD, AFMADDCC, AFMOVD, AFMOVDU, /* case AFMOVDS: */ AFMOVS, AFMOVSU, /* case AFMOVSD: */ AFMSUB, AFMSUBCC, AFMUL, AFMULCC, AFNABS, AFNABSCC, AFNEG, AFNEGCC, AFNMADD, AFNMADDCC, AFNMSUB, AFNMSUBCC, AFRSP, AFRSPCC, AFSUB, AFSUBCC: q = p p.Mark |= FLOAT continue case ABL, ABCL, obj.ADUFFZERO, obj.ADUFFCOPY: cursym.Text.Mark &^= LEAF fallthrough case ABC, ABEQ, ABGE, ABGT, ABLE, ABLT, ABNE, ABR, ABVC, ABVS: p.Mark |= BRANCH q = p q1 = p.Pcond if q1 != nil { for q1.As == obj.ANOP { q1 = q1.Link p.Pcond = q1 } if q1.Mark&LEAF == 0 { q1.Mark |= LABEL } } else { p.Mark |= LABEL } q1 = p.Link if q1 != nil { q1.Mark |= LABEL } continue case AFCMPO, AFCMPU: q = p p.Mark |= FCMP | FLOAT continue case obj.ARET: q = p if p.Link != nil { p.Link.Mark |= LABEL } continue case obj.ANOP: q1 = p.Link q.Link = q1 /* q is non-nop */ q1.Mark |= p.Mark continue default: q = p continue } } autosize := int32(0) var aoffset int var mov int var o int var p1 *obj.Prog var p2 *obj.Prog for p := cursym.Text; p != nil; p = p.Link { o = int(p.As) switch o { case obj.ATEXT: mov = AMOVD aoffset = 0 autosize = int32(textstksiz) if p.Mark&LEAF != 0 && autosize == 0 && p.From3.Offset&obj.NOFRAME == 0 { // A leaf function with no locals has no frame. p.From3.Offset |= obj.NOFRAME } if p.From3.Offset&obj.NOFRAME == 0 { // If there is a stack frame at all, it includes // space to save the LR. autosize += int32(ctxt.FixedFrameSize()) } p.To.Offset = int64(autosize) q = p if ctxt.Flag_shared != 0 && cursym.Name != "runtime.duffzero" && cursym.Name != "runtime.duffcopy" { // When compiling Go into PIC, all functions must start // with instructions to load the TOC pointer into r2: // // addis r2, r12, .TOC.-func@ha // addi r2, r2, .TOC.-func@l+4 // // We could probably skip this prologue in some situations // but it's a bit subtle. 
However, it is both safe and // necessary to leave the prologue off duffzero and // duffcopy as we rely on being able to jump to a specific // instruction offset for them. // // These are AWORDS because there is no (afaict) way to // generate the addis instruction except as part of the // load of a large constant, and in that case there is no // way to use r12 as the source. q = obj.Appendp(ctxt, q) q.As = AWORD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = 0x3c4c0000 q = obj.Appendp(ctxt, q) q.As = AWORD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = 0x38420000 rel := obj.Addrel(ctxt.Cursym) rel.Off = 0 rel.Siz = 8 rel.Sym = obj.Linklookup(ctxt, ".TOC.", 0) rel.Type = obj.R_ADDRPOWER_PCREL } if cursym.Text.From3.Offset&obj.NOSPLIT == 0 { q = stacksplit(ctxt, q, autosize) // emit split check } if autosize != 0 { /* use MOVDU to adjust R1 when saving R31, if autosize is small */ if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG { mov = AMOVDU aoffset = int(-autosize) } else { q = obj.Appendp(ctxt, q) q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(-autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = +autosize } } else if cursym.Text.Mark&LEAF == 0 { // A very few functions that do not return to their caller // (e.g. gogo) are not identified as leaves but still have // no frame. cursym.Text.Mark |= LEAF } if cursym.Text.Mark&LEAF != 0 { cursym.Leaf = 1 break } q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REG_LR q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q = obj.Appendp(ctxt, q) q.As = int16(mov) q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_MEM q.To.Offset = int64(aoffset) q.To.Reg = REGSP if q.As == AMOVDU { q.Spadj = int32(-aoffset) } if ctxt.Flag_shared != 0 { q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.To.Type = obj.TYPE_MEM q.To.Reg = REGSP q.To.Offset = 24 } if cursym.Text.From3.Offset&obj.WRAPPER != 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // // MOVD g_panic(g), R3 // CMP R0, R3 // BEQ end // MOVD panic_argp(R3), R4 // ADD $(autosize+8), R1, R5 // CMP R4, R5 // BNE end // ADD $8, R1, R6 // MOVD R6, panic_argp(R3) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes. 
q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REGG q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R0 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R3 q = obj.Appendp(ctxt, q) q.As = ABEQ q.To.Type = obj.TYPE_BRANCH p1 = q q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_MEM q.From.Reg = REG_R3 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) + ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R4 q.To.Type = obj.TYPE_REG q.To.Reg = REG_R5 q = obj.Appendp(ctxt, q) q.As = ABNE q.To.Type = obj.TYPE_BRANCH p2 = q q = obj.Appendp(ctxt, q) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = ctxt.FixedFrameSize() q.Reg = REGSP q.To.Type = obj.TYPE_REG q.To.Reg = REG_R6 q = obj.Appendp(ctxt, q) q.As = AMOVD q.From.Type = obj.TYPE_REG q.From.Reg = REG_R6 q.To.Type = obj.TYPE_MEM q.To.Reg = REG_R3 q.To.Offset = 0 // Panic.argp q = obj.Appendp(ctxt, q) q.As = obj.ANOP p1.Pcond = q p2.Pcond = q } case obj.ARET: if p.From.Type == obj.TYPE_CONST { ctxt.Diag("using BECOME (%v) is not supported!", p) break } retTarget := p.To.Sym if cursym.Text.Mark&LEAF != 0 { if autosize == 0 { p.As = ABR p.From = obj.Addr{} if retTarget == nil { p.To.Type = obj.TYPE_REG p.To.Reg = REG_LR } else { p.To.Type = obj.TYPE_BRANCH p.To.Sym = retTarget } p.Mark |= BRANCH break } p.As = AADD p.From.Type = obj.TYPE_CONST p.From.Offset = int64(autosize) p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -autosize q = ctxt.NewProg() q.As = ABR q.Lineno = p.Lineno q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Mark |= BRANCH q.Spadj = +autosize q.Link = p.Link p.Link = q break } p.As = AMOVD p.From.Type = obj.TYPE_MEM p.From.Offset = 0 p.From.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_REG q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REG_LR q.Link = p.Link p.Link = q p = q if false { // Debug bad returns q = ctxt.NewProg() q.As = AMOVD q.Lineno = p.Lineno q.From.Type = obj.TYPE_MEM q.From.Offset = 0 q.From.Reg = REGTMP q.To.Type = obj.TYPE_REG q.To.Reg = REGTMP q.Link = p.Link p.Link = q p = q } if autosize != 0 { q = ctxt.NewProg() q.As = AADD q.Lineno = p.Lineno q.From.Type = obj.TYPE_CONST q.From.Offset = int64(autosize) q.To.Type = obj.TYPE_REG q.To.Reg = REGSP q.Spadj = -autosize q.Link = p.Link p.Link = q } q1 = ctxt.NewProg() q1.As = ABR q1.Lineno = p.Lineno if retTarget == nil { q1.To.Type = obj.TYPE_REG q1.To.Reg = REG_LR } else { q1.To.Type = obj.TYPE_BRANCH q1.To.Sym = retTarget } q1.Mark |= BRANCH q1.Spadj = +autosize q1.Link = q.Link q.Link = q1 case AADD: if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { p.Spadj = int32(-p.From.Offset) } } } }
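// The shared-code (PIC) prologue emitted above consists of two AWORDs,
// 0x3c4c0000 and 0x38420000, plus an R_ADDRPOWER_PCREL relocation that later
// patches in the .TOC.-func offsets. The constants are plain D-form ppc64
// instructions with a zero displacement; a quick check of the field
// arithmetic (opcode 15 = addis, opcode 14 = addi; the dform helper is only
// for illustration):
package main

import "fmt"

func dform(opcode, rt, ra, d uint32) uint32 {
	return opcode<<26 | rt<<21 | ra<<16 | d&0xFFFF
}

func main() {
	fmt.Printf("%#08x\n", dform(15, 2, 12, 0)) // addis r2, r12, 0 => 0x3c4c0000
	fmt.Printf("%#08x\n", dform(14, 2, 2, 0))  // addi r2, r2, 0   => 0x38420000
}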
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { o1 := uint32(0) o2 := uint32(0) o3 := uint32(0) o4 := uint32(0) add := AADDU if ctxt.Mode&Mips64 != 0 { add = AADDVU } switch o.type_ { default: ctxt.Diag("unknown type %d %v", o.type_) prasm(p) case 0: /* pseudo ops */ break case 1: /* mov r1,r2 ==> OR r1,r0,r2 */ a := AOR if p.As == AMOVW && ctxt.Mode&Mips64 != 0 { a = AADDU // sign-extended to high 32 bits } o1 = OP_RRR(oprrr(ctxt, a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) case 2: /* add/sub r1,[r2],r3 */ r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) case 3: /* mov $soreg, r ==> or/add $i,o,r */ v := regoff(ctxt, &p.From) r := int(p.From.Reg) if r == 0 { r = int(o.param) } a := add if o.a1 == C_ANDCON { a = AOR } o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg)) case 4: /* add $scon,[r1],r2 */ v := regoff(ctxt, &p.From) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg)) case 5: /* syscall */ o1 = oprrr(ctxt, p.As) case 6: /* beq r1,[r2],sbra */ v := int32(0) if p.Pcond == nil { v = int32(-4) >> 2 } else { v = int32(p.Pcond.Pc-p.Pc-4) >> 2 } if (v<<16)>>16 != v { ctxt.Diag("short branch too far\n%v", p) } o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg)) // for ABFPT and ABFPF only: always fill delay slot with 0 // see comments in func preprocess for details. o2 = 0 case 7: /* mov r, soreg ==> sw o(r) */ r := int(p.To.Reg) if r == 0 { r = int(o.param) } v := regoff(ctxt, &p.To) o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.From.Reg)) case 8: /* mov soreg, r ==> lw o(r) */ r := int(p.From.Reg) if r == 0 { r = int(o.param) } v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(r), uint32(p.To.Reg)) case 9: /* sll r1,[r2],r3 */ r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */ v := regoff(ctxt, &p.From) a := AOR if v < 0 { a = AADDU } o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP)) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 11: /* jmp lbra */ v := int32(0) if aclass(ctxt, &p.To) == C_SBRA && p.To.Sym == nil && p.As == AJMP { // use PC-relative branch for short branches // BEQ R0, R0, sbra if p.Pcond == nil { v = int32(-4) >> 2 } else { v = int32(p.Pcond.Pc-p.Pc-4) >> 2 } if (v<<16)>>16 == v { o1 = OP_IRR(opirr(ctxt, ABEQ), uint32(v), uint32(REGZERO), uint32(REGZERO)) break } } if p.Pcond == nil { v = int32(p.Pc) >> 2 } else { v = int32(p.Pcond.Pc) >> 2 } o1 = OP_JMP(opirr(ctxt, p.As), uint32(v)) if p.To.Sym == nil { p.To.Sym = ctxt.Cursym.Text.From.Sym p.To.Offset = p.Pcond.Pc } rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset if p.As == AJAL { rel.Type = obj.R_CALLMIPS } else { rel.Type = obj.R_JMPMIPS } case 12: /* movbs r,r */ v := 16 if p.As == AMOVB { v = 24 } o1 = OP_SRR(opirr(ctxt, ASLL), uint32(v), uint32(p.From.Reg), uint32(p.To.Reg)) o2 = OP_SRR(opirr(ctxt, ASRA), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) case 13: /* movbu r,r */ if p.As == AMOVBU { o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xff), uint32(p.From.Reg), uint32(p.To.Reg)) } else { o1 = OP_IRR(opirr(ctxt, AAND), uint32(0xffff), uint32(p.From.Reg), 
uint32(p.To.Reg)) } case 14: /* movwu r,r */ o1 = OP_SRR(opirr(ctxt, -ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) o2 = OP_SRR(opirr(ctxt, -ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) case 15: /* teq $c r,r */ v := regoff(ctxt, &p.From) r := int(p.Reg) if r == 0 { r = REGZERO } /* only use 10 bits of trap code */ o1 = OP_IRR(opirr(ctxt, p.As), (uint32(v)&0x3FF)<<6, uint32(p.Reg), uint32(p.To.Reg)) case 16: /* sll $c,[r1],r2 */ v := regoff(ctxt, &p.From) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } /* OP_SRR will use only the low 5 bits of the shift value */ if v >= 32 && vshift(p.As) { o1 = OP_SRR(opirr(ctxt, -p.As), uint32(v-32), uint32(r), uint32(p.To.Reg)) } else { o1 = OP_SRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg)) } case 17: o1 = OP_RRR(oprrr(ctxt, p.As), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) case 18: /* jmp [r1],0(r2) */ r := int(p.Reg) if r == 0 { r = int(o.param) } o1 = OP_RRR(oprrr(ctxt, p.As), uint32(0), uint32(p.To.Reg), uint32(r)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 0 rel.Type = obj.R_CALLIND case 19: /* mov $lcon,r ==> lu+or */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg)) case 20: /* mov lo/hi,r */ a := OP(2, 0) /* mfhi */ if p.From.Reg == REG_LO { a = OP(2, 2) /* mflo */ } o1 = OP_RRR(a, uint32(REGZERO), uint32(REGZERO), uint32(p.To.Reg)) case 21: /* mov r,lo/hi */ a := OP(2, 1) /* mthi */ if p.To.Reg == REG_LO { a = OP(2, 3) /* mtlo */ } o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO)) case 22: /* mul r1,r2 [r3]*/ if p.To.Reg != 0 { r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } a := SP(3, 4) | 2 /* mul */ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) } else { o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO)) } case 23: /* add $lcon,r1,r2 ==> lu+or+add */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o3 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 24: /* mov $ucon,r ==> lu r */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg)) case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */ v := regoff(ctxt, &p.From) o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP)) r := int(p.From.Reg) if r == 0 { r = int(o.param) } o3 = OP_RRR(oprrr(ctxt, add), uint32(REGTMP), uint32(r), uint32(p.To.Reg)) case 27: /* mov [sl]ext/auto/oreg,fr ==> lwc1 o(r) */ v := regoff(ctxt, &p.From) r := int(p.From.Reg) if r == 0 { r = int(o.param) } a := -AMOVF if p.As == AMOVD { a = -AMOVD } switch o.size { case 12: o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP)) o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.To.Reg)) case 4: o1 = 
OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.To.Reg)) } case 28: /* mov fr,[sl]ext/auto/oreg ==> swc1 o(r) */ v := regoff(ctxt, &p.To) r := int(p.To.Reg) if r == 0 { r = int(o.param) } a := AMOVF if p.As == AMOVD { a = AMOVD } switch o.size { case 12: o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP)) o3 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(REGTMP), uint32(p.From.Reg)) case 4: o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(r), uint32(p.From.Reg)) } case 30: /* movw r,fr */ a := SP(2, 1) | (4 << 21) /* mtc1 */ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) case 31: /* movw fr,r */ a := SP(2, 1) | (0 << 21) /* mtc1 */ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) case 32: /* fadd fr1,[fr2],fr3 */ r := int(p.Reg) if r == 0 { r = int(p.To.Reg) } o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg)) case 33: /* fabs fr1, fr3 */ o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */ v := regoff(ctxt, &p.From) a := AADDU if o.a1 == C_ANDCON { a = AOR } o1 = OP_IRR(opirr(ctxt, a), uint32(v), uint32(0), uint32(REGTMP)) o2 = OP_RRR(SP(2, 1)|(4<<21), uint32(REGTMP), uint32(0), uint32(p.To.Reg)) /* mtc1 */ case 35: /* mov r,lext/auto/oreg ==> sw o(REGTMP) */ v := regoff(ctxt, &p.To) r := int(p.To.Reg) if r == 0 { r = int(o.param) } o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP)) o3 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(REGTMP), uint32(p.From.Reg)) case 36: /* mov lext/auto/oreg,r ==> lw o(REGTMP) */ v := regoff(ctxt, &p.From) r := int(p.From.Reg) if r == 0 { r = int(o.param) } o1 = OP_IRR(opirr(ctxt, ALUI), uint32((v+1<<15)>>16), uint32(REGZERO), uint32(REGTMP)) o2 = OP_RRR(oprrr(ctxt, add), uint32(r), uint32(REGTMP), uint32(REGTMP)) o3 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(REGTMP), uint32(p.To.Reg)) case 37: /* movw r,mr */ a := SP(2, 0) | (4 << 21) /* mtc0 */ if p.As == AMOVV { a = SP(2, 0) | (5 << 21) /* dmtc0 */ } o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) case 38: /* movw mr,r */ a := SP(2, 0) | (0 << 21) /* mfc0 */ if p.As == AMOVV { a = SP(2, 0) | (1 << 21) /* dmfc0 */ } o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) case 40: /* word */ o1 = uint32(regoff(ctxt, &p.From)) case 41: /* movw f,fcr */ o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(REGZERO), uint32(0), uint32(p.To.Reg)) /* mfcc1 */ o2 = OP_RRR(SP(2, 1)|(6<<21), uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) /* mtcc1 */ case 42: /* movw fcr,r */ o1 = OP_RRR(SP(2, 1)|(2<<21), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) /* mfcc1 */ case 47: /* movv r,fr */ a := SP(2, 1) | (5 << 21) /* dmtc1 */ o1 = OP_RRR(a, uint32(p.From.Reg), uint32(0), uint32(p.To.Reg)) case 48: /* movv fr,r */ a := SP(2, 1) | (1 << 21) /* dmtc1 */ o1 = OP_RRR(a, uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) case 49: /* undef */ o1 = 52 /* trap -- teq r0, r0 */ /* relocation operations */ case 50: /* mov r,addr ==> lu + add REGSB, REGTMP + sw o(REGTMP) */ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset rel.Type = obj.R_ADDRMIPSU o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) rel2 
:= obj.Addrel(ctxt.Cursym) rel2.Off = int32(ctxt.Pc + 4) rel2.Siz = 4 rel2.Sym = p.To.Sym rel2.Add = p.To.Offset rel2.Type = obj.R_ADDRMIPS if o.size == 12 { o3 = o2 o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP)) rel2.Off += 4 } case 51: /* mov addr,r ==> lu + add REGSB, REGTMP + lw o(REGTMP) */ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = obj.R_ADDRMIPSU o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) rel2 := obj.Addrel(ctxt.Cursym) rel2.Off = int32(ctxt.Pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset rel2.Type = obj.R_ADDRMIPS if o.size == 12 { o3 = o2 o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(REGTMP), uint32(REGTMP)) rel2.Off += 4 } case 52: /* mov $lext, r ==> lu + add REGSB, r + add */ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(p.To.Reg)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = obj.R_ADDRMIPSU o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) rel2 := obj.Addrel(ctxt.Cursym) rel2.Off = int32(ctxt.Pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset rel2.Type = obj.R_ADDRMIPS if o.size == 12 { o3 = o2 o2 = OP_RRR(oprrr(ctxt, AADDVU), uint32(REGSB), uint32(p.To.Reg), uint32(p.To.Reg)) rel2.Off += 4 } case 53: /* mov r, tlsvar ==> rdhwr + sw o(r3) */ // clobbers R3 ! // load thread pointer with RDHWR, R3 is used for fast kernel emulation on Linux o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3 o2 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REG_R3), uint32(p.From.Reg)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc + 4) rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset rel.Type = obj.R_ADDRMIPSTLS case 54: /* mov tlsvar, r ==> rdhwr + lw o(r3) */ // clobbers R3 ! o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3 o2 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REG_R3), uint32(p.To.Reg)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc + 4) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = obj.R_ADDRMIPSTLS case 55: /* mov $tlsvar, r ==> rdhwr + add */ // clobbers R3 ! o1 = (037<<26 + 073) | (29 << 11) | (3 << 16) // rdhwr $29, r3 o2 = OP_IRR(opirr(ctxt, add), uint32(0), uint32(REG_R3), uint32(p.To.Reg)) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc + 4) rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset rel.Type = obj.R_ADDRMIPSTLS } out[0] = o1 out[1] = o2 out[2] = o3 out[3] = o4 return }
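// The TLS cases (53-55) above all start with the same opaque magic word. A
// small worked decode, following the MIPS field layout: 037 (octal) is the
// SPECIAL3 major opcode, 073 (octal) is the RDHWR function code, rd selects
// the hardware register to read, and rt is the destination GPR. Hardware
// register 29 is the kernel-maintained user-local (TLS) pointer, and the
// destination is R3, which is why these cases note that they clobber R3.
package main

import "fmt"

func main() {
	word := uint32(037<<26+073) | 29<<11 | 3<<16
	fmt.Printf("%#08x\n", word) // 0x7c03e83b: rdhwr reading hw reg $29 into R3
}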
func dcommontype(s *Sym, ot int, t *Type) int {
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg < 0 || alg == AMEM {
		algsym = dalgsym(t)
	}

	tptr := Ptrto(t)
	if !Isptr[t.Etype] && (t.Sym != nil || methods(tptr) != nil) {
		sptr := dtypesym(tptr)
		r := obj.Addrel(Linksym(s))
		r.Off = 0
		r.Siz = 0
		r.Sym = sptr.Lsym
		r.Type = obj.R_USETYPE
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size       uintptr
	//		ptrdata    uintptr
	//		hash       uint32
	//		tflag      tflag
	//		align      uint8
	//		fieldAlign uint8
	//		kind       uint8
	//		alg        *typeAlg
	//		gcdata     *byte
	//		string     *string
	//	}
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))
	ot = duint32(s, ot, typehash(t))

	var tflag uint8
	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	}
	ot = duint8(s, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)
	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if t.Etype == TARRAY && t.Bound < 0 {
		i = obj.KindSlice
	}
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind

	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0) // gcdata

	p := Tconv(t, FmtLeft|FmtUnsigned)

	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	prefix := 0
	if !strings.HasPrefix(p, "*") {
		p = "*" + p
		prefix = 1
	}
	_, symdata := stringsym(p) // string
	ot = dsymptr(s, ot, symdata, prefix)
	ot = duintxx(s, ot, uint64(len(p)-prefix), Widthint)

	return ot
}
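// The prefix bookkeeping at the end of the function above implements the
// "*T"[1:] trick described in its comment: the reflect string for T points
// one byte into the data emitted for *T, so both names share one backing
// array. A plain-Go analogue of the same idea:
package main

import "fmt"

func main() {
	star := "*main.Foo"
	name := star[1:] // no new allocation: same bytes as star, skipping the '*'
	fmt.Println(star, name, len(name) == len(star)-1)
}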
func dcommontype(s *Sym, ot int, t *Type) int {
	if ot != 0 {
		Fatalf("dcommontype %d", ot)
	}

	sizeofAlg := 2 * Widthptr
	if dcommontype_algarray == nil {
		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
	}
	dowidth(t)
	alg := algtype(t)
	var algsym *Sym
	if alg < 0 || alg == AMEM {
		algsym = dalgsym(t)
	}

	tptr := Ptrto(t)
	if !Isptr[t.Etype] && (t.Sym != nil || methods(tptr) != nil) {
		sptr := dtypesym(tptr)
		r := obj.Addrel(Linksym(s))
		r.Off = 0
		r.Siz = 0
		r.Sym = sptr.Lsym
		r.Type = obj.R_USETYPE
	}

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	//	type rtype struct {
	//		size          uintptr
	//		ptrdata       uintptr
	//		hash          uint32
	//		_             uint8
	//		align         uint8
	//		fieldAlign    uint8
	//		kind          uint8
	//		alg           *typeAlg
	//		gcdata        *byte
	//		string        *string
	//		*uncommonType
	//	}
	ot = duintptr(s, ot, uint64(t.Width))
	ot = duintptr(s, ot, uint64(ptrdata))
	ot = duint32(s, ot, typehash(t))
	ot = duint8(s, ot, 0) // unused

	// runtime (and common sense) expects alignment to be a power of two.
	i := int(t.Align)
	if i == 0 {
		i = 1
	}
	if i&(i-1) != 0 {
		Fatalf("invalid alignment %d for %v", t.Align, t)
	}
	ot = duint8(s, ot, t.Align) // align
	ot = duint8(s, ot, t.Align) // fieldAlign

	i = kinds[t.Etype]
	if t.Etype == TARRAY && t.Bound < 0 {
		i = obj.KindSlice
	}
	if !haspointers(t) {
		i |= obj.KindNoPointers
	}
	if isdirectiface(t) {
		i |= obj.KindDirectIface
	}
	if useGCProg {
		i |= obj.KindGCProg
	}
	ot = duint8(s, ot, uint8(i)) // kind

	if algsym == nil {
		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
	} else {
		ot = dsymptr(s, ot, algsym, 0)
	}
	ot = dsymptr(s, ot, gcsym, 0) // gcdata

	p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
	_, symdata := stringsym(p) // string
	ot = dsymptr(s, ot, symdata, 0)
	ot = duintxx(s, ot, uint64(len(p)), Widthint)
	//fmt.Printf("dcommontype: %s\n", p)

	// skip pointer to extraType,
	// which follows the rest of this type structure.
	// caller will fill in if needed.
	// otherwise linker will assume 0.
	ot += Widthptr

	return ot
}
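// Both versions of dcommontype validate t.Align with i&(i-1) != 0. For i > 0,
// i&(i-1) clears the lowest set bit, so the expression is zero exactly when a
// single bit is set, i.e. when the alignment is a power of two. A tiny check
// of that identity:
package main

import "fmt"

func isPow2(i int) bool { return i > 0 && i&(i-1) == 0 }

func main() {
	for _, i := range []int{1, 2, 3, 4, 6, 8, 16} {
		fmt.Println(i, isPow2(i))
	}
}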