func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
	switch r.Variant & ld.RV_TYPE_MASK {
	default:
		ld.Diag("unexpected relocation variant %d", r.Variant)
		fallthrough

	case ld.RV_NONE:
		return t

	case ld.RV_POWER_LO:
		if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
			// Whether to check for signed or unsigned
			// overflow depends on the instruction.
			var o1 uint32
			if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
				o1 = ld.Be32(s.P[r.Off-2:])
			} else {
				o1 = ld.Le32(s.P[r.Off:])
			}
			switch o1 >> 26 {
			case 24, // ori
				26, // xori
				28: // andi
				if t>>16 != 0 {
					goto overflow
				}

			default:
				if int64(int16(t)) != t {
					goto overflow
				}
			}
		}
		return int64(int16(t))

	case ld.RV_POWER_HA:
		t += 0x8000
		fallthrough

	case ld.RV_POWER_HI:
		t >>= 16

		if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
			// Whether to check for signed or unsigned
			// overflow depends on the instruction.
			var o1 uint32
			if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
				o1 = ld.Be32(s.P[r.Off-2:])
			} else {
				o1 = ld.Le32(s.P[r.Off:])
			}
			switch o1 >> 26 {
			case 25, // oris
				27, // xoris
				29: // andis
				if t>>16 != 0 {
					goto overflow
				}

			default:
				if int64(int16(t)) != t {
					goto overflow
				}
			}
		}
		return int64(int16(t))

	case ld.RV_POWER_DS:
		var o1 uint32
		if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
			o1 = uint32(ld.Be16(s.P[r.Off:]))
		} else {
			o1 = uint32(ld.Le16(s.P[r.Off:]))
		}
		if t&3 != 0 {
			ld.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
		}
		if (r.Variant&ld.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t {
			goto overflow
		}
		return int64(o1)&0x3 | int64(int16(t))
	}

overflow:
	ld.Diag("relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t)
	return t
}
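
// archreloc computes the final value of relocation r against symbol s,
// storing the result through val. It returns 0 when the relocation was
// handled here and -1 when it was not (external linking or an
// unrecognized relocation type).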
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
	if ld.Linkmode == ld.LinkExternal {
		// TODO(minux): translate R_ADDRPOWER and R_CALLPOWER into standard ELF relocations.
		// R_ADDRPOWER corresponds to R_PPC_ADDR16_HA and R_PPC_ADDR16_LO.
		// R_CALLPOWER corresponds to R_PPC_REL24.
		return -1
	}

	switch r.Type {
	case ld.R_CONST:
		*val = r.Add
		return 0

	case ld.R_GOTOFF:
		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0))
		return 0

	case ld.R_ADDRPOWER:
		// r->add is two ppc64 instructions holding an immediate 32-bit constant.
		// We want to add r->sym's address to that constant.
		// The encoding of the immediate is x<<16 + y,
		// where x is the low 16 bits of the first instruction and y is the low 16
		// bits of the second. Both x and y are signed (int16, not uint16).
		o1 := uint32(r.Add >> 32)
		o2 := uint32(r.Add)
		t := ld.Symaddr(r.Sym)
		if t < 0 {
			ld.Ctxt.Diag("relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym))
		}
		t += int64((o1&0xffff)<<16 + uint32(int32(o2)<<16>>16))

		// The low 16 bits are sign-extended when the second instruction
		// adds them, so if bit 15 is set, bump the high half to compensate.
		if t&0x8000 != 0 {
			t += 0x10000
		}
		o1 = o1&0xffff0000 | (uint32(t)>>16)&0xffff
		o2 = o2&0xffff0000 | uint32(t)&0xffff

		// When laid out, the instruction order must always be o1, o2.
		if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
			*val = int64(o1)<<32 | int64(o2)
		} else {
			*val = int64(o2)<<32 | int64(o1)
		}
		return 0

	case ld.R_CALLPOWER:
		// Bits 6 through 29 = (S + A - P) >> 2
		var o1 uint32
		if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
			o1 = ld.Be32(s.P[r.Off:])
		} else {
			o1 = ld.Le32(s.P[r.Off:])
		}

		t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
		if t&3 != 0 {
			ld.Ctxt.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
		}
		if int64(int32(t<<6)>>6) != t {
			// TODO(austin) This can happen if text > 32M.
			// Add a call trampoline to .text in that case.
			ld.Ctxt.Diag("relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t)
		}

		*val = int64(o1&0xfc000003 | uint32(t)&^0xfc000003)
		return 0

	case ld.R_POWER_TOC: // S + A - .TOC.
		*val = ld.Symaddr(r.Sym) + r.Add - symtoc(s)
		return 0
	}

	return -1
}