func adddynrel(s *ld.LSym, r *ld.Reloc) {
	targ := r.Sym
	ld.Ctxt.Cursym = s

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Diag("unexpected relocation type %d", r.Type)
			return
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_X86_64_PC32:
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected R_X86_64_PC32 relocation for dynamic symbol %s", targ.Name)
		}
		if targ.Type == 0 || targ.Type == obj.SXREF {
			ld.Diag("unknown symbol %s in pcrel", targ.Name)
		}
		r.Type = obj.R_PCREL
		r.Add += 4
		return

	case 256 + ld.R_X86_64_PLT32:
		r.Type = obj.R_PCREL
		r.Add += 4
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add += int64(targ.Plt)
		}
		return

	case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX:
		if targ.Type != obj.SDYNIMPORT {
			// have symbol
			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
				// turn MOVQ of GOT entry into LEAQ of symbol itself
				s.P[r.Off-2] = 0x8d
				r.Type = obj.R_PCREL
				r.Add += 4
				return
			}
		}

		// fall back to using GOT and hope for the best (CMOV*)
		// TODO: just needs relocation, no need to put in .dynsym
		addgotsym(targ)
		r.Type = obj.R_PCREL
		r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
		r.Add += 4
		r.Add += int64(targ.Got)
		return

	case 256 + ld.R_X86_64_64:
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected R_X86_64_64 relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_ADDR
		return

	// Handle relocations found in Mach-O object files.
	case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 0,
		512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 0,
		512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 0:
		// TODO: What is the difference between all these?
		r.Type = obj.R_ADDR
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected reloc for dynamic symbol %s", targ.Name)
		}
		return

	case 512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 1:
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add = int64(targ.Plt)
			r.Type = obj.R_PCREL
			return
		}
		fallthrough

	// fall through
	case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED_1*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED_2*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED_4*2 + 1:
		r.Type = obj.R_PCREL
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected pc-relative reloc for dynamic symbol %s", targ.Name)
		}
		return

	case 512 + ld.MACHO_X86_64_RELOC_GOT_LOAD*2 + 1:
		if targ.Type != obj.SDYNIMPORT {
			// have symbol
			// turn MOVQ of GOT entry into LEAQ of symbol itself
			if r.Off < 2 || s.P[r.Off-2] != 0x8b {
				ld.Diag("unexpected GOT_LOAD reloc for non-dynamic symbol %s", targ.Name)
				return
			}
			s.P[r.Off-2] = 0x8d
			r.Type = obj.R_PCREL
			return
		}
		fallthrough

	// fall through
	case 512 + ld.MACHO_X86_64_RELOC_GOT*2 + 1:
		if targ.Type != obj.SDYNIMPORT {
			ld.Diag("unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
		}
		addgotsym(targ)
		r.Type = obj.R_PCREL
		r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
		r.Add += int64(targ.Got)
		return
	}

	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return
	}

	switch r.Type {
	case obj.R_CALL, obj.R_PCREL:
		if ld.HEADTYPE == obj.Hwindows {
			// nothing to do, the relocation will be laid out in pereloc1
			return
		} else {
			// for both ELF and Mach-O
			addpltsym(targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add = int64(targ.Plt)
			return
		}

	case obj.R_ADDR:
		if s.Type == obj.STEXT && ld.Iself {
			if ld.HEADTYPE == obj.Hsolaris {
				addpltsym(targ)
				r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
				r.Add += int64(targ.Plt)
				return
			}
			// The code is asking for the address of an external
			// function. We provide it with the address of the
			// corresponding GOT symbol.
			addgotsym(targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
			r.Add += int64(targ.Got)
			return
		}

		if s.Type != obj.SDATA {
			break
		}
		if ld.Iself {
			ld.Adddynsym(ld.Ctxt, targ)
			rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
			ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
			if r.Siz == 8 {
				ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64))
			} else {
				ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_32))
			}
			ld.Adduint64(ld.Ctxt, rela, uint64(r.Add))
			r.Type = 256 // ignore during relocsym
			return
		}

		if ld.HEADTYPE == obj.Hdarwin && s.Size == int64(ld.Thearch.Ptrsize) && r.Off == 0 {
			// Mach-O relocations are a royal pain to lay out.
			// They use a compact stateful bytecode representation
			// that is too much bother to deal with.
			// Instead, interpret the C declaration
			//	void *_Cvar_stderr = &stderr;
			// as making _Cvar_stderr the name of a GOT entry
			// for stderr. This is separate from the usual GOT entry,
			// just in case the C code assigns to the variable,
			// and of course it only works for single pointers,
			// but we only need to support cgo and that's all it needs.
			ld.Adddynsym(ld.Ctxt, targ)

			got := ld.Linklookup(ld.Ctxt, ".got", 0)
			s.Type = got.Type | obj.SSUB
			s.Outer = got
			s.Sub = got.Sub
			got.Sub = s
			s.Value = got.Size
			ld.Adduint64(ld.Ctxt, got, 0)
			ld.Adduint32(ld.Ctxt, ld.Linklookup(ld.Ctxt, ".linkedit.got", 0), uint32(targ.Dynid))
			r.Type = 256 // ignore during relocsym
			return
		}

		if ld.HEADTYPE == obj.Hwindows {
			// nothing to do, the relocation will be laid out in pereloc1
			return
		}
	}

	ld.Ctxt.Cursym = s
	ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type)
}
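// Illustrative sketch (not part of the linker): the GOTPCREL rewrite above
// works because a RIP-relative MOVQ and LEAQ differ only in their opcode byte
// (0x8b vs 0x8d), which sits two bytes before the 4-byte displacement that the
// relocation patches. Changing that one byte turns
// "movq sym@GOTPCREL(%rip), %rax" (48 8b 05 xx xx xx xx) into
// "leaq sym(%rip), %rax" (48 8d 05 xx xx xx xx), so a locally defined symbol
// needs no GOT entry at all. The byte values are standard x86-64 encodings;
// the function name below is hypothetical.
func rewriteMovToLea(p []byte, off int32) bool {
	// off is the offset of the displacement being relocated, as in r.Off above.
	if off < 2 || p[off-2] != 0x8b {
		return false // not a register-load MOV we know how to rewrite
	}
	p[off-2] = 0x8d // MOV r64, r/m64 -> LEA r64, m
	return true
}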
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
	if ld.Linkmode == ld.LinkExternal {
		switch r.Type {
		default:
			return -1

		case obj.R_ARM64_GOTPCREL:
			var o1, o2 uint32
			if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
				o1 = uint32(*val >> 32)
				o2 = uint32(*val)
			} else {
				o1 = uint32(*val)
				o2 = uint32(*val >> 32)
			}
			// Any relocation against a function symbol is redirected to
			// be against a local symbol instead (see putelfsym in
			// symtab.go) but unfortunately the system linker was buggy
			// when confronted with a R_AARCH64_ADR_GOT_PAGE relocation
			// against a local symbol until May 2015
			// (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So
			// we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp;
			// add + R_ADDRARM64.
			if !(r.Sym.Version != 0 || (r.Sym.Type&obj.SHIDDEN != 0) || r.Sym.Local) && r.Sym.Type == obj.STEXT && ld.DynlinkingGo() {
				if o2&0xffc00000 != 0xf9400000 {
					ld.Ctxt.Diag("R_ARM64_GOTPCREL against unexpected instruction %x", o2)
				}
				o2 = 0x91000000 | (o2 & 0x000003ff)
				r.Type = obj.R_ADDRARM64
			}
			if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
				*val = int64(o1)<<32 | int64(o2)
			} else {
				*val = int64(o2)<<32 | int64(o1)
			}
			fallthrough

		case obj.R_ADDRARM64:
			r.Done = 0

			// set up addend for eventual relocation via outer symbol.
			rs := r.Sym
			r.Xadd = r.Add
			for rs.Outer != nil {
				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
				rs = rs.Outer
			}

			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
				ld.Diag("missing section for %s", rs.Name)
			}
			r.Xsym = rs

			// Note: ld64 currently has a bug that any non-zero addend for BR26 relocation
			// will make the linking fail because it thinks the code is not PIC even though
			// the BR26 relocation should be fully resolved at link time.
			// That is the reason why the next if block is disabled. When the bug in ld64
			// is fixed, we can enable this block and also enable duff's device in cmd/7g.
			if false && ld.HEADTYPE == obj.Hdarwin {
				var o0, o1 uint32
				if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
					o0 = uint32(*val >> 32)
					o1 = uint32(*val)
				} else {
					o0 = uint32(*val)
					o1 = uint32(*val >> 32)
				}
				// Mach-O wants the addend to be encoded in the instruction.
				// Note that although Mach-O supports ARM64_RELOC_ADDEND, it
				// can only encode a 24-bit signed addend, while the instruction
				// pair supports a 33-bit signed addend, so we always encode the
				// addend in place.
				o0 |= (uint32((r.Xadd>>12)&3) << 29) | (uint32((r.Xadd>>12>>2)&0x7ffff) << 5)
				o1 |= uint32(r.Xadd&0xfff) << 10
				r.Xadd = 0

				// when laid out, the instruction order must always be o1, o2.
				if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
					*val = int64(o0)<<32 | int64(o1)
				} else {
					*val = int64(o1)<<32 | int64(o0)
				}
			}

			return 0

		case obj.R_CALLARM64,
			obj.R_ARM64_TLS_LE,
			obj.R_ARM64_TLS_IE:
			r.Done = 0
			r.Xsym = r.Sym
			r.Xadd = r.Add
			return 0
		}
	}

	switch r.Type {
	case obj.R_CONST:
		*val = r.Add
		return 0

	case obj.R_GOTOFF:
		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0))
		return 0

	case obj.R_ADDRARM64:
		t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
		if t >= 1<<32 || t < -1<<32 {
			ld.Diag("program too large, address relocation distance = %d", t)
		}

		var o0, o1 uint32
		if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
			o0 = uint32(*val >> 32)
			o1 = uint32(*val)
		} else {
			o0 = uint32(*val)
			o1 = uint32(*val >> 32)
		}

		o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
		o1 |= uint32(t&0xfff) << 10

		// when laid out, the instruction order must always be o1, o2.
		if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
			*val = int64(o0)<<32 | int64(o1)
		} else {
			*val = int64(o1)<<32 | int64(o0)
		}
		return 0

	case obj.R_ARM64_TLS_LE:
		r.Done = 0
		if ld.HEADTYPE != obj.Hlinux {
			ld.Diag("TLS reloc on unsupported OS %s", ld.Headstr(int(ld.HEADTYPE)))
		}
		// The TCB is two pointers. This is not documented anywhere, but is
		// de facto part of the ABI.
		v := r.Sym.Value + int64(2*ld.Thearch.Ptrsize)
		if v < 0 || v >= 32678 {
			ld.Diag("TLS offset out of range %d", v)
		}
		*val |= v << 5
		return 0

	case obj.R_CALLARM64:
		t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off))
		if t >= 1<<27 || t < -1<<27 {
			ld.Diag("program too large, call relocation distance = %d", t)
		}
		*val |= (t >> 2) & 0x03ffffff
		return 0
	}

	return -1
}
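// Illustrative sketch (not part of the linker): how the R_ADDRARM64 case above
// splits a page-relative offset across the adrp/add pair. The adrp instruction
// carries bits 12 and up of the offset (immlo in bits 29-30, immhi in bits
// 5-23) and the following add carries the low 12 bits in its imm12 field
// (bits 10-21), matching the shifts applied to o0 and o1 in archreloc.
// The function name is hypothetical.
func packAddrARM64(adrp, add uint32, t int64) (uint32, uint32) {
	adrp |= uint32((t>>12)&3) << 29      // immlo: offset bits 12-13
	adrp |= uint32((t>>14)&0x7ffff) << 5 // immhi: offset bits 14-32
	add |= uint32(t&0xfff) << 10         // imm12: offset bits 0-11
	return adrp, add
}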
func adddynrel(s *ld.LSym, r *ld.Reloc) {
	targ := r.Sym
	ld.Ctxt.Cursym = s

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Diag("unexpected relocation type %d", r.Type)
			return
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_386_PC32:
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected R_386_PC32 relocation for dynamic symbol %s", targ.Name)
		}
		if targ.Type == 0 || targ.Type == obj.SXREF {
			ld.Diag("unknown symbol %s in pcrel", targ.Name)
		}
		r.Type = obj.R_PCREL
		r.Add += 4
		return

	case 256 + ld.R_386_PLT32:
		r.Type = obj.R_PCREL
		r.Add += 4
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ld.Ctxt, targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add += int64(targ.Plt)
		}
		return

	case 256 + ld.R_386_GOT32, 256 + ld.R_386_GOT32X:
		if targ.Type != obj.SDYNIMPORT {
			// have symbol
			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
				// turn MOVL of GOT entry into LEAL of symbol address, relative to GOT.
				s.P[r.Off-2] = 0x8d
				r.Type = obj.R_GOTOFF
				return
			}

			if r.Off >= 2 && s.P[r.Off-2] == 0xff && s.P[r.Off-1] == 0xb3 {
				// turn PUSHL of GOT entry into PUSHL of symbol itself.
				// use unnecessary SS prefix to keep instruction same length.
				s.P[r.Off-2] = 0x36
				s.P[r.Off-1] = 0x68
				r.Type = obj.R_ADDR
				return
			}

			ld.Diag("unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
			return
		}

		addgotsym(ld.Ctxt, targ)
		r.Type = obj.R_CONST // write r->add during relocsym
		r.Sym = nil
		r.Add += int64(targ.Got)
		return

	case 256 + ld.R_386_GOTOFF:
		r.Type = obj.R_GOTOFF
		return

	case 256 + ld.R_386_GOTPC:
		r.Type = obj.R_PCREL
		r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
		r.Add += 4
		return

	case 256 + ld.R_386_32:
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected R_386_32 relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_ADDR
		return

	case 512 + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 0:
		r.Type = obj.R_ADDR
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected reloc for dynamic symbol %s", targ.Name)
		}
		return

	case 512 + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 1:
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ld.Ctxt, targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add = int64(targ.Plt)
			r.Type = obj.R_PCREL
			return
		}

		r.Type = obj.R_PCREL
		return

	case 512 + ld.MACHO_FAKE_GOTPCREL:
		if targ.Type != obj.SDYNIMPORT {
			// have symbol
			// turn MOVL of GOT entry into LEAL of symbol itself
			if r.Off < 2 || s.P[r.Off-2] != 0x8b {
				ld.Diag("unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
				return
			}
			s.P[r.Off-2] = 0x8d
			r.Type = obj.R_PCREL
			return
		}

		addgotsym(ld.Ctxt, targ)
		r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
		r.Add += int64(targ.Got)
		r.Type = obj.R_PCREL
		return
	}

	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return
	}

	switch r.Type {
	case obj.R_CALL, obj.R_PCREL:
		addpltsym(ld.Ctxt, targ)
		r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
		r.Add = int64(targ.Plt)
		return

	case obj.R_ADDR:
		if s.Type != obj.SDATA {
			break
		}
		if ld.Iself {
			ld.Adddynsym(ld.Ctxt, targ)
			rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
			ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
			ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_386_32))
			r.Type = obj.R_CONST // write r->add during relocsym
			r.Sym = nil
			return
		}

		if ld.HEADTYPE == obj.Hdarwin && s.Size == PtrSize && r.Off == 0 {
			// Mach-O relocations are a royal pain to lay out.
			// They use a compact stateful bytecode representation
			// that is too much bother to deal with.
			// Instead, interpret the C declaration
			//	void *_Cvar_stderr = &stderr;
			// as making _Cvar_stderr the name of a GOT entry
			// for stderr. This is separate from the usual GOT entry,
			// just in case the C code assigns to the variable,
			// and of course it only works for single pointers,
			// but we only need to support cgo and that's all it needs.
			ld.Adddynsym(ld.Ctxt, targ)

			got := ld.Linklookup(ld.Ctxt, ".got", 0)
			s.Type = got.Type | obj.SSUB
			s.Outer = got
			s.Sub = got.Sub
			got.Sub = s
			s.Value = got.Size
			ld.Adduint32(ld.Ctxt, got, 0)
			ld.Adduint32(ld.Ctxt, ld.Linklookup(ld.Ctxt, ".linkedit.got", 0), uint32(targ.Dynid))
			r.Type = 256 // ignore during relocsym
			return
		}

		if ld.HEADTYPE == obj.Hwindows && s.Size == PtrSize {
			// nothing to do, the relocation will be laid out in pereloc1
			return
		}
	}

	ld.Ctxt.Cursym = s
	ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type)
}
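// Illustrative sketch (not part of the linker): the R_386_GOT32 rewrite above
// replaces "pushl sym@GOT(%ebx)" (ff b3 disp32, 6 bytes) with an SS-prefixed
// "pushl $sym" (36 68 imm32, also 6 bytes). The redundant SS segment-override
// prefix keeps the instruction the same length so no other offsets in the
// function move. The byte values are standard x86 encodings; the function
// name below is hypothetical.
func rewritePushGotToPushImm(p []byte, off int32) bool {
	// off is the offset of the 4-byte operand being relocated, as in r.Off above.
	if off < 2 || p[off-2] != 0xff || p[off-1] != 0xb3 {
		return false // not "pushl disp32(%ebx)"
	}
	p[off-2] = 0x36 // SS segment-override prefix (no effect here, just padding)
	p[off-1] = 0x68 // pushl imm32
	return true
}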
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
	if ld.Linkmode == ld.LinkExternal {
		switch r.Type {
		case obj.R_CALLARM:
			r.Done = 0

			// set up addend for eventual relocation via outer symbol.
			rs := r.Sym
			r.Xadd = r.Add
			if r.Xadd&0x800000 != 0 {
				r.Xadd |= ^0xffffff
			}
			r.Xadd *= 4
			for rs.Outer != nil {
				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
				rs = rs.Outer
			}

			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
				ld.Diag("missing section for %s", rs.Name)
			}
			r.Xsym = rs

			// ld64 for arm seems to want the symbol table to contain offset
			// into the section rather than pseudo virtual address that contains
			// the section load address.
			// we need to compensate that by removing the instruction's address
			// from addend.
			if ld.HEADTYPE == obj.Hdarwin {
				r.Xadd -= ld.Symaddr(s) + int64(r.Off)
			}

			*val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4))))
			return 0
		}

		return -1
	}

	switch r.Type {
	case obj.R_CONST:
		*val = r.Add
		return 0

	case obj.R_GOTOFF:
		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0))
		return 0

	// The following three arch specific relocations are only for generation of
	// Linux/ARM ELF's PLT entry (3 assembler instructions)
	case obj.R_PLT0: // add ip, pc, #0xXX00000
		if ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got.plt", 0)) < ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0)) {
			ld.Diag(".got.plt should be placed after .plt section.")
		}
		*val = 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0))+int64(r.Off))+r.Add)) >> 20))
		return 0

	case obj.R_PLT1: // add ip, ip, #0xYY000
		*val = 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0))+int64(r.Off))+r.Add+4)) >> 12))
		return 0

	case obj.R_PLT2: // ldr pc, [ip, #0xZZZ]!
		*val = 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0))+int64(r.Off))+r.Add+8)))
		return 0

	case obj.R_CALLARM: // bl XXXXXX or b YYYYYY
		*val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32((ld.Symaddr(r.Sym)+int64((uint32(r.Add))*4)-(s.Value+int64(r.Off)))/4))))
		return 0
	}

	return -1
}
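// Illustrative sketch (not part of the linker): an ARM B/BL instruction is
// cond(4 bits) | opcode | signed imm24, where imm24 is a word (4-byte) offset.
// The R_CALLARM cases above therefore keep the top 8 bits of the original
// instruction word (condition and opcode, the 0xff000000 mask) and splice a
// new 24-bit word offset into the low bits, which is what the braddoff helper
// combines. This standalone version is hypothetical and only shows the masking.
func spliceBranchOffset(insn uint32, wordOff int32) uint32 {
	return insn&0xff000000 | uint32(wordOff)&0x00ffffff
}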
func machoreloc1(r *ld.Reloc, sectoff int64) int {
	var v uint32

	rs := r.Xsym

	// ld64 has a bug handling MACHO_ARM64_RELOC_UNSIGNED with !extern relocation.
	// see cmd/internal/ld/data.go for details. The workaround is to not use !extern
	// UNSIGNED relocations at all.
	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_CALLARM64 || r.Type == obj.R_ADDRARM64 || r.Type == obj.R_ADDR {
		if rs.Dynid < 0 {
			ld.Diag("reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
			return -1
		}

		v = uint32(rs.Dynid)
		v |= 1 << 27 // external relocation
	} else {
		v = uint32(rs.Sect.Extnum)
		if v == 0 {
			ld.Diag("reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
			return -1
		}
	}

	switch r.Type {
	default:
		return -1

	case obj.R_ADDR:
		v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28

	case obj.R_CALLARM64:
		if r.Xadd != 0 {
			ld.Diag("ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", rs.Name, r.Xadd)
		}

		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28

	case obj.R_ADDRARM64:
		r.Siz = 4

		// Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21
		// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
		if r.Xadd != 0 {
			ld.Thearch.Lput(uint32(sectoff + 4))
			ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}

		ld.Thearch.Lput(uint32(sectoff + 4))
		ld.Thearch.Lput(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25))

		if r.Xadd != 0 {
			ld.Thearch.Lput(uint32(sectoff))
			ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}

		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28
	}

	switch r.Siz {
	default:
		return -1

	case 1:
		v |= 0 << 25

	case 2:
		v |= 1 << 25

	case 4:
		v |= 2 << 25

	case 8:
		v |= 3 << 25
	}

	ld.Thearch.Lput(uint32(sectoff))
	ld.Thearch.Lput(v)
	return 0
}
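// Illustrative sketch (not part of the linker): machoreloc1 above emits each
// Mach-O relocation_info entry as two 32-bit words: the section offset, then a
// packed word laid out as r_symbolnum (bits 0-23), r_pcrel (bit 24), r_length
// (bits 25-26, log2 of the relocated size), r_extern (bit 27) and r_type
// (bits 28-31) -- the same shifts applied to v. The helper name is hypothetical.
func machoRelocInfo(symnum uint32, pcrel bool, siz uint8, extern bool, typ uint32) uint32 {
	v := symnum & 0xffffff
	if pcrel {
		v |= 1 << 24
	}
	switch siz { // r_length is log2(size in bytes)
	case 1:
		v |= 0 << 25
	case 2:
		v |= 1 << 25
	case 4:
		v |= 2 << 25
	case 8:
		v |= 3 << 25
	}
	if extern {
		v |= 1 << 27
	}
	return v | typ<<28
}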
func adddynrel(s *ld.LSym, r *ld.Reloc) {
	targ := r.Sym
	ld.Ctxt.Cursym = s

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Diag("unexpected relocation type %d", r.Type)
			return
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_ARM_PLT32:
		r.Type = obj.R_CALLARM
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ld.Ctxt, targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
		}
		return

	case 256 + ld.R_ARM_THM_PC22: // R_ARM_THM_CALL
		ld.Exitf("R_ARM_THM_CALL, are you using -marm?")
		return

	case 256 + ld.R_ARM_GOT32: // R_ARM_GOT_BREL
		if targ.Type != obj.SDYNIMPORT {
			addgotsyminternal(ld.Ctxt, targ)
		} else {
			addgotsym(ld.Ctxt, targ)
		}

		r.Type = obj.R_CONST // write r->add during relocsym
		r.Sym = nil
		r.Add += int64(targ.Got)
		return

	case 256 + ld.R_ARM_GOT_PREL: // GOT(nil) + A - nil
		if targ.Type != obj.SDYNIMPORT {
			addgotsyminternal(ld.Ctxt, targ)
		} else {
			addgotsym(ld.Ctxt, targ)
		}

		r.Type = obj.R_PCREL
		r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
		r.Add += int64(targ.Got) + 4
		return

	case 256 + ld.R_ARM_GOTOFF: // R_ARM_GOTOFF32
		r.Type = obj.R_GOTOFF
		return

	case 256 + ld.R_ARM_GOTPC: // R_ARM_BASE_PREL
		r.Type = obj.R_PCREL
		r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
		r.Add += 4
		return

	case 256 + ld.R_ARM_CALL:
		r.Type = obj.R_CALLARM
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ld.Ctxt, targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
		}
		return

	case 256 + ld.R_ARM_REL32: // R_ARM_REL32
		r.Type = obj.R_PCREL
		r.Add += 4
		return

	case 256 + ld.R_ARM_ABS32:
		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected R_ARM_ABS32 relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_ADDR
		return

		// we can just ignore this, because we are targeting ARM V5+ anyway
	case 256 + ld.R_ARM_V4BX:
		if r.Sym != nil {
			// R_ARM_V4BX is ABS relocation, so this symbol is a dummy symbol, ignore it
			r.Sym.Type = 0
		}

		r.Sym = nil
		return

	case 256 + ld.R_ARM_PC24,
		256 + ld.R_ARM_JUMP24:
		r.Type = obj.R_CALLARM
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ld.Ctxt, targ)
			r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
		}
		return
	}

	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return
	}

	switch r.Type {
	case obj.R_CALLARM:
		addpltsym(ld.Ctxt, targ)
		r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
		r.Add = int64(targ.Plt)
		return

	case obj.R_ADDR:
		if s.Type != obj.SDATA {
			break
		}
		if ld.Iself {
			ld.Adddynsym(ld.Ctxt, targ)
			rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
			ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
			ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynamic reloc
			r.Type = obj.R_CONST                                                               // write r->add during relocsym
			r.Sym = nil
			return
		}
	}

	ld.Ctxt.Cursym = s
	ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type)
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
	if ld.Linkmode == ld.LinkExternal {
		switch r.Type {
		default:
			return -1

		case obj.R_POWER_TLS, obj.R_POWER_TLS_LE, obj.R_POWER_TLS_IE:
			r.Done = 0
			// check Outer is nil, Type is TLSBSS?
			r.Xadd = r.Add
			r.Xsym = r.Sym
			return 0

		case obj.R_ADDRPOWER,
			obj.R_ADDRPOWER_DS,
			obj.R_ADDRPOWER_TOCREL,
			obj.R_ADDRPOWER_TOCREL_DS,
			obj.R_ADDRPOWER_GOT,
			obj.R_ADDRPOWER_PCREL:
			r.Done = 0

			// set up addend for eventual relocation via outer symbol.
			rs := r.Sym
			r.Xadd = r.Add
			for rs.Outer != nil {
				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
				rs = rs.Outer
			}

			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
				ld.Diag("missing section for %s", rs.Name)
			}
			r.Xsym = rs

			return 0

		case obj.R_CALLPOWER:
			r.Done = 0
			r.Xsym = r.Sym
			r.Xadd = r.Add
			return 0
		}
	}

	switch r.Type {
	case obj.R_CONST:
		*val = r.Add
		return 0

	case obj.R_GOTOFF:
		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0))
		return 0

	case obj.R_ADDRPOWER, obj.R_ADDRPOWER_DS:
		return archrelocaddr(r, s, val)

	case obj.R_CALLPOWER:
		// Bits 6 through 29 = (S + A - P) >> 2
		t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))

		if t&3 != 0 {
			ld.Ctxt.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
		}
		if int64(int32(t<<6)>>6) != t {
			// TODO(austin) This can happen if text > 32M.
			// Add a call trampoline to .text in that case.
			ld.Ctxt.Diag("relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t)
		}

		*val |= int64(uint32(t) &^ 0xfc000003)
		return 0

	case obj.R_POWER_TOC: // S + A - .TOC.
		*val = ld.Symaddr(r.Sym) + r.Add - symtoc(s)
		return 0

	case obj.R_POWER_TLS_LE:
		// The thread pointer points 0x7000 bytes after the start of the
		// thread local storage area as documented in section "3.7.2 TLS
		// Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI
		// Specification".
		v := r.Sym.Value - 0x7000
		if int64(int16(v)) != v {
			ld.Diag("TLS offset out of range %d", v)
		}
		*val = (*val &^ 0xffff) | (v & 0xffff)
		return 0
	}

	return -1
}
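// Illustrative sketch (not part of the linker): a PPC64 branch (I-form) keeps
// its opcode in the top 6 bits and the AA/LK flags in the low 2 bits, so the
// R_CALLPOWER case above ORs in the displacement (S + A - P) with those bits
// masked off (&^ 0xfc000003). The displacement must be 4-byte aligned and fit
// in 26 signed bits, which the int64(int32(t<<6)>>6) != t check verifies.
// The helper name is hypothetical.
func patchCallPower(insn uint32, t int64) (uint32, bool) {
	if t&3 != 0 {
		return insn, false // target not 4-byte aligned
	}
	if int64(int32(t<<6)>>6) != t {
		return insn, false // displacement does not fit in 26 signed bits
	}
	return insn | uint32(t)&^0xfc000003, true
}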
func genplt() {
	var s *ld.LSym
	var stub *ld.LSym
	var pprevtextp **ld.LSym
	var r *ld.Reloc
	var n string
	var o1 uint32
	var i int

	// The ppc64 ABI PLT has similar concepts to other
	// architectures, but is laid out quite differently. When we
	// see an R_PPC64_REL24 relocation to a dynamic symbol
	// (indicating that the call needs to go through the PLT), we
	// generate up to three stubs and reserve a PLT slot.
	//
	// 1) The call site will be bl x; nop (where the relocation
	//    applies to the bl). We rewrite this to bl x_stub; ld
	//    r2,24(r1). The ld is necessary because x_stub will save
	//    r2 (the TOC pointer) at 24(r1) (the "TOC save slot").
	//
	// 2) We reserve space for a pointer in the .plt section (once
	//    per referenced dynamic function). .plt is a data
	//    section filled solely by the dynamic linker (more like
	//    .plt.got on other architectures). Initially, the
	//    dynamic linker will fill each slot with a pointer to the
	//    corresponding x@plt entry point.
	//
	// 3) We generate the "call stub" x_stub (once per dynamic
	//    function/object file pair). This saves the TOC in the
	//    TOC save slot, reads the function pointer from x's .plt
	//    slot and calls it like any other global entry point
	//    (including setting r12 to the function address).
	//
	// 4) We generate the "symbol resolver stub" x@plt (once per
	//    dynamic function). This is solely a branch to the glink
	//    resolver stub.
	//
	// 5) We generate the glink resolver stub (only once). This
	//    computes which symbol resolver stub we came through and
	//    invokes the dynamic resolver via a pointer provided by
	//    the dynamic linker. This will patch up the .plt slot to
	//    point directly at the function so future calls go
	//    straight from the call stub to the real function, and
	//    then call the function.

	// NOTE: It's possible we could make ppc64 closer to other
	// architectures: ppc64's .plt is like .plt.got on other
	// platforms and ppc64's .glink is like .plt on other
	// platforms.

	// Find all R_PPC64_REL24 relocations that reference dynamic
	// imports. Reserve PLT entries for these symbols and
	// generate call stubs. The call stubs need to live in .text,
	// which is why we need to do this pass this early.
	//
	// This assumes "case 1" from the ABI, where the caller needs
	// us to save and restore the TOC pointer.
	pprevtextp = &ld.Ctxt.Textp

	for s = *pprevtextp; s != nil; pprevtextp, s = &s.Next, s.Next {
		for i = range s.R {
			r = &s.R[i]
			if r.Type != 256+ld.R_PPC64_REL24 || r.Sym.Type != obj.SDYNIMPORT {
				continue
			}

			// Reserve PLT entry and generate symbol
			// resolver
			addpltsym(ld.Ctxt, r.Sym)

			// Generate call stub
			n = fmt.Sprintf("%s.%s", s.Name, r.Sym.Name)

			stub = ld.Linklookup(ld.Ctxt, n, 0)
			stub.Reachable = stub.Reachable || s.Reachable
			if stub.Size == 0 {
				// Need outer to resolve .TOC.
				stub.Outer = s

				// Link in to textp before s (we could
				// do it after, but would have to skip
				// the subsymbols)
				*pprevtextp = stub

				stub.Next = s
				pprevtextp = &stub.Next

				gencallstub(1, stub, r.Sym)
			}

			// Update the relocation to use the call stub
			r.Sym = stub

			// Restore TOC after bl. The compiler put a
			// nop here for us to overwrite.
			o1 = 0xe8410018 // ld r2,24(r1)
			ld.Ctxt.Arch.ByteOrder.PutUint32(s.P[r.Off+4:], o1)
		}
	}
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
	ld.Thearch.Vput(uint64(sectoff))

	elfsym := r.Xsym.ElfsymForReloc()
	switch r.Type {
	default:
		return -1

	case obj.R_ADDR:
		switch r.Siz {
		case 4:
			ld.Thearch.Vput(ld.R_PPC64_ADDR32 | uint64(elfsym)<<32)
		case 8:
			ld.Thearch.Vput(ld.R_PPC64_ADDR64 | uint64(elfsym)<<32)
		default:
			return -1
		}

	case obj.R_POWER_TLS:
		ld.Thearch.Vput(ld.R_PPC64_TLS | uint64(elfsym)<<32)

	case obj.R_POWER_TLS_LE:
		ld.Thearch.Vput(ld.R_PPC64_TPREL16 | uint64(elfsym)<<32)

	case obj.R_POWER_TLS_IE:
		ld.Thearch.Vput(ld.R_PPC64_GOT_TPREL16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_GOT_TPREL16_LO_DS | uint64(elfsym)<<32)

	case obj.R_ADDRPOWER:
		ld.Thearch.Vput(ld.R_PPC64_ADDR16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_ADDR16_LO | uint64(elfsym)<<32)

	case obj.R_ADDRPOWER_DS:
		ld.Thearch.Vput(ld.R_PPC64_ADDR16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_ADDR16_LO_DS | uint64(elfsym)<<32)

	case obj.R_ADDRPOWER_GOT:
		ld.Thearch.Vput(ld.R_PPC64_GOT16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_GOT16_LO_DS | uint64(elfsym)<<32)

	case obj.R_ADDRPOWER_PCREL:
		ld.Thearch.Vput(ld.R_PPC64_REL16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_REL16_LO | uint64(elfsym)<<32)
		r.Xadd += 4

	case obj.R_ADDRPOWER_TOCREL:
		ld.Thearch.Vput(ld.R_PPC64_TOC16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_TOC16_LO | uint64(elfsym)<<32)

	case obj.R_ADDRPOWER_TOCREL_DS:
		ld.Thearch.Vput(ld.R_PPC64_TOC16_HA | uint64(elfsym)<<32)
		ld.Thearch.Vput(uint64(r.Xadd))
		ld.Thearch.Vput(uint64(sectoff + 4))
		ld.Thearch.Vput(ld.R_PPC64_TOC16_LO_DS | uint64(elfsym)<<32)

	case obj.R_CALLPOWER:
		if r.Siz != 4 {
			return -1
		}
		ld.Thearch.Vput(ld.R_PPC64_REL24 | uint64(elfsym)<<32)
	}
	ld.Thearch.Vput(uint64(r.Xadd))

	return 0
}
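// Illustrative sketch (not part of the linker): each Elf64_Rela record written
// above is three 64-bit words: r_offset (the section offset), r_info, and
// r_addend. r_info packs the dynamic symbol index in the high 32 bits and the
// relocation type in the low 32 bits, which is why every case ORs
// uint64(elfsym)<<32 into the type constant. The split HA/LO cases simply emit
// two consecutive records, one per 16-bit half of the instruction pair.
// This standalone packer is hypothetical.
func elf64RInfo(sym uint32, typ uint32) uint64 {
	return uint64(sym)<<32 | uint64(typ)
}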
func adddynrel(s *ld.LSym, r *ld.Reloc) {
	targ := r.Sym
	ld.Ctxt.Cursym = s

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Diag("unexpected relocation type %d", r.Type)
			return
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_PPC64_REL24:
		r.Type = obj.R_CALLPOWER

		// This is a local call, so the caller isn't setting
		// up r12 and r2 is the same for the caller and
		// callee. Hence, we need to go to the local entry
		// point. (If we don't do this, the callee will try
		// to use r12 to compute r2.)
		r.Add += int64(r.Sym.Localentry) * 4

		if targ.Type == obj.SDYNIMPORT {
			// Should have been handled in elfsetupplt
			ld.Diag("unexpected R_PPC64_REL24 for dyn import")
		}
		return

	case 256 + ld.R_PPC_REL32:
		r.Type = obj.R_PCREL
		r.Add += 4

		if targ.Type == obj.SDYNIMPORT {
			ld.Diag("unexpected R_PPC_REL32 for dyn import")
		}
		return

	case 256 + ld.R_PPC64_ADDR64:
		r.Type = obj.R_ADDR
		if targ.Type == obj.SDYNIMPORT {
			// These happen in .toc sections
			ld.Adddynsym(ld.Ctxt, targ)

			rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
			ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
			ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64))
			ld.Adduint64(ld.Ctxt, rela, uint64(r.Add))
			r.Type = 256 // ignore during relocsym
		}
		return

	case 256 + ld.R_PPC64_TOC16:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_LO | ld.RV_CHECK_OVERFLOW
		return

	case 256 + ld.R_PPC64_TOC16_LO:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_LO
		return

	case 256 + ld.R_PPC64_TOC16_HA:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW
		return

	case 256 + ld.R_PPC64_TOC16_HI:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW
		return

	case 256 + ld.R_PPC64_TOC16_DS:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_DS | ld.RV_CHECK_OVERFLOW
		return

	case 256 + ld.R_PPC64_TOC16_LO_DS:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_DS
		return

	case 256 + ld.R_PPC64_REL16_LO:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_POWER_LO
		r.Add += 2 // Compensate for relocation size of 2
		return

	case 256 + ld.R_PPC64_REL16_HI:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW
		r.Add += 2
		return

	case 256 + ld.R_PPC64_REL16_HA:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW
		r.Add += 2
		return
	}

	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return
	}

	// TODO(austin): Translate our relocations to ELF

	ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type)
}