// Convert the direct jump relocation r to refer to a trampoline if the target is too far
//
// An ARM BL instruction encodes a signed 24-bit word offset; targets further
// than +/-32MB need an intermediate trampoline. The trampoline body itself is
// generated by gentramp/gentramppic/gentrampdyn depending on build mode.
func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) {
	switch r.Type {
	case obj.R_CALLARM:
		// r.Add is the instruction
		// low 24-bit encodes the target address
		// t is the branch distance in words from the relocated instruction.
		t := (ld.Symaddr(r.Sym) + int64(signext24(r.Add&0xffffff)*4) - (s.Value + int64(r.Off))) / 4
		if t > 0x7fffff || t < -0x800000 || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) {
			// direct call too far, need to insert trampoline.
			// look up existing trampolines first. if we found one within the range
			// of direct call, we can reuse it. otherwise create a new one.
			offset := (signext24(r.Add&0xffffff) + 2) * 4
			var tramp *ld.Symbol
			for i := 0; ; i++ {
				// Trampoline names encode target+offset so calls to different
				// offsets within the same symbol get distinct trampolines.
				name := r.Sym.Name + fmt.Sprintf("%+d-tramp%d", offset, i)
				tramp = ctxt.Syms.Lookup(name, int(r.Sym.Version))
				if tramp.Type == obj.SDYNIMPORT {
					// don't reuse trampoline defined in other module
					continue
				}
				if tramp.Value == 0 {
					// either the trampoline does not exist -- we need to create one,
					// or found one the address which is not assigned -- this will be
					// laid down immediately after the current function. use this one.
					break
				}
				// -8 accounts for ARM's pc reading as instruction address + 8.
				t = (ld.Symaddr(tramp) - 8 - (s.Value + int64(r.Off))) / 4
				if t >= -0x800000 && t < 0x7fffff {
					// found an existing trampoline that is not too far
					// we can just use it
					break
				}
			}
			if tramp.Type == 0 {
				// trampoline does not exist, create one
				ctxt.AddTramp(tramp)
				if ctxt.DynlinkingGo() {
					// Dynlink trampolines build the offset with an immediate-rotate
					// add, so the offset must be encodable as an ARM immediate.
					if immrot(uint32(offset)) == 0 {
						ld.Errorf(s, "odd offset in dynlink direct call: %v+%d", r.Sym, offset)
					}
					gentrampdyn(tramp, r.Sym, int64(offset))
				} else if ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.Buildmode == ld.BuildmodePIE {
					gentramppic(tramp, r.Sym, int64(offset))
				} else {
					gentramp(tramp, r.Sym, int64(offset))
				}
			}
			// modify reloc to point to tramp, which will be resolved later
			r.Sym = tramp
			r.Add = r.Add&0xff000000 | 0xfffffe // clear the offset embedded in the instruction
			r.Done = 0
		}
	default:
		ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type)
	}
}
func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { var v uint32 rs := r.Xsym if rs.Type == obj.SHOSTOBJ { if rs.Dynid < 0 { ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type) return -1 } v = uint32(rs.Dynid) v |= 1 << 27 // external relocation } else { v = uint32(rs.Sect.Extnum) if v == 0 { ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type) return -1 } } switch r.Type { default: return -1 case obj.R_ADDR: v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28 case obj.R_CALL, obj.R_PCREL: v |= 1 << 24 // pc-relative bit v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28 } switch r.Siz { default: return -1 case 1: v |= 0 << 25 case 2: v |= 1 << 25 case 4: v |= 2 << 25 case 8: v |= 3 << 25 } ld.Thearch.Lput(uint32(sectoff)) ld.Thearch.Lput(v) return 0 }
// archrelocaddr resolves an R_ADDRPOWER or R_ADDRPOWER_DS relocation,
// splitting the 31-bit absolute address of r.Sym+r.Add across the two
// 16-bit immediate fields of the instruction pair packed in *val.
// Returns 0 on success, -1 on an unhandled relocation type.
func archrelocaddr(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
	var o1, o2 uint32
	// *val packs two 4-byte instructions; unpack them in target byte order.
	if ctxt.Arch.ByteOrder == binary.BigEndian {
		o1 = uint32(*val >> 32)
		o2 = uint32(*val)
	} else {
		o1 = uint32(*val)
		o2 = uint32(*val >> 32)
	}

	// We are spreading a 31-bit address across two instructions, putting the
	// high (adjusted) part in the low 16 bits of the first instruction and the
	// low part in the low 16 bits of the second instruction, or, in the DS case,
	// bits 15-2 (inclusive) of the address into bits 15-2 of the second
	// instruction (it is an error in this case if the low 2 bits of the address
	// are non-zero).
	t := ld.Symaddr(r.Sym) + r.Add
	if t < 0 || t >= 1<<31 {
		ld.Errorf(s, "relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym))
	}
	// The low half is consumed sign-extended by the second instruction;
	// bias the high half so the pair still sums to t when the low half's
	// sign bit is set.
	if t&0x8000 != 0 {
		t += 0x10000
	}

	switch r.Type {
	case obj.R_ADDRPOWER:
		o1 |= (uint32(t) >> 16) & 0xffff
		o2 |= uint32(t) & 0xffff
	case obj.R_ADDRPOWER_DS:
		o1 |= (uint32(t) >> 16) & 0xffff
		// DS-form instructions require a 4-byte-aligned displacement.
		if t&3 != 0 {
			ld.Errorf(s, "bad DS reloc for %s: %d", s.Name, ld.Symaddr(r.Sym))
		}
		o2 |= uint32(t) & 0xfffc
	default:
		return -1
	}

	// Repack the updated instruction pair in target byte order.
	if ctxt.Arch.ByteOrder == binary.BigEndian {
		*val = int64(o1)<<32 | int64(o2)
	} else {
		*val = int64(o2)<<32 | int64(o1)
	}
	return 0
}
func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym if rs.Dynid < 0 { ld.Errorf(s, "reloc %d to non-coff symbol %s type=%d", r.Type, rs.Name, rs.Type) return false } ld.Thearch.Lput(uint32(sectoff)) ld.Thearch.Lput(uint32(rs.Dynid)) switch r.Type { default: return false case obj.R_ADDR: if r.Siz == 8 { v = ld.IMAGE_REL_AMD64_ADDR64 } else { v = ld.IMAGE_REL_AMD64_ADDR32 } case obj.R_CALL, obj.R_PCREL: v = ld.IMAGE_REL_AMD64_REL32 } ld.Thearch.Wput(uint16(v)) return true }
func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { switch r.Variant & ld.RV_TYPE_MASK { default: ld.Errorf(s, "unexpected relocation variant %d", r.Variant) return t case ld.RV_NONE: return t case ld.RV_390_DBL: if (t & 1) != 0 { ld.Errorf(s, "%s+%v is not 2-byte aligned", r.Sym.Name, r.Sym.Value) } return t >> 1 } }
// addpltsym assigns a PLT slot to dynamic symbol s, emitting the lazy
// resolution stub (ELF) or linkedit bookkeeping (Mach-O). No-op if s
// already has a slot (s.Plt >= 0).
func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
	if s.Plt >= 0 {
		return
	}
	ld.Adddynsym(ctxt, s)
	if ld.Iself {
		plt := ctxt.Syms.Lookup(".plt", 0)
		got := ctxt.Syms.Lookup(".got.plt", 0)
		rel := ctxt.Syms.Lookup(".rel.plt", 0)
		if plt.Size == 0 {
			elfsetupplt(ctxt)
		}

		// jmpq *got+size
		ld.Adduint8(ctxt, plt, 0xff)
		ld.Adduint8(ctxt, plt, 0x25)
		ld.Addaddrplus(ctxt, plt, got, got.Size)

		// add to got: pointer to current pos in plt
		ld.Addaddrplus(ctxt, got, plt, plt.Size)

		// pushl $x
		ld.Adduint8(ctxt, plt, 0x68)
		ld.Adduint32(ctxt, plt, uint32(rel.Size))

		// jmp .plt
		ld.Adduint8(ctxt, plt, 0xe9)
		ld.Adduint32(ctxt, plt, uint32(-(plt.Size + 4)))

		// rel
		ld.Addaddrplus(ctxt, rel, got, got.Size-4)
		ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_JMP_SLOT))

		// Each ELF PLT entry written above is 16 bytes; point back at its start.
		s.Plt = int32(plt.Size - 16)
	} else if ld.Headtype == obj.Hdarwin {
		// Same laziness as in 6l.
		plt := ctxt.Syms.Lookup(".plt", 0)
		addgotsym(ctxt, s)
		ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.plt", 0), uint32(s.Dynid))

		// jmpq *got+size(IP)
		s.Plt = int32(plt.Size)
		ld.Adduint8(ctxt, plt, 0xff)
		ld.Adduint8(ctxt, plt, 0x25)
		ld.Addaddrplus(ctxt, plt, ctxt.Syms.Lookup(".got", 0), int64(s.Got))
	} else {
		ld.Errorf(s, "addpltsym: unsupported binary format")
	}
}
// Convert the direct jump relocation r to refer to a trampoline if the target is too far
//
// The trampoline generated here is three words: a pc-relative load of the
// target address, an indirect jump, and the target address itself.
func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) {
	switch r.Type {
	case obj.R_CALLARM:
		// r.Add is the instruction
		// low 24-bit encodes the target address
		// t is the branch distance in words from the relocated instruction.
		t := (ld.Symaddr(r.Sym) + int64(signext24(r.Add&0xffffff)*4) - (s.Value + int64(r.Off))) / 4
		if t > 0x7fffff || t < -0x800000 || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) {
			// direct call too far, need to insert trampoline
			offset := (signext24(r.Add&0xffffff) + 2) * 4
			var tramp *ld.Symbol
			for i := 0; ; i++ {
				// Encode target+offset in the name so distinct offsets within
				// the same symbol get distinct trampolines.
				name := r.Sym.Name + fmt.Sprintf("%+d-tramp%d", offset, i)
				tramp = ctxt.Syms.Lookup(name, int(r.Sym.Version))
				if tramp.Value == 0 {
					// either the trampoline does not exist -- we need to create one,
					// or found one the address which is not assigned -- this will be
					// laid down immediately after the current function. use this one.
					break
				}
				// -8 accounts for ARM's pc reading as instruction address + 8.
				t = (ld.Symaddr(tramp) - 8 - (s.Value + int64(r.Off))) / 4
				if t >= -0x800000 && t < 0x7fffff {
					// found an existing trampoline that is not too far
					// we can just use it
					break
				}
			}
			if tramp.Type == 0 {
				// trampoline does not exist, create one
				ctxt.AddTramp(tramp)
				tramp.Size = 12 // 3 instructions
				tramp.P = make([]byte, tramp.Size)
				t = ld.Symaddr(r.Sym) + int64(offset)
				o1 := uint32(0xe5900000 | 11<<12 | 15<<16) // MOVW (R15), R11 // R15 is actual pc + 8
				o2 := uint32(0xe12fff10 | 11)              // JMP (R11)
				o3 := uint32(t)                            // WORD $target
				ld.SysArch.ByteOrder.PutUint32(tramp.P, o1)
				ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2)
				ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3)
			}
			// modify reloc to point to tramp, which will be resolved later
			r.Sym = tramp
			r.Add = r.Add&0xff000000 | 0xfffffe // clear the offset embedded in the instruction
			r.Done = 0
		}
	default:
		ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type)
	}
}
func addgotsyminternal(ctxt *ld.Link, s *ld.Symbol) { if s.Got >= 0 { return } got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) ld.Addaddrplus(ctxt, got, s, 0) if ld.Iself { } else { ld.Errorf(s, "addgotsyminternal: unsupported binary format") } }
// Return the value of .TOC. for symbol s func symtoc(ctxt *ld.Link, s *ld.Symbol) int64 { var toc *ld.Symbol if s.Outer != nil { toc = ctxt.Syms.ROLookup(".TOC.", int(s.Outer.Version)) } else { toc = ctxt.Syms.ROLookup(".TOC.", int(s.Version)) } if toc == nil { ld.Errorf(s, "TOC-relative relocation in object without .TOC.") return 0 } return toc.Value }
func addgotsym(ctxt *ld.Link, s *ld.Symbol) { if s.Got >= 0 { return } ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) ld.Adduint32(ctxt, got, 0) if ld.Iself { rel := ctxt.Syms.Lookup(".rel", 0) ld.Addaddrplus(ctxt, rel, got, int64(s.Got)) ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT)) } else { ld.Errorf(s, "addgotsym: unsupported binary format") } }
// addpltsym assigns a PLT slot to dynamic symbol s. On ppc64 ELF the
// dynamic linker fills in the PLT itself; we reserve the slot, emit a
// glink resolver stub, and generate a JMP_SLOT dynamic relocation.
// No-op if s already has a slot (s.Plt >= 0).
func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
	if s.Plt >= 0 {
		return
	}
	ld.Adddynsym(ctxt, s)

	if ld.Iself {
		plt := ctxt.Syms.Lookup(".plt", 0)
		rela := ctxt.Syms.Lookup(".rela.plt", 0)
		if plt.Size == 0 {
			elfsetupplt(ctxt)
		}

		// Create the glink resolver if necessary
		glink := ensureglinkresolver(ctxt)

		// Write symbol resolver stub (just a branch to the
		// glink resolver stub)
		// The branch target is filled in via an R_CALLPOWER relocation.
		r := ld.Addrel(glink)

		r.Sym = glink
		r.Off = int32(glink.Size)
		r.Siz = 4
		r.Type = obj.R_CALLPOWER
		ld.Adduint32(ctxt, glink, 0x48000000) // b .glink

		// In the ppc64 ABI, the dynamic linker is responsible
		// for writing the entire PLT.  We just need to
		// reserve 8 bytes for each PLT entry and generate a
		// JMP_SLOT dynamic relocation for it.
		//
		// TODO(austin): ABI v1 is different
		s.Plt = int32(plt.Size)

		plt.Size += 8

		ld.Addaddrplus(ctxt, rela, plt, int64(s.Plt))
		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_PPC64_JMP_SLOT))
		ld.Adduint64(ctxt, rela, 0)
	} else {
		ld.Errorf(s, "addpltsym: unsupported binary format")
	}
}
func addgotsym(ctxt *ld.Link, s *ld.Symbol) { if s.Got >= 0 { return } ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) ld.Adduint64(ctxt, got, 0) if ld.Iself { rela := ctxt.Syms.Lookup(".rela", 0) ld.Addaddrplus(ctxt, rela, got, int64(s.Got)) ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_GLOB_DAT)) ld.Adduint64(ctxt, rela, 0) } else if ld.Headtype == obj.Hdarwin { ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(s.Dynid)) } else { ld.Errorf(s, "addgotsym: unsupported binary format") } }
// addpltsym assigns a PLT slot to dynamic symbol s, emitting the three
// ARM PLT stub instructions (as R_PLT0/1/2 relocations), the GOT entry,
// and the R_ARM_JUMP_SLOT dynamic relocation. No-op if s already has a
// slot (s.Plt >= 0). Only ELF output is supported.
func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
	if s.Plt >= 0 {
		return
	}

	ld.Adddynsym(ctxt, s)

	if ld.Iself {
		plt := ctxt.Syms.Lookup(".plt", 0)
		got := ctxt.Syms.Lookup(".got.plt", 0)
		rel := ctxt.Syms.Lookup(".rel.plt", 0)
		if plt.Size == 0 {
			elfsetupplt(ctxt)
		}

		// .got entry
		s.Got = int32(got.Size)

		// In theory, all GOT should point to the first PLT entry,
		// Linux/ARM's dynamic linker will do that for us, but FreeBSD/ARM's
		// dynamic linker won't, so we'd better do it ourselves.
		ld.Addaddrplus(ctxt, got, plt, 0)

		// .plt entry, this depends on the .got entry
		s.Plt = int32(plt.Size)

		addpltreloc(ctxt, plt, got, s, obj.R_PLT0) // add lr, pc, #0xXX00000
		addpltreloc(ctxt, plt, got, s, obj.R_PLT1) // add lr, lr, #0xYY000
		addpltreloc(ctxt, plt, got, s, obj.R_PLT2) // ldr pc, [lr, #0xZZZ]!

		// rel
		ld.Addaddrplus(ctxt, rel, got, int64(s.Got))

		ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_JUMP_SLOT))
	} else {
		ld.Errorf(s, "addpltsym: unsupported binary format")
	}
}
func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { if ld.Linkmode == ld.LinkExternal { switch r.Type { default: return -1 case obj.R_ADDRMIPS, obj.R_ADDRMIPSU: r.Done = 0 // set up addend for eventual relocation via outer symbol. rs := r.Sym r.Xadd = r.Add for rs.Outer != nil { r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer) rs = rs.Outer } if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs return 0 case obj.R_ADDRMIPSTLS, obj.R_CALLMIPS, obj.R_JMPMIPS: r.Done = 0 r.Xsym = r.Sym r.Xadd = r.Add return 0 } } switch r.Type { case obj.R_CONST: *val = r.Add return 0 case obj.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) return 0 case obj.R_ADDRMIPS, obj.R_ADDRMIPSU: t := ld.Symaddr(r.Sym) + r.Add o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) if r.Type == obj.R_ADDRMIPS { *val = int64(o1&0xffff0000 | uint32(t)&0xffff) } else { *val = int64(o1&0xffff0000 | uint32((t+1<<15)>>16)&0xffff) } return 0 case obj.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ld.Symaddr(r.Sym) + r.Add - 0x7000 if t < -32768 || t >= 32678 { ld.Errorf(s, "TLS offset out of range %d", t) } o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) *val = int64(o1&0xffff0000 | uint32(t)&0xffff) return 0 case obj.R_CALLMIPS, obj.R_JMPMIPS: // Low 26 bits = (S + A) >> 2 t := ld.Symaddr(r.Sym) + r.Add o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) *val = int64(o1&0xfc000000 | uint32(t>>2)&^0xfc000000) return 0 } return -1 }
// asmb writes the final output file: text and data segments, DWARF,
// the symbol table (unless -s), and the ELF headers. Ordering matters:
// segment contents are written at their file offsets first, then the
// symbol table, then the header is written by seeking back to offset 0.
func asmb(ctxt *ld.Link) {
	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
	}
	if ld.Iself {
		ld.Asmbelfsetup()
	}

	// Write the text segment: code in the first section, data-like
	// sections (e.g. rodata placed in text) afterwards.
	sect := ld.Segtext.Sect
	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
	ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
	for sect = sect.Next; sect != nil; sect = sect.Next {
		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
	}

	if ld.Segrodata.Filelen > 0 {
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
		}
		ld.Cseek(int64(ld.Segrodata.Fileoff))
		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
	}

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
	}
	ld.Cseek(int64(ld.Segdata.Fileoff))
	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))

	ld.Cseek(int64(ld.Segdwarf.Fileoff))
	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))

	/* output symbol table */
	ld.Symsize = 0

	ld.Lcsize = 0
	symo := uint32(0)
	if !*ld.FlagS {
		// TODO: rationalize
		if !ld.Iself {
			ld.Errorf(nil, "unsupported executable format")
		}
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f sym\n", obj.Cputime())
		}
		// Symbol table goes after DWARF, rounded up to -R alignment.
		symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
		symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))

		ld.Cseek(int64(symo))
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f elfsym\n", obj.Cputime())
		}
		ld.Asmelfsym(ctxt)
		ld.Cflush()
		ld.Cwrite(ld.Elfstrdat)

		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
		}

		if ld.Linkmode == ld.LinkExternal {
			ld.Elfemitreloc(ctxt)
		}
	}

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f header\n", obj.Cputime())
	}
	// Seek back to the start of the file to write the header.
	ld.Cseek(0)
	switch ld.Headtype {
	default:
		ld.Errorf(nil, "unsupported operating system")
	case obj.Hlinux:
		ld.Asmbelf(ctxt, int64(symo))
	}

	ld.Cflush()
	if *ld.FlagC {
		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
		fmt.Printf("symsize=%d\n", ld.Symsize)
		fmt.Printf("lcsize=%d\n", ld.Lcsize)
		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
	}
}
// archrelocvariant applies a PowerPC relocation variant (low/high/
// high-adjusted/DS halfword selection) to the computed value t,
// optionally checking for overflow. Whether the overflow check is
// signed or unsigned depends on the instruction being patched, which
// is inspected via its primary opcode (top 6 bits).
func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 {
	switch r.Variant & ld.RV_TYPE_MASK {
	default:
		ld.Errorf(s, "unexpected relocation variant %d", r.Variant)
		fallthrough

	case ld.RV_NONE:
		return t

	case ld.RV_POWER_LO:
		if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
			// Whether to check for signed or unsigned
			// overflow depends on the instruction
			var o1 uint32
			if ctxt.Arch.ByteOrder == binary.BigEndian {
				// On big-endian the halfword reloc points 2 bytes into
				// the instruction; back up to read the full word.
				o1 = ld.Be32(s.P[r.Off-2:])
			} else {
				o1 = ld.Le32(s.P[r.Off:])
			}
			switch o1 >> 26 {
			case 24, // ori
				26, // xori
				28: // andi
				// Logical ops take an unsigned immediate.
				if t>>16 != 0 {
					goto overflow
				}

			default:
				// Arithmetic ops sign-extend the immediate.
				if int64(int16(t)) != t {
					goto overflow
				}
			}
		}
		return int64(int16(t))

	case ld.RV_POWER_HA:
		// High-adjusted: bias so the sign-extended low half still sums to t.
		t += 0x8000
		fallthrough

		// Fallthrough
	case ld.RV_POWER_HI:
		t >>= 16

		if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
			// Whether to check for signed or unsigned
			// overflow depends on the instruction
			var o1 uint32
			if ctxt.Arch.ByteOrder == binary.BigEndian {
				o1 = ld.Be32(s.P[r.Off-2:])
			} else {
				o1 = ld.Le32(s.P[r.Off:])
			}
			switch o1 >> 26 {
			case 25, // oris
				27, // xoris
				29: // andis
				// Logical ops take an unsigned immediate.
				if t>>16 != 0 {
					goto overflow
				}

			default:
				if int64(int16(t)) != t {
					goto overflow
				}
			}
		}
		return int64(int16(t))

	case ld.RV_POWER_DS:
		// DS-form: preserve the low 2 opcode bits of the existing
		// halfword and require a 4-byte-aligned value.
		var o1 uint32
		if ctxt.Arch.ByteOrder == binary.BigEndian {
			o1 = uint32(ld.Be16(s.P[r.Off:]))
		} else {
			o1 = uint32(ld.Le16(s.P[r.Off:]))
		}
		if t&3 != 0 {
			ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
		}
		if (r.Variant&ld.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t {
			goto overflow
		}
		return int64(o1)&0x3 | int64(int16(t))
	}

overflow:
	ld.Errorf(s, "relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t)
	return t
}
// adddynrel converts an ELF relocation (types offset by 256) read from
// an object file into the linker's internal relocation types, adding
// PLT/GOT entries for dynamic symbols where needed. Reports whether the
// relocation was handled; false means it must be rejected.
func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
	targ := r.Sym

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Errorf(s, "unexpected relocation type %d", r.Type)
			return false
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_390_12,
		256 + ld.R_390_GOT12:
		ld.Errorf(s, "s390x 12-bit relocations have not been implemented (relocation type %d)", r.Type-256)
		return false

	case 256 + ld.R_390_8,
		256 + ld.R_390_16,
		256 + ld.R_390_32,
		256 + ld.R_390_64:
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_390_nn relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_ADDR
		return true

	case 256 + ld.R_390_PC16,
		256 + ld.R_390_PC32,
		256 + ld.R_390_PC64:
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_390_PCnn relocation for dynamic symbol %s", targ.Name)
		}
		if targ.Type == 0 || targ.Type == obj.SXREF {
			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
		}
		r.Type = obj.R_PCREL
		// ELF pc-relative relocs are relative to the reloc location;
		// internal ones are relative to the end, hence the Siz adjustment.
		r.Add += int64(r.Siz)
		return true

	case 256 + ld.R_390_GOT16,
		256 + ld.R_390_GOT32,
		256 + ld.R_390_GOT64:
		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
		return true

	case 256 + ld.R_390_PLT16DBL,
		256 + ld.R_390_PLT32DBL:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_390_DBL
		r.Add += int64(r.Siz)
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add += int64(targ.Plt)
		}
		return true

	case 256 + ld.R_390_PLT32,
		256 + ld.R_390_PLT64:
		r.Type = obj.R_PCREL
		r.Add += int64(r.Siz)
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add += int64(targ.Plt)
		}
		return true

	case 256 + ld.R_390_COPY:
		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
		return false

	case 256 + ld.R_390_GLOB_DAT:
		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
		return false

	case 256 + ld.R_390_JMP_SLOT:
		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
		return false

	case 256 + ld.R_390_RELATIVE:
		ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256)
		return false

	case 256 + ld.R_390_GOTOFF:
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_390_GOTOFF relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_GOTOFF
		return true

	case 256 + ld.R_390_GOTPC:
		r.Type = obj.R_PCREL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += int64(r.Siz)
		return true

	case 256 + ld.R_390_PC16DBL,
		256 + ld.R_390_PC32DBL:
		// *DBL relocations encode halved (2-byte-unit) displacements.
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_390_DBL
		r.Add += int64(r.Siz)
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_390_PCnnDBL relocation for dynamic symbol %s", targ.Name)
		}
		return true

	case 256 + ld.R_390_GOTPCDBL:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_390_DBL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += int64(r.Siz)
		return true

	case 256 + ld.R_390_GOTENT:
		addgotsym(ctxt, targ)

		r.Type = obj.R_PCREL
		r.Variant = ld.RV_390_DBL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += int64(targ.Got)
		r.Add += int64(r.Siz)
		return true
	}
	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return true
	}

	return false
}
// machoreloc1 writes one Mach-O relocation record for r (amd64) at
// section offset sectoff. Returns 0 on success, -1 if the relocation
// cannot be represented.
func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
	var v uint32

	rs := r.Xsym

	// PCREL and GOTPCREL are forced external because their Mach-O
	// encodings below only work with external relocations.
	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_PCREL || r.Type == obj.R_GOTPCREL {
		if rs.Dynid < 0 {
			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
			return -1
		}

		v = uint32(rs.Dynid)
		v |= 1 << 27 // external relocation
	} else {
		v = uint32(rs.Sect.Extnum)
		if v == 0 {
			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
			return -1
		}
	}

	switch r.Type {
	default:
		return -1

	case obj.R_ADDR:
		v |= ld.MACHO_X86_64_RELOC_UNSIGNED << 28

	case obj.R_CALL:
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_X86_64_RELOC_BRANCH << 28

		// NOTE: Only works with 'external' relocation. Forced above.
	case obj.R_PCREL:
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_X86_64_RELOC_SIGNED << 28
	case obj.R_GOTPCREL:
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_X86_64_RELOC_GOT_LOAD << 28
	}

	// Bits 25-26 encode log2 of the relocation width.
	switch r.Siz {
	default:
		return -1

	case 1:
		v |= 0 << 25

	case 2:
		v |= 1 << 25

	case 4:
		v |= 2 << 25

	case 8:
		v |= 3 << 25
	}

	ld.Thearch.Lput(uint32(sectoff))
	ld.Thearch.Lput(v)
	return 0
}
// archreloc resolves relocation r in symbol s (ppc64), writing the
// result into *val. Returns 0 on success, -1 for unhandled types. In
// external link mode it only prepares r (Xsym/Xadd) for the external
// linker instead of resolving it.
func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
	if ld.Linkmode == ld.LinkExternal {
		switch r.Type {
		default:
			return -1

		case obj.R_POWER_TLS, obj.R_POWER_TLS_LE, obj.R_POWER_TLS_IE:
			r.Done = 0
			// check Outer is nil, Type is TLSBSS?
			r.Xadd = r.Add
			r.Xsym = r.Sym
			return 0

		case obj.R_ADDRPOWER,
			obj.R_ADDRPOWER_DS,
			obj.R_ADDRPOWER_TOCREL,
			obj.R_ADDRPOWER_TOCREL_DS,
			obj.R_ADDRPOWER_GOT,
			obj.R_ADDRPOWER_PCREL:
			r.Done = 0

			// set up addend for eventual relocation via outer symbol.
			rs := r.Sym
			r.Xadd = r.Add
			for rs.Outer != nil {
				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
				rs = rs.Outer
			}

			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
				ld.Errorf(s, "missing section for %s", rs.Name)
			}
			r.Xsym = rs
			return 0

		case obj.R_CALLPOWER:
			r.Done = 0
			r.Xsym = r.Sym
			r.Xadd = r.Add
			return 0
		}
	}

	switch r.Type {
	case obj.R_CONST:
		*val = r.Add
		return 0

	case obj.R_GOTOFF:
		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
		return 0

	case obj.R_ADDRPOWER, obj.R_ADDRPOWER_DS:
		return archrelocaddr(ctxt, r, s, val)

	case obj.R_CALLPOWER:
		// Bits 6 through 29 = (S + A - P) >> 2
		t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))

		if t&3 != 0 {
			ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
		}
		// Sign-extension check: the displacement must fit in 26 bits.
		if int64(int32(t<<6)>>6) != t {
			// TODO(austin) This can happen if text > 32M.
			// Add a call trampoline to .text in that case.
			ld.Errorf(s, "relocation for %s+%d is too big: %d", r.Sym.Name, r.Off, t)
		}

		*val |= int64(uint32(t) &^ 0xfc000003)
		return 0

	case obj.R_POWER_TOC: // S + A - .TOC.
		*val = ld.Symaddr(r.Sym) + r.Add - symtoc(ctxt, s)

		return 0

	case obj.R_POWER_TLS_LE:
		// The thread pointer points 0x7000 bytes after the start of the
		// thread local storage area as documented in section "3.7.2 TLS
		// Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI
		// Specification".
		v := r.Sym.Value - 0x7000
		if int64(int16(v)) != v {
			ld.Errorf(s, "TLS offset out of range %d", v)
		}
		*val = (*val &^ 0xffff) | (v & 0xffff)
		return 0
	}

	return -1
}
// adddynrel converts an ELF relocation (types offset by 256) read from
// an object file into the linker's internal ppc64 relocation types,
// setting TOC/halfword variants as needed. Reports whether the
// relocation was handled.
func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
	targ := r.Sym

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Errorf(s, "unexpected relocation type %d", r.Type)
			return false
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_PPC64_REL24:
		r.Type = obj.R_CALLPOWER

		// This is a local call, so the caller isn't setting
		// up r12 and r2 is the same for the caller and
		// callee. Hence, we need to go to the local entry
		// point.  (If we don't do this, the callee will try
		// to use r12 to compute r2.)
		r.Add += int64(r.Sym.Localentry) * 4

		if targ.Type == obj.SDYNIMPORT {
			// Should have been handled in elfsetupplt
			ld.Errorf(s, "unexpected R_PPC64_REL24 for dyn import")
		}

		return true

	case 256 + ld.R_PPC_REL32:
		r.Type = obj.R_PCREL
		r.Add += 4

		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_PPC_REL32 for dyn import")
		}

		return true

	case 256 + ld.R_PPC64_ADDR64:
		r.Type = obj.R_ADDR
		if targ.Type == obj.SDYNIMPORT {
			// These happen in .toc sections
			ld.Adddynsym(ctxt, targ)

			rela := ctxt.Syms.Lookup(".rela", 0)
			ld.Addaddrplus(ctxt, rela, s, int64(r.Off))
			ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64))
			ld.Adduint64(ctxt, rela, uint64(r.Add))
			r.Type = 256 // ignore during relocsym
		}

		return true

	case 256 + ld.R_PPC64_TOC16:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_LO | ld.RV_CHECK_OVERFLOW
		return true

	case 256 + ld.R_PPC64_TOC16_LO:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_LO
		return true

	case 256 + ld.R_PPC64_TOC16_HA:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW
		return true

	case 256 + ld.R_PPC64_TOC16_HI:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW
		return true

	case 256 + ld.R_PPC64_TOC16_DS:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_DS | ld.RV_CHECK_OVERFLOW
		return true

	case 256 + ld.R_PPC64_TOC16_LO_DS:
		r.Type = obj.R_POWER_TOC
		r.Variant = ld.RV_POWER_DS
		return true

	case 256 + ld.R_PPC64_REL16_LO:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_POWER_LO
		r.Add += 2 // Compensate for relocation size of 2
		return true

	case 256 + ld.R_PPC64_REL16_HI:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW
		r.Add += 2
		return true

	case 256 + ld.R_PPC64_REL16_HA:
		r.Type = obj.R_PCREL
		r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW
		r.Add += 2
		return true
	}

	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return true
	}

	// TODO(austin): Translate our relocations to ELF

	return false
}
// addpltsym assigns a PLT slot to dynamic symbol s, emitting the 32-byte
// s390x lazy PLT stub, the matching GOT entry, and the R_390_JMP_SLOT
// dynamic relocation. No-op if s already has a slot (s.Plt >= 0). Only
// ELF output is supported.
func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
	if s.Plt >= 0 {
		return
	}

	ld.Adddynsym(ctxt, s)

	if ld.Iself {
		plt := ctxt.Syms.Lookup(".plt", 0)
		got := ctxt.Syms.Lookup(".got", 0)
		rela := ctxt.Syms.Lookup(".rela.plt", 0)
		if plt.Size == 0 {
			elfsetupplt(ctxt)
		}
		// larl    %r1,_GLOBAL_OFFSET_TABLE_+index

		ld.Adduint8(ctxt, plt, 0xc0)
		ld.Adduint8(ctxt, plt, 0x10)
		ld.Addpcrelplus(ctxt, plt, got, got.Size+6) // need variant?

		// add to got: pointer to current pos in plt
		ld.Addaddrplus(ctxt, got, plt, plt.Size+8) // weird but correct

		// lg      %r1,0(%r1)
		ld.Adduint8(ctxt, plt, 0xe3)
		ld.Adduint8(ctxt, plt, 0x10)
		ld.Adduint8(ctxt, plt, 0x10)
		ld.Adduint8(ctxt, plt, 0x00)
		ld.Adduint8(ctxt, plt, 0x00)
		ld.Adduint8(ctxt, plt, 0x04)

		// br      %r1  -- jump through the GOT slot (initially back here)
		ld.Adduint8(ctxt, plt, 0x07)
		ld.Adduint8(ctxt, plt, 0xf1)

		// basr    %r1,%r0  -- lazy path: load pc, then the rela index below
		ld.Adduint8(ctxt, plt, 0x0d)
		ld.Adduint8(ctxt, plt, 0x10)

		// lgf     %r1,12(%r1)
		ld.Adduint8(ctxt, plt, 0xe3)
		ld.Adduint8(ctxt, plt, 0x10)
		ld.Adduint8(ctxt, plt, 0x10)
		ld.Adduint8(ctxt, plt, 0x0c)
		ld.Adduint8(ctxt, plt, 0x00)
		ld.Adduint8(ctxt, plt, 0x14)

		// jg .plt  -- branch to the PLT resolver stub
		ld.Adduint8(ctxt, plt, 0xc0)
		ld.Adduint8(ctxt, plt, 0xf4)

		ld.Adduint32(ctxt, plt, uint32(-((plt.Size - 2) >> 1))) // roll-your-own relocation
		//.plt index
		ld.Adduint32(ctxt, plt, uint32(rela.Size)) // rela size before current entry

		// rela
		ld.Addaddrplus(ctxt, rela, got, got.Size-8)

		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_JMP_SLOT))
		ld.Adduint64(ctxt, rela, 0)

		// The stub written above is 32 bytes; record its start offset.
		s.Plt = int32(plt.Size - 32)

	} else {
		ld.Errorf(s, "addpltsym: unsupported binary format")
	}
}
// adddynrel converts relocations read from host object files (ELF types
// offset by 256, Mach-O by 512) into internal amd64 relocation types,
// then handles internal relocations that target dynamic-import symbols
// by routing them through the PLT/GOT or emitting dynamic relocations.
// Reports whether the relocation was handled.
func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
	targ := r.Sym

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Errorf(s, "unexpected relocation type %d", r.Type)
			return false
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_X86_64_PC32:
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_X86_64_PC32 relocation for dynamic symbol %s", targ.Name)
		}
		if targ.Type == 0 || targ.Type == obj.SXREF {
			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
		}
		r.Type = obj.R_PCREL
		r.Add += 4
		return true

	case 256 + ld.R_X86_64_PLT32:
		r.Type = obj.R_PCREL
		r.Add += 4
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add += int64(targ.Plt)
		}

		return true

	case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX:
		if targ.Type != obj.SDYNIMPORT {
			// have symbol
			if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
				// turn MOVQ of GOT entry into LEAQ of symbol itself
				s.P[r.Off-2] = 0x8d

				r.Type = obj.R_PCREL
				r.Add += 4
				return true
			}
		}

		// fall back to using GOT and hope for the best (CMOV*)
		// TODO: just needs relocation, no need to put in .dynsym
		addgotsym(ctxt, targ)

		r.Type = obj.R_PCREL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += 4
		r.Add += int64(targ.Got)
		return true

	case 256 + ld.R_X86_64_64:
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_X86_64_64 relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_ADDR
		return true

	// Handle relocations found in Mach-O object files.
	case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 0,
		512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 0,
		512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 0:
		// TODO: What is the difference between all these?
		r.Type = obj.R_ADDR

		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected reloc for dynamic symbol %s", targ.Name)
		}
		return true

	case 512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 1:
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add = int64(targ.Plt)
			r.Type = obj.R_PCREL
			return true
		}
		fallthrough

		// fall through
	case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED_1*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED_2*2 + 1,
		512 + ld.MACHO_X86_64_RELOC_SIGNED_4*2 + 1:
		r.Type = obj.R_PCREL

		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected pc-relative reloc for dynamic symbol %s", targ.Name)
		}
		return true

	case 512 + ld.MACHO_X86_64_RELOC_GOT_LOAD*2 + 1:
		if targ.Type != obj.SDYNIMPORT {
			// have symbol
			// turn MOVQ of GOT entry into LEAQ of symbol itself
			if r.Off < 2 || s.P[r.Off-2] != 0x8b {
				ld.Errorf(s, "unexpected GOT_LOAD reloc for non-dynamic symbol %s", targ.Name)
				return false
			}

			s.P[r.Off-2] = 0x8d
			r.Type = obj.R_PCREL
			return true
		}
		fallthrough

		// fall through
	case 512 + ld.MACHO_X86_64_RELOC_GOT*2 + 1:
		if targ.Type != obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
		}
		addgotsym(ctxt, targ)
		r.Type = obj.R_PCREL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += int64(targ.Got)
		return true
	}

	// Internal relocation types targeting dynamic-import symbols.
	switch r.Type {
	case obj.R_CALL,
		obj.R_PCREL:
		if targ.Type != obj.SDYNIMPORT {
			// nothing to do, the relocation will be laid out in reloc
			return true
		}
		if ld.Headtype == obj.Hwindows || ld.Headtype == obj.Hwindowsgui {
			// nothing to do, the relocation will be laid out in pereloc1
			return true
		} else {
			// for both ELF and Mach-O
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add = int64(targ.Plt)
			return true
		}

	case obj.R_ADDR:
		if s.Type == obj.STEXT && ld.Iself {
			if ld.Headtype == obj.Hsolaris {
				addpltsym(ctxt, targ)
				r.Sym = ctxt.Syms.Lookup(".plt", 0)
				r.Add += int64(targ.Plt)
				return true
			}
			// The code is asking for the address of an external
			// function. We provide it with the address of the
			// correspondent GOT symbol.
			addgotsym(ctxt, targ)

			r.Sym = ctxt.Syms.Lookup(".got", 0)
			r.Add += int64(targ.Got)
			return true
		}

		// Process dynamic relocations for the data sections.
		if ld.Buildmode == ld.BuildmodePIE && ld.Linkmode == ld.LinkInternal {
			// When internally linking, generate dynamic relocations
			// for all typical R_ADDR relocations. The exception
			// are those R_ADDR that are created as part of generating
			// the dynamic relocations and must be resolved statically.
			//
			// There are three phases relevant to understanding this:
			//
			//	dodata()  // we are here
			//	address() // symbol address assignment
			//	reloc()   // resolution of static R_ADDR relocs
			//
			// At this point symbol addresses have not been
			// assigned yet (as the final size of the .rela section
			// will affect the addresses), and so we cannot write
			// the Elf64_Rela.r_offset now. Instead we delay it
			// until after the 'address' phase of the linker is
			// complete. We do this via Addaddrplus, which creates
			// a new R_ADDR relocation which will be resolved in
			// the 'reloc' phase.
			//
			// These synthetic static R_ADDR relocs must be skipped
			// now, or else we will be caught in an infinite loop
			// of generating synthetic relocs for our synthetic
			// relocs.
			switch s.Name {
			case ".dynsym", ".rela", ".got.plt", ".dynamic":
				return false
			}
		} else {
			// Either internally linking a static executable,
			// in which case we can resolve these relocations
			// statically in the 'reloc' phase, or externally
			// linking, in which case the relocation will be
			// prepared in the 'reloc' phase and passed to the
			// external linker in the 'asmb' phase.
			if s.Type != obj.SDATA && s.Type != obj.SRODATA {
				break
			}
		}

		if ld.Iself {
			// TODO: We generate a R_X86_64_64 relocation for every R_ADDR, even
			// though it would be more efficient (for the dynamic linker) if we
			// generated R_X86_RELATIVE instead.
			ld.Adddynsym(ctxt, targ)
			rela := ctxt.Syms.Lookup(".rela", 0)
			ld.Addaddrplus(ctxt, rela, s, int64(r.Off))
			if r.Siz == 8 {
				ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64))
			} else {
				// TODO: never happens, remove.
				ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_32))
			}
			ld.Adduint64(ctxt, rela, uint64(r.Add))
			r.Type = 256 // ignore during relocsym
			return true
		}

		if ld.Headtype == obj.Hdarwin && s.Size == int64(ld.SysArch.PtrSize) && r.Off == 0 {
			// Mach-O relocations are a royal pain to lay out.
			// They use a compact stateful bytecode representation
			// that is too much bother to deal with.
			// Instead, interpret the C declaration
			//	void *_Cvar_stderr = &stderr;
			// as making _Cvar_stderr the name of a GOT entry
			// for stderr. This is separate from the usual GOT entry,
			// just in case the C code assigns to the variable,
			// and of course it only works for single pointers,
			// but we only need to support cgo and that's all it needs.
			ld.Adddynsym(ctxt, targ)

			got := ctxt.Syms.Lookup(".got", 0)
			s.Type = got.Type | obj.SSUB
			s.Outer = got
			s.Sub = got.Sub
			got.Sub = s
			s.Value = got.Size
			ld.Adduint64(ctxt, got, 0)
			ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(targ.Dynid))
			r.Type = 256 // ignore during relocsym
			return true
		}

		if ld.Headtype == obj.Hwindows || ld.Headtype == obj.Hwindowsgui {
			// nothing to do, the relocation will be laid out in pereloc1
			return true
		}
	}

	return false
}
// trampoline resolves the direct jump relocation r in symbol s, inserting a
// trampoline if the branch target is out of the instruction's direct range.
// A PowerPC branch displacement must fit in a signed 26-bit byte offset
// (24-bit field, word-aligned); the int32(t<<6)>>6 round-trip sign-extends
// bit 25 to test exactly that.
func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) {
	t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
	switch r.Type {
	case obj.R_CALLPOWER:
		// If branch offset is too far then create a trampoline.
		// (With -debugtramp>1, also force trampolines for any
		// cross-file call, to exercise this path.)
		if int64(int32(t<<6)>>6) != t || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) {
			var tramp *ld.Symbol
			for i := 0; ; i++ {
				// Using r.Add as part of the name is significant in functions like duffzero where the call
				// target is at some offset within the function. Calls to duff+8 and duff+256 must appear as
				// distinct trampolines.
				name := r.Sym.Name
				if r.Add == 0 {
					name = name + fmt.Sprintf("-tramp%d", i)
				} else {
					name = name + fmt.Sprintf("%+x-tramp%d", r.Add, i)
				}

				// Look up the trampoline in case it already exists
				tramp = ctxt.Syms.Lookup(name, int(r.Sym.Version))
				if tramp.Value == 0 {
					// Address not yet assigned: either a brand-new symbol
					// (create below) or one that will be laid out after the
					// current function — usable either way.
					break
				}

				t = ld.Symaddr(tramp) + r.Add - (s.Value + int64(r.Off))

				// If the offset of the trampoline that has been found is within range, use it.
				if int64(int32(t<<6)>>6) == t {
					break
				}
			}
			if tramp.Type == 0 {
				// Zero Type indicates Lookup created a fresh symbol:
				// generate the 4-instruction trampoline body.
				ctxt.AddTramp(tramp)
				tramp.Size = 16 // 4 instructions
				tramp.P = make([]byte, tramp.Size)
				t = ld.Symaddr(r.Sym) + r.Add
				f := t & 0xffff0000
				o1 := uint32(0x3fe00000 | (f >> 16)) // lis r31,trampaddr hi (r31 is temp reg)
				f = t & 0xffff
				o2 := uint32(0x63ff0000 | f) // ori r31,trampaddr lo
				o3 := uint32(0x7fe903a6)     // mtctr
				o4 := uint32(0x4e800420)     // bctr
				ld.SysArch.ByteOrder.PutUint32(tramp.P, o1)
				ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2)
				ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3)
				ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4)
			}

			// Redirect the relocation to the trampoline; it will be
			// resolved against the trampoline's address later.
			r.Sym = tramp
			r.Add = 0 // This was folded into the trampoline target address
			r.Done = 0
		}
	default:
		ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type)
	}
}
// machoreloc1 emits the Mach-O relocation record(s) for reloc r of symbol s
// at section offset sectoff, writing 32-bit words via ld.Thearch.Lput.
// It returns 0 on success and -1 for relocations it cannot express.
func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
	var v uint32

	rs := r.Xsym

	// ld64 has a bug handling MACHO_ARM64_RELOC_UNSIGNED with !extern relocation.
	// see cmd/internal/ld/data.go for details. The workaround is that don't use !extern
	// UNSIGNED relocation at all.
	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_CALLARM64 || r.Type == obj.R_ADDRARM64 || r.Type == obj.R_ADDR {
		if rs.Dynid < 0 {
			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
			return -1
		}

		v = uint32(rs.Dynid)
		v |= 1 << 27 // external relocation
	} else {
		// Section-relative (!extern) relocation: reference by section number.
		v = uint32(rs.Sect.Extnum)
		if v == 0 {
			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
			return -1
		}
	}

	switch r.Type {
	default:
		return -1

	case obj.R_ADDR:
		v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28

	case obj.R_CALLARM64:
		if r.Xadd != 0 {
			ld.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", rs.Name, r.Xadd)
		}

		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28

	case obj.R_ADDRARM64:
		r.Siz = 4

		// Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21
		// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
		if r.Xadd != 0 {
			ld.Thearch.Lput(uint32(sectoff + 4))
			ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}
		ld.Thearch.Lput(uint32(sectoff + 4))
		ld.Thearch.Lput(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25))
		if r.Xadd != 0 {
			ld.Thearch.Lput(uint32(sectoff))
			ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}
		// The PAGE21 record for this relocation is written by the common
		// epilogue below.
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28
	}

	// Encode log2 of the relocated field's width in bits 25-26.
	switch r.Siz {
	default:
		return -1

	case 1:
		v |= 0 << 25

	case 2:
		v |= 1 << 25

	case 4:
		v |= 2 << 25

	case 8:
		v |= 3 << 25
	}

	ld.Thearch.Lput(uint32(sectoff))
	ld.Thearch.Lput(v)
	return 0
}
// archreloc computes the value of relocation r in symbol s for ARM,
// storing the resolved instruction word in *val. In external linking
// mode it instead prepares r (Xsym/Xadd) for the external linker.
// Returns 0 when the relocation was handled, -1 otherwise.
func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int {
	if ld.Linkmode == ld.LinkExternal {
		switch r.Type {
		case obj.R_CALLARM:
			r.Done = 0

			// set up addend for eventual relocation via outer symbol.
			rs := r.Sym

			// The addend travels in the low 24 bits of the instruction,
			// sign-extended and scaled by the 4-byte instruction size.
			r.Xadd = int64(signext24(r.Add & 0xffffff))
			r.Xadd *= 4
			for rs.Outer != nil {
				r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
				rs = rs.Outer
			}

			if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
				ld.Errorf(s, "missing section for %s", rs.Name)
			}
			r.Xsym = rs

			// ld64 for arm seems to want the symbol table to contain offset
			// into the section rather than pseudo virtual address that contains
			// the section load address.
			// we need to compensate that by removing the instruction's address
			// from addend.
			if ld.Headtype == obj.Hdarwin {
				r.Xadd -= ld.Symaddr(s) + int64(r.Off)
			}

			// Branch displacement must fit in the signed 24-bit field.
			if r.Xadd/4 > 0x7fffff || r.Xadd/4 < -0x800000 {
				ld.Errorf(s, "direct call too far %d", r.Xadd/4)
			}

			*val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4))))
			return 0
		}

		return -1
	}

	switch r.Type {
	case obj.R_CONST:
		*val = r.Add
		return 0

	case obj.R_GOTOFF:
		*val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0))
		return 0

	// The following three arch specific relocations are only for generation of
	// Linux/ARM ELF's PLT entry (3 assembler instruction)
	case obj.R_PLT0: // add ip, pc, #0xXX00000
		if ld.Symaddr(ctxt.Syms.Lookup(".got.plt", 0)) < ld.Symaddr(ctxt.Syms.Lookup(".plt", 0)) {
			ld.Errorf(s, ".got.plt should be placed after .plt section.")
		}
		*val = 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add)) >> 20))
		return 0

	case obj.R_PLT1: // add ip, ip, #0xYY000
		*val = 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+4)) >> 12))
		return 0

	case obj.R_PLT2: // ldr pc, [ip, #0xZZZ]!
		*val = 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+8)))
		return 0

	case obj.R_CALLARM: // bl XXXXXX or b YYYYYY
		// r.Add is the instruction
		// low 24-bit encodes the target address
		t := (ld.Symaddr(r.Sym) + int64(signext24(r.Add&0xffffff)*4) - (s.Value + int64(r.Off))) / 4
		if t > 0x7fffff || t < -0x800000 {
			ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t)
		}
		// Merge the word-scaled displacement back into the low 24 bits,
		// keeping the opcode/condition in the high byte of r.Add.
		*val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&t)))
		return 0
	}

	return -1
}
// addpltsym ensures dynamic symbol s has a PLT entry, creating the entry
// (plus its GOT slot and dynamic relocation) on first use. s.Plt holds the
// entry's offset within .plt; a value >= 0 means it was already assigned.
func addpltsym(ctxt *ld.Link, s *ld.Symbol) {
	if s.Plt >= 0 {
		return
	}

	ld.Adddynsym(ctxt, s)

	if ld.Iself {
		plt := ctxt.Syms.Lookup(".plt", 0)
		got := ctxt.Syms.Lookup(".got.plt", 0)
		rela := ctxt.Syms.Lookup(".rela.plt", 0)
		// First entry: emit the PLT header lazily.
		if plt.Size == 0 {
			elfsetupplt(ctxt)
		}

		// jmpq *got+size(IP)
		ld.Adduint8(ctxt, plt, 0xff)
		ld.Adduint8(ctxt, plt, 0x25)
		ld.Addpcrelplus(ctxt, plt, got, got.Size)

		// add to got: pointer to current pos in plt
		ld.Addaddrplus(ctxt, got, plt, plt.Size)

		// pushq $x
		ld.Adduint8(ctxt, plt, 0x68)
		ld.Adduint32(ctxt, plt, uint32((got.Size-24-8)/8))

		// jmpq .plt
		ld.Adduint8(ctxt, plt, 0xe9)
		ld.Adduint32(ctxt, plt, uint32(-(plt.Size + 4)))

		// rela
		ld.Addaddrplus(ctxt, rela, got, got.Size-8)
		ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_JMP_SLOT))
		ld.Adduint64(ctxt, rela, 0)

		// Record the start of the 16-byte entry just written.
		s.Plt = int32(plt.Size - 16)
	} else if ld.Headtype == obj.Hdarwin {
		// To do lazy symbol lookup right, we're supposed
		// to tell the dynamic loader which library each
		// symbol comes from and format the link info
		// section just so. I'm too lazy (ha!) to do that
		// so for now we'll just use non-lazy pointers,
		// which don't need to be told which library to use.
		//
		// http://networkpx.blogspot.com/2009/09/about-lcdyldinfoonly-command.html
		// has details about what we're avoiding.

		addgotsym(ctxt, s)
		plt := ctxt.Syms.Lookup(".plt", 0)

		ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.plt", 0), uint32(s.Dynid))

		// jmpq *got+size(IP)
		s.Plt = int32(plt.Size)

		ld.Adduint8(ctxt, plt, 0xff)
		ld.Adduint8(ctxt, plt, 0x25)
		ld.Addpcrelplus(ctxt, plt, ctxt.Syms.Lookup(".got", 0), int64(s.Got))
	} else {
		ld.Errorf(s, "addpltsym: unsupported binary format")
	}
}
// machoreloc1 emits the Mach-O relocation record(s) for reloc r of symbol s
// at section offset sectoff, writing 32-bit words via ld.Thearch.Lput.
// PC-relative relocations are emitted as a scattered SECTDIFF/PAIR pair;
// everything else uses a single conventional record.
// Returns 0 on success, -1 for relocations it cannot express.
func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int {
	var v uint32

	rs := r.Xsym

	if r.Type == obj.R_PCREL {
		if rs.Type == obj.SHOSTOBJ {
			ld.Errorf(s, "pc-relative relocation of external symbol is not supported")
			return -1
		}
		if r.Siz != 4 {
			return -1
		}

		// emit a pair of "scattered" relocations that
		// resolve to the difference of section addresses of
		// the symbol and the instruction
		// this value is added to the field being relocated
		o1 := uint32(sectoff)
		o1 |= 1 << 31 // scattered bit
		o1 |= ld.MACHO_ARM_RELOC_SECTDIFF << 24
		o1 |= 2 << 28 // size = 4

		o2 := uint32(0)
		o2 |= 1 << 31 // scattered bit
		o2 |= ld.MACHO_ARM_RELOC_PAIR << 24
		o2 |= 2 << 28 // size = 4

		ld.Thearch.Lput(o1)
		ld.Thearch.Lput(uint32(ld.Symaddr(rs)))
		ld.Thearch.Lput(o2)
		ld.Thearch.Lput(uint32(s.Value + int64(r.Off)))
		return 0
	}

	if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_CALLARM {
		if rs.Dynid < 0 {
			ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type)
			return -1
		}

		v = uint32(rs.Dynid)
		v |= 1 << 27 // external relocation
	} else {
		// Section-relative (!extern) relocation: reference by section number.
		v = uint32(rs.Sect.Extnum)
		if v == 0 {
			ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type)
			return -1
		}
	}

	switch r.Type {
	default:
		return -1

	case obj.R_ADDR:
		v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28

	case obj.R_CALLARM:
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM_RELOC_BR24 << 28
	}

	// Encode log2 of the relocated field's width in bits 25-26.
	switch r.Siz {
	default:
		return -1

	case 1:
		v |= 0 << 25

	case 2:
		v |= 1 << 25

	case 4:
		v |= 2 << 25

	case 8:
		v |= 3 << 25
	}

	ld.Thearch.Lput(uint32(sectoff))
	ld.Thearch.Lput(v)
	return 0
}
// adddynrel converts relocation r in symbol s — typically an ELF relocation
// read from a host object (type >= 256) — into the linker's internal form,
// creating PLT/GOT entries and dynamic relocation records as needed.
// It returns false when the relocation cannot be handled.
func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool {
	targ := r.Sym

	switch r.Type {
	default:
		if r.Type >= 256 {
			ld.Errorf(s, "unexpected relocation type %d", r.Type)
			return false
		}

		// Handle relocations found in ELF object files.
	case 256 + ld.R_ARM_PLT32:
		r.Type = obj.R_CALLARM

		if targ.Type == obj.SDYNIMPORT {
			// Redirect the call through the target's PLT entry.
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
		}

		return true

	case 256 + ld.R_ARM_THM_PC22: // R_ARM_THM_CALL
		// Thumb calls are not supported; the object was likely compiled
		// without -marm.
		ld.Exitf("R_ARM_THM_CALL, are you using -marm?")
		return false

	case 256 + ld.R_ARM_GOT32: // R_ARM_GOT_BREL
		if targ.Type != obj.SDYNIMPORT {
			addgotsyminternal(ctxt, targ)
		} else {
			addgotsym(ctxt, targ)
		}

		r.Type = obj.R_CONST // write r->add during relocsym
		r.Sym = nil
		r.Add += int64(targ.Got)
		return true

	case 256 + ld.R_ARM_GOT_PREL: // GOT(nil) + A - nil
		if targ.Type != obj.SDYNIMPORT {
			addgotsyminternal(ctxt, targ)
		} else {
			addgotsym(ctxt, targ)
		}

		r.Type = obj.R_PCREL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += int64(targ.Got) + 4
		return true

	case 256 + ld.R_ARM_GOTOFF: // R_ARM_GOTOFF32
		r.Type = obj.R_GOTOFF
		return true

	case 256 + ld.R_ARM_GOTPC: // R_ARM_BASE_PREL
		r.Type = obj.R_PCREL
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += 4
		return true

	case 256 + ld.R_ARM_CALL:
		r.Type = obj.R_CALLARM
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
		}

		return true

	case 256 + ld.R_ARM_REL32: // R_ARM_REL32
		r.Type = obj.R_PCREL
		r.Add += 4
		return true

	case 256 + ld.R_ARM_ABS32:
		if targ.Type == obj.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_ARM_ABS32 relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = obj.R_ADDR
		return true

		// we can just ignore this, because we are targeting ARM V5+ anyway
	case 256 + ld.R_ARM_V4BX:
		if r.Sym != nil {
			// R_ARM_V4BX is ABS relocation, so this symbol is a dummy symbol, ignore it
			r.Sym.Type = 0
		}

		r.Sym = nil
		return true

	case 256 + ld.R_ARM_PC24,
		256 + ld.R_ARM_JUMP24:
		r.Type = obj.R_CALLARM
		if targ.Type == obj.SDYNIMPORT {
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add = int64(braddoff(int32(r.Add), targ.Plt/4))
		}

		return true
	}

	// Handle references to ELF symbols from our own object files.
	if targ.Type != obj.SDYNIMPORT {
		return true
	}

	switch r.Type {
	case obj.R_CALLARM:
		addpltsym(ctxt, targ)
		r.Sym = ctxt.Syms.Lookup(".plt", 0)
		r.Add = int64(targ.Plt)
		return true

	case obj.R_ADDR:
		if s.Type != obj.SDATA {
			break
		}
		if ld.Iself {
			ld.Adddynsym(ctxt, targ)
			rel := ctxt.Syms.Lookup(".rel", 0)
			ld.Addaddrplus(ctxt, rel, s, int64(r.Off))
			ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynamic reloc
			r.Type = obj.R_CONST                                                            // write r->add during relocsym
			r.Sym = nil
			return true
		}
	}

	return false
}
func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { if ld.Linkmode == ld.LinkExternal { switch r.Type { default: return -1 case obj.R_ARM64_GOTPCREL: var o1, o2 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { o1 = uint32(*val >> 32) o2 = uint32(*val) } else { o1 = uint32(*val) o2 = uint32(*val >> 32) } // Any relocation against a function symbol is redirected to // be against a local symbol instead (see putelfsym in // symtab.go) but unfortunately the system linker was buggy // when confronted with a R_AARCH64_ADR_GOT_PAGE relocation // against a local symbol until May 2015 // (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So // we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp; // add + R_ADDRARM64. if !(r.Sym.Version != 0 || (r.Sym.Type&obj.SHIDDEN != 0) || r.Sym.Attr.Local()) && r.Sym.Type == obj.STEXT && ctxt.DynlinkingGo() { if o2&0xffc00000 != 0xf9400000 { ld.Errorf(s, "R_ARM64_GOTPCREL against unexpected instruction %x", o2) } o2 = 0x91000000 | (o2 & 0x000003ff) r.Type = obj.R_ADDRARM64 } if ctxt.Arch.ByteOrder == binary.BigEndian { *val = int64(o1)<<32 | int64(o2) } else { *val = int64(o2)<<32 | int64(o1) } fallthrough case obj.R_ADDRARM64: r.Done = 0 // set up addend for eventual relocation via outer symbol. rs := r.Sym r.Xadd = r.Add for rs.Outer != nil { r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer) rs = rs.Outer } if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs // Note: ld64 currently has a bug that any non-zero addend for BR26 relocation // will make the linking fail because it thinks the code is not PIC even though // the BR26 relocation should be fully resolved at link time. // That is the reason why the next if block is disabled. When the bug in ld64 // is fixed, we can enable this block and also enable duff's device in cmd/7g. 
if false && ld.Headtype == obj.Hdarwin { var o0, o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { o0 = uint32(*val >> 32) o1 = uint32(*val) } else { o0 = uint32(*val) o1 = uint32(*val >> 32) } // Mach-O wants the addend to be encoded in the instruction // Note that although Mach-O supports ARM64_RELOC_ADDEND, it // can only encode 24-bit of signed addend, but the instructions // supports 33-bit of signed addend, so we always encode the // addend in place. o0 |= (uint32((r.Xadd>>12)&3) << 29) | (uint32((r.Xadd>>12>>2)&0x7ffff) << 5) o1 |= uint32(r.Xadd&0xfff) << 10 r.Xadd = 0 // when laid out, the instruction order must always be o1, o2. if ctxt.Arch.ByteOrder == binary.BigEndian { *val = int64(o0)<<32 | int64(o1) } else { *val = int64(o1)<<32 | int64(o0) } } return 0 case obj.R_CALLARM64, obj.R_ARM64_TLS_LE, obj.R_ARM64_TLS_IE: r.Done = 0 r.Xsym = r.Sym r.Xadd = r.Add return 0 } } switch r.Type { case obj.R_CONST: *val = r.Add return 0 case obj.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) return 0 case obj.R_ADDRARM64: t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff) if t >= 1<<32 || t < -1<<32 { ld.Errorf(s, "program too large, address relocation distance = %d", t) } var o0, o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { o0 = uint32(*val >> 32) o1 = uint32(*val) } else { o0 = uint32(*val) o1 = uint32(*val >> 32) } o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5) o1 |= uint32(t&0xfff) << 10 // when laid out, the instruction order must always be o1, o2. if ctxt.Arch.ByteOrder == binary.BigEndian { *val = int64(o0)<<32 | int64(o1) } else { *val = int64(o1)<<32 | int64(o0) } return 0 case obj.R_ARM64_TLS_LE: r.Done = 0 if ld.Headtype != obj.Hlinux { ld.Errorf(s, "TLS reloc on unsupported OS %v", ld.Headtype) } // The TCB is two pointers. This is not documented anywhere, but is // de facto part of the ABI. 
v := r.Sym.Value + int64(2*ld.SysArch.PtrSize) if v < 0 || v >= 32678 { ld.Errorf(s, "TLS offset out of range %d", v) } *val |= v << 5 return 0 case obj.R_CALLARM64: t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off)) if t >= 1<<27 || t < -1<<27 { ld.Errorf(s, "program too large, call relocation distance = %d", t) } *val |= (t >> 2) & 0x03ffffff return 0 } return -1 }
// asmb writes out the final executable: text, data, and DWARF segments,
// then (unless -s) the symbol table, and finally the file header whose
// format depends on ld.Headtype. Output ordering is significant; each
// section is positioned with ld.Cseek before being written.
func asmb(ctxt *ld.Link) {
	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f asmb\n", obj.Cputime())
	}

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f codeblk\n", obj.Cputime())
	}

	if ld.Iself {
		ld.Asmbelfsetup()
	}

	// Text sections: the first is padded code, the rest written as data.
	sect := ld.Segtext.Sect
	ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
	// 0xCC is INT $3 - breakpoint instruction
	ld.CodeblkPad(ctxt, int64(sect.Vaddr), int64(sect.Length), []byte{0xCC})
	for sect = sect.Next; sect != nil; sect = sect.Next {
		ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
		ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
	}

	if ld.Segrodata.Filelen > 0 {
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f rodatblk\n", obj.Cputime())
		}
		ld.Cseek(int64(ld.Segrodata.Fileoff))
		ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
	}
	if ld.Segrelrodata.Filelen > 0 {
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f relrodatblk\n", obj.Cputime())
		}
		ld.Cseek(int64(ld.Segrelrodata.Fileoff))
		ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
	}

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f datblk\n", obj.Cputime())
	}

	ld.Cseek(int64(ld.Segdata.Fileoff))
	ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))

	ld.Cseek(int64(ld.Segdwarf.Fileoff))
	ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))

	machlink := int64(0)
	if ld.Headtype == obj.Hdarwin {
		machlink = ld.Domacholink(ctxt)
	}

	switch ld.Headtype {
	default:
		ld.Errorf(nil, "unknown header type %v", ld.Headtype)
		fallthrough

	case obj.Hplan9:
		break

	case obj.Hdarwin:
		ld.Flag8 = true /* 64-bit addresses */

	case obj.Hlinux,
		obj.Hfreebsd,
		obj.Hnetbsd,
		obj.Hopenbsd,
		obj.Hdragonfly,
		obj.Hsolaris:
		ld.Flag8 = true /* 64-bit addresses */

	case obj.Hnacl,
		obj.Hwindows,
		obj.Hwindowsgui:
		break
	}

	ld.Symsize = 0
	ld.Spsize = 0
	ld.Lcsize = 0
	symo := int64(0)
	if !*ld.FlagS {
		// Compute the file offset of the symbol table for this format,
		// then emit it.
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f sym\n", obj.Cputime())
		}
		switch ld.Headtype {
		default:
		case obj.Hplan9:
			*ld.FlagS = true
			symo = int64(ld.Segdata.Fileoff + ld.Segdata.Filelen)

		case obj.Hdarwin:
			symo = int64(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))

		case obj.Hlinux,
			obj.Hfreebsd,
			obj.Hnetbsd,
			obj.Hopenbsd,
			obj.Hdragonfly,
			obj.Hsolaris,
			obj.Hnacl:
			symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
			symo = ld.Rnd(symo, int64(*ld.FlagRound))

		case obj.Hwindows,
			obj.Hwindowsgui:
			symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
			symo = ld.Rnd(symo, ld.PEFILEALIGN)
		}

		ld.Cseek(symo)
		switch ld.Headtype {
		default:
			if ld.Iself {
				ld.Cseek(symo)
				ld.Asmelfsym(ctxt)
				ld.Cflush()
				ld.Cwrite(ld.Elfstrdat)
				if ctxt.Debugvlog != 0 {
					ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
				}

				if ld.Linkmode == ld.LinkExternal {
					ld.Elfemitreloc(ctxt)
				}
			}

		case obj.Hplan9:
			ld.Asmplan9sym(ctxt)
			ld.Cflush()

			// Append the pclntab contents as the Plan 9 line-offset table.
			sym := ctxt.Syms.Lookup("pclntab", 0)
			if sym != nil {
				ld.Lcsize = int32(len(sym.P))
				for i := 0; int32(i) < ld.Lcsize; i++ {
					ld.Cput(sym.P[i])
				}

				ld.Cflush()
			}

		case obj.Hwindows, obj.Hwindowsgui:
			if ctxt.Debugvlog != 0 {
				ctxt.Logf("%5.2f dwarf\n", obj.Cputime())
			}

		case obj.Hdarwin:
			if ld.Linkmode == ld.LinkExternal {
				ld.Machoemitreloc(ctxt)
			}
		}
	}

	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f headr\n", obj.Cputime())
	}
	// Rewind and write the file header last, now that all sizes are known.
	ld.Cseek(0)
	switch ld.Headtype {
	default:
	case obj.Hplan9: /* plan9 */
		magic := int32(4*26*26 + 7)

		magic |= 0x00008000                  /* fat header */
		ld.Lputb(uint32(magic))              /* magic */
		ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */
		ld.Lputb(uint32(ld.Segdata.Filelen))
		ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
		ld.Lputb(uint32(ld.Symsize)) /* nsyms */
		vl := ld.Entryvalue(ctxt)
		ld.Lputb(PADDR(uint32(vl))) /* va of entry */
		ld.Lputb(uint32(ld.Spsize)) /* sp offsets */
		ld.Lputb(uint32(ld.Lcsize)) /* line offsets */
		ld.Vputb(uint64(vl))        /* va of entry */

	case obj.Hdarwin:
		ld.Asmbmacho(ctxt)

	case obj.Hlinux,
		obj.Hfreebsd,
		obj.Hnetbsd,
		obj.Hopenbsd,
		obj.Hdragonfly,
		obj.Hsolaris,
		obj.Hnacl:
		ld.Asmbelf(ctxt, symo)

	case obj.Hwindows, obj.Hwindowsgui:
		ld.Asmbpe(ctxt)
	}

	ld.Cflush()
}