// Inferno utils/6l/asm.c
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/asm.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package amd64

import (
"cmd/internal/objabi"
"cmd/internal/sys"
"cmd/link/internal/ld"
"cmd/link/internal/sym"
"debug/elf"
"log"
)
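
// PADDR clears the high bit of x. It is used below when writing the
// entry address into the Plan 9 header.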
func PADDR(x uint32) uint32 {
return x &^ 0x80000000
}
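
// Addcall appends a 4-byte R_CALL relocation against t at the end of s
// and returns the new size of s.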
func Addcall(ctxt *ld.Link, s *sym.Symbol, t *sym.Symbol) int64 {
s.Attr |= sym.AttrReachable
i := s.Size
s.Size += 4
s.Grow(s.Size)
r := s.AddRel()
r.Sym = t
r.Off = int32(i)
r.Type = objabi.R_CALL
r.Siz = 4
return i + int64(r.Siz)
}
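
// gentext synthesizes a small init function, go.link.addmoduledata, that
// passes the module's firstmoduledata to runtime.addmoduledata, and
// registers it through an .init_array entry. It is only needed when
// dynamically linking Go code.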
func gentext(ctxt *ld.Link) {
if !ctxt.DynlinkingGo() {
return
}
addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin {
// we're linking a module containing the runtime -> no need for
// an init function
return
}
addmoduledata.Attr |= sym.AttrReachable
initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
initfunc.Type = sym.STEXT
initfunc.Attr |= sym.AttrLocal
initfunc.Attr |= sym.AttrReachable
o := func(op ...uint8) {
for _, op1 := range op {
initfunc.AddUint8(op1)
}
}
// 0000000000000000 <local.dso_init>:
// 0: 48 8d 3d 00 00 00 00 lea 0x0(%rip),%rdi # 7 <local.dso_init+0x7>
// 3: R_X86_64_PC32 runtime.firstmoduledata-0x4
o(0x48, 0x8d, 0x3d)
initfunc.AddPCRelPlus(ctxt.Arch, ctxt.Moduledata, 0)
// 7: e8 00 00 00 00 callq c <local.dso_init+0xc>
// 8: R_X86_64_PLT32 runtime.addmoduledata-0x4
o(0xe8)
Addcall(ctxt, initfunc, addmoduledata)
// c: c3 retq
o(0xc3)
if ctxt.BuildMode == ld.BuildModePlugin {
ctxt.Textp = append(ctxt.Textp, addmoduledata)
}
ctxt.Textp = append(ctxt.Textp, initfunc)
initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
initarray_entry.Attr |= sym.AttrReachable
initarray_entry.Attr |= sym.AttrLocal
initarray_entry.Type = sym.SINITARR
initarray_entry.AddAddr(ctxt.Arch, initfunc)
}
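
// adddynrel converts a loader-specific relocation (ELF or Mach-O) read from
// an object file into the linker's internal form, creating PLT and GOT
// entries and dynamic relocation records as needed. It reports whether the
// relocation was handled.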
func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool {
targ := r.Sym
switch r.Type {
default:
if r.Type >= objabi.ElfRelocOffset {
ld.Errorf(s, "unexpected relocation type %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type))
return false
}
// Handle relocations found in ELF object files.
case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_PC32):
if targ.Type == sym.SDYNIMPORT {
ld.Errorf(s, "unexpected R_X86_64_PC32 relocation for dynamic symbol %s", targ.Name)
}
// TODO(mwhudson): the test of VisibilityHidden here probably doesn't make
// sense and should be removed when someone has thought about it properly.
if (targ.Type == 0 || targ.Type == sym.SXREF) && !targ.Attr.VisibilityHidden() {
ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
}
r.Type = objabi.R_PCREL
r.Add += 4
return true
case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_PC64):
if targ.Type == sym.SDYNIMPORT {
ld.Errorf(s, "unexpected R_X86_64_PC64 relocation for dynamic symbol %s", targ.Name)
}
if targ.Type == 0 || targ.Type == sym.SXREF {
ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
}
r.Type = objabi.R_PCREL
r.Add += 8
return true
case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_PLT32):
r.Type = objabi.R_PCREL
r.Add += 4
if targ.Type == sym.SDYNIMPORT {
addpltsym(ctxt, targ)
r.Sym = ctxt.Syms.Lookup(".plt", 0)
r.Add += int64(targ.Plt())
}
return true
case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_GOTPCREL),
objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_GOTPCRELX),
objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_REX_GOTPCRELX):
if targ.Type != sym.SDYNIMPORT {
// have symbol
if r.Off >= 2 && s.P[r.Off-2] == 0x8b {
// turn MOVQ of GOT entry into LEAQ of symbol itself
s.P[r.Off-2] = 0x8d
r.Type = objabi.R_PCREL
r.Add += 4
return true
}
}
// fall back to using GOT and hope for the best (CMOV*)
// TODO: just needs relocation, no need to put in .dynsym
addgotsym(ctxt, targ)
r.Type = objabi.R_PCREL
r.Sym = ctxt.Syms.Lookup(".got", 0)
r.Add += 4
r.Add += int64(targ.Got())
return true
case objabi.ElfRelocOffset + objabi.RelocType(elf.R_X86_64_64):
if targ.Type == sym.SDYNIMPORT {
ld.Errorf(s, "unexpected R_X86_64_64 relocation for dynamic symbol %s", targ.Name)
}
r.Type = objabi.R_ADDR
return true
// Handle relocations found in Mach-O object files.
case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 0,
objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED*2 + 0,
objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_BRANCH*2 + 0:
// TODO: What is the difference between all these?
r.Type = objabi.R_ADDR
if targ.Type == sym.SDYNIMPORT {
ld.Errorf(s, "unexpected reloc for dynamic symbol %s", targ.Name)
}
return true
case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_BRANCH*2 + 1:
if targ.Type == sym.SDYNIMPORT {
addpltsym(ctxt, targ)
r.Sym = ctxt.Syms.Lookup(".plt", 0)
r.Add = int64(targ.Plt())
r.Type = objabi.R_PCREL
return true
}
fallthrough
case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 1,
objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED*2 + 1,
objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED_1*2 + 1,
objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED_2*2 + 1,
objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_SIGNED_4*2 + 1:
r.Type = objabi.R_PCREL
if targ.Type == sym.SDYNIMPORT {
ld.Errorf(s, "unexpected pc-relative reloc for dynamic symbol %s", targ.Name)
}
return true
case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_GOT_LOAD*2 + 1:
if targ.Type != sym.SDYNIMPORT {
// have symbol
// turn MOVQ of GOT entry into LEAQ of symbol itself
if r.Off < 2 || s.P[r.Off-2] != 0x8b {
ld.Errorf(s, "unexpected GOT_LOAD reloc for non-dynamic symbol %s", targ.Name)
return false
}
s.P[r.Off-2] = 0x8d
r.Type = objabi.R_PCREL
return true
}
fallthrough
case objabi.MachoRelocOffset + ld.MACHO_X86_64_RELOC_GOT*2 + 1:
if targ.Type != sym.SDYNIMPORT {
ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name)
}
addgotsym(ctxt, targ)
r.Type = objabi.R_PCREL
r.Sym = ctxt.Syms.Lookup(".got", 0)
r.Add += int64(targ.Got())
return true
}
switch r.Type {
case objabi.R_CALL,
objabi.R_PCREL:
if targ.Type != sym.SDYNIMPORT {
// nothing to do, the relocation will be laid out in reloc
return true
}
if ctxt.LinkMode == ld.LinkExternal {
// External linker will do this relocation.
return true
}
// Internal linking, for both ELF and Mach-O.
// Build a PLT entry and change the relocation target to that entry.
addpltsym(ctxt, targ)
r.Sym = ctxt.Syms.Lookup(".plt", 0)
r.Add = int64(targ.Plt())
return true
case objabi.R_ADDR:
if s.Type == sym.STEXT && ctxt.IsELF {
if ctxt.HeadType == objabi.Hsolaris {
addpltsym(ctxt, targ)
r.Sym = ctxt.Syms.Lookup(".plt", 0)
r.Add += int64(targ.Plt())
return true
}
// The code is asking for the address of an external
// function. We provide it with the address of the
// corresponding GOT symbol.
addgotsym(ctxt, targ)
r.Sym = ctxt.Syms.Lookup(".got", 0)
r.Add += int64(targ.Got())
return true
}
// Process dynamic relocations for the data sections.
if ctxt.BuildMode == ld.BuildModePIE && ctxt.LinkMode == ld.LinkInternal {
// When internally linking, generate dynamic relocations
// for all typical R_ADDR relocations. The exceptions
// are those R_ADDR relocations that are created as part of generating
// the dynamic relocations and must be resolved statically.
//
// There are three phases relevant to understanding this:
//
// dodata() // we are here
// address() // symbol address assignment
// reloc() // resolution of static R_ADDR relocs
//
// At this point symbol addresses have not been
// assigned yet (as the final size of the .rela section
// will affect the addresses), and so we cannot write
// the Elf64_Rela.r_offset now. Instead we delay it
// until after the 'address' phase of the linker is
// complete. We do this via Addaddrplus, which creates
// a new R_ADDR relocation which will be resolved in
// the 'reloc' phase.
//
// These synthetic static R_ADDR relocs must be skipped
// now, or else we will be caught in an infinite loop
// of generating synthetic relocs for our synthetic
// relocs.
//
// Furthermore, the rela sections contain dynamic
// relocations with R_ADDR relocations on
// Elf64_Rela.r_offset. This field should contain the
// symbol offset as determined by reloc(), not the
// final dynamically linked address as a dynamic
// relocation would provide.
switch s.Name {
case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic":
return false
}
} else {
// Either internally linking a static executable,
// in which case we can resolve these relocations
// statically in the 'reloc' phase, or externally
// linking, in which case the relocation will be
// prepared in the 'reloc' phase and passed to the
// external linker in the 'asmb' phase.
if s.Type != sym.SDATA && s.Type != sym.SRODATA {
break
}
}
if ctxt.IsELF {
// TODO: We generate an R_X86_64_64 relocation for every R_ADDR, even
// though it would be more efficient (for the dynamic linker) if we
// generated R_X86_64_RELATIVE instead.
ld.Adddynsym(ctxt, targ)
rela := ctxt.Syms.Lookup(".rela", 0)
rela.AddAddrPlus(ctxt.Arch, s, int64(r.Off))
if r.Siz == 8 {
rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(targ.Dynid), uint32(elf.R_X86_64_64)))
} else {
// TODO: never happens, remove.
rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(targ.Dynid), uint32(elf.R_X86_64_32)))
}
rela.AddUint64(ctxt.Arch, uint64(r.Add))
r.Type = objabi.ElfRelocOffset // ignore during relocsym
return true
}
if ctxt.HeadType == objabi.Hdarwin && s.Size == int64(ctxt.Arch.PtrSize) && r.Off == 0 {
// Mach-O relocations are a royal pain to lay out.
// They use a compact stateful bytecode representation
// that is too much bother to deal with.
// Instead, interpret the C declaration
// void *_Cvar_stderr = &stderr;
// as making _Cvar_stderr the name of a GOT entry
// for stderr. This is separate from the usual GOT entry,
// just in case the C code assigns to the variable,
// and of course it only works for single pointers,
// but we only need to support cgo and that's all it needs.
ld.Adddynsym(ctxt, targ)
got := ctxt.Syms.Lookup(".got", 0)
s.Type = got.Type
s.Attr |= sym.AttrSubSymbol
s.Outer = got
s.Sub = got.Sub
got.Sub = s
s.Value = got.Size
got.AddUint64(ctxt.Arch, 0)
ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(targ.Dynid))
r.Type = objabi.ElfRelocOffset // ignore during relocsym
return true
}
}
return false
}
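
// elfreloc1 writes one Elf64_Rela record for r: the section offset, the
// relocation type combined with the ELF symbol index, and the addend.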
func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool {
ctxt.Out.Write64(uint64(sectoff))
elfsym := r.Xsym.ElfsymForReloc()
switch r.Type {
default:
return false
case objabi.R_ADDR:
if r.Siz == 4 {
ctxt.Out.Write64(uint64(elf.R_X86_64_32) | uint64(elfsym)<<32)
} else if r.Siz == 8 {
ctxt.Out.Write64(uint64(elf.R_X86_64_64) | uint64(elfsym)<<32)
} else {
return false
}
case objabi.R_TLS_LE:
if r.Siz == 4 {
ctxt.Out.Write64(uint64(elf.R_X86_64_TPOFF32) | uint64(elfsym)<<32)
} else {
return false
}
case objabi.R_TLS_IE:
if r.Siz == 4 {
ctxt.Out.Write64(uint64(elf.R_X86_64_GOTTPOFF) | uint64(elfsym)<<32)
} else {
return false
}
case objabi.R_CALL:
if r.Siz == 4 {
if r.Xsym.Type == sym.SDYNIMPORT {
if ctxt.DynlinkingGo() {
ctxt.Out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32)
} else {
ctxt.Out.Write64(uint64(elf.R_X86_64_GOTPCREL) | uint64(elfsym)<<32)
}
} else {
ctxt.Out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32)
}
} else {
return false
}
case objabi.R_PCREL:
if r.Siz == 4 {
if r.Xsym.Type == sym.SDYNIMPORT && r.Xsym.ElfType() == elf.STT_FUNC {
ctxt.Out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32)
} else {
ctxt.Out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32)
}
} else {
return false
}
case objabi.R_GOTPCREL:
if r.Siz == 4 {
ctxt.Out.Write64(uint64(elf.R_X86_64_GOTPCREL) | uint64(elfsym)<<32)
} else {
return false
}
}
ctxt.Out.Write64(uint64(r.Xadd))
return true
}
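
// machoreloc1 writes one Mach-O relocation record for r, encoding the symbol
// or section ordinal, the external and pc-relative bits, the operand size,
// and the relocation type.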
func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool {
var v uint32
rs := r.Xsym
if rs.Type == sym.SHOSTOBJ || r.Type == objabi.R_PCREL || r.Type == objabi.R_GOTPCREL || r.Type == objabi.R_CALL {
if rs.Dynid < 0 {
ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type)
return false
}
v = uint32(rs.Dynid)
v |= 1 << 27 // external relocation
} else {
v = uint32(rs.Sect.Extnum)
if v == 0 {
ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type)
return false
}
}
switch r.Type {
default:
return false
case objabi.R_ADDR:
v |= ld.MACHO_X86_64_RELOC_UNSIGNED << 28
case objabi.R_CALL:
v |= 1 << 24 // pc-relative bit
v |= ld.MACHO_X86_64_RELOC_BRANCH << 28
// NOTE: Only works with 'external' relocation. Forced above.
case objabi.R_PCREL:
v |= 1 << 24 // pc-relative bit
v |= ld.MACHO_X86_64_RELOC_SIGNED << 28
case objabi.R_GOTPCREL:
v |= 1 << 24 // pc-relative bit
v |= ld.MACHO_X86_64_RELOC_GOT_LOAD << 28
}
switch r.Siz {
default:
return false
case 1:
v |= 0 << 25
case 2:
v |= 1 << 25
case 4:
v |= 2 << 25
case 8:
v |= 3 << 25
}
out.Write32(uint32(sectoff))
out.Write32(v)
return true
}
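
// pereloc1 writes one PE (COFF) relocation record for r.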
func pereloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool {
var v uint32
rs := r.Xsym
if rs.Dynid < 0 {
ld.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type)
return false
}
out.Write32(uint32(sectoff))
out.Write32(uint32(rs.Dynid))
switch r.Type {
default:
return false
case objabi.R_DWARFSECREF:
v = ld.IMAGE_REL_AMD64_SECREL
case objabi.R_ADDR:
if r.Siz == 8 {
v = ld.IMAGE_REL_AMD64_ADDR64
} else {
v = ld.IMAGE_REL_AMD64_ADDR32
}
case objabi.R_CALL,
objabi.R_PCREL:
v = ld.IMAGE_REL_AMD64_REL32
}
out.Write16(uint16(v))
return true
}
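
// archreloc has no architecture-specific relocations to apply on amd64;
// it reports every relocation as unhandled.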
func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) {
return val, false
}
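
// archrelocvariant is not used on amd64.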
func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 {
log.Fatalf("unexpected relocation variant")
return t
}
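
// elfsetupplt emits the standard first PLT entry (PLT0), which pushes the
// second .got.plt slot and jumps through the third, and reserves the first
// three .got.plt slots: the address of .dynamic plus two slots filled in by
// the dynamic linker at run time.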
func elfsetupplt(ctxt *ld.Link) {
plt := ctxt.Syms.Lookup(".plt", 0)
got := ctxt.Syms.Lookup(".got.plt", 0)
if plt.Size == 0 {
// pushq got+8(IP)
plt.AddUint8(0xff)
plt.AddUint8(0x35)
plt.AddPCRelPlus(ctxt.Arch, got, 8)
// jmpq got+16(IP)
plt.AddUint8(0xff)
plt.AddUint8(0x25)
plt.AddPCRelPlus(ctxt.Arch, got, 16)
// nopl 0(AX)
plt.AddUint32(ctxt.Arch, 0x00401f0f)
// assume got.Size == 0 too
got.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".dynamic", 0), 0)
got.AddUint64(ctxt.Arch, 0)
got.AddUint64(ctxt.Arch, 0)
}
}
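
// addpltsym ensures that s has a PLT entry, creating the .plt, .got.plt and
// .rela.plt records on ELF, or a non-lazy pointer stub on Mach-O.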
func addpltsym(ctxt *ld.Link, s *sym.Symbol) {
if s.Plt() >= 0 {
return
}
ld.Adddynsym(ctxt, s)
if ctxt.IsELF {
plt := ctxt.Syms.Lookup(".plt", 0)
got := ctxt.Syms.Lookup(".got.plt", 0)
rela := ctxt.Syms.Lookup(".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt(ctxt)
}
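// Each PLT entry below is 16 bytes: a 6-byte indirect jmp through the
// symbol's GOT slot, a 5-byte pushq of the relocation index, and a
// 5-byte jmp back to PLT0, which is why SetPlt records plt.Size-16 as
// the entry's offset once all 16 bytes have been added.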
// jmpq *got+size(IP)
plt.AddUint8(0xff)
plt.AddUint8(0x25)
plt.AddPCRelPlus(ctxt.Arch, got, got.Size)
// add to got: pointer to current pos in plt
got.AddAddrPlus(ctxt.Arch, plt, plt.Size)
// pushq $x
plt.AddUint8(0x68)
plt.AddUint32(ctxt.Arch, uint32((got.Size-24-8)/8))
// jmpq .plt
plt.AddUint8(0xe9)
plt.AddUint32(ctxt.Arch, uint32(-(plt.Size + 4)))
// rela
rela.AddAddrPlus(ctxt.Arch, got, got.Size-8)
rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_X86_64_JMP_SLOT)))
rela.AddUint64(ctxt.Arch, 0)
s.SetPlt(int32(plt.Size - 16))
} else if ctxt.HeadType == objabi.Hdarwin {
// To do lazy symbol lookup right, we're supposed
// to tell the dynamic loader which library each
// symbol comes from and format the link info
// section just so. I'm too lazy (ha!) to do that
// so for now we'll just use non-lazy pointers,
// which don't need to be told which library to use.
//
// https://networkpx.blogspot.com/2009/09/about-lcdyldinfoonly-command.html
// has details about what we're avoiding.
addgotsym(ctxt, s)
plt := ctxt.Syms.Lookup(".plt", 0)
ctxt.Syms.Lookup(".linkedit.plt", 0).AddUint32(ctxt.Arch, uint32(s.Dynid))
// jmpq *got+size(IP)
s.SetPlt(int32(plt.Size))
plt.AddUint8(0xff)
plt.AddUint8(0x25)
plt.AddPCRelPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got()))
} else {
ld.Errorf(s, "addpltsym: unsupported binary format")
}
}
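
// addgotsym ensures that s has a GOT entry, adding an R_X86_64_GLOB_DAT
// dynamic relocation for it on ELF or an indirect-symbol table entry on
// Mach-O.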
func addgotsym(ctxt *ld.Link, s *sym.Symbol) {
if s.Got() >= 0 {
return
}
ld.Adddynsym(ctxt, s)
got := ctxt.Syms.Lookup(".got", 0)
s.SetGot(int32(got.Size))
got.AddUint64(ctxt.Arch, 0)
if ctxt.IsELF {
rela := ctxt.Syms.Lookup(".rela", 0)
rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got()))
rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_X86_64_GLOB_DAT)))
rela.AddUint64(ctxt.Arch, 0)
} else if ctxt.HeadType == objabi.Hdarwin {
ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(s.Dynid))
} else {
ld.Errorf(s, "addgotsym: unsupported binary format")
}
}
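
// asmb writes the text, read-only data, data and DWARF segments to the
// output file.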
func asmb(ctxt *ld.Link) {
if ctxt.IsELF {
ld.Asmbelfsetup()
}
sect := ld.Segtext.Sections[0]
ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
// 0xCC is INT $3 - breakpoint instruction
ld.CodeblkPad(ctxt, int64(sect.Vaddr), int64(sect.Length), []byte{0xCC})
for _, sect = range ld.Segtext.Sections[1:] {
ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
}
if ld.Segrodata.Filelen > 0 {
ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff))
ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
}
if ld.Segrelrodata.Filelen > 0 {
ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff))
ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
}
ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff))
ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff))
ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
}
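
// asmb2 writes the symbol table, the relocations when externally linking,
// and finally the header for the target format (Plan 9, Mach-O, ELF or PE).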
func asmb2(ctxt *ld.Link) {
machlink := int64(0)
if ctxt.HeadType == objabi.Hdarwin {
machlink = ld.Domacholink(ctxt)
}
switch ctxt.HeadType {
default:
ld.Errorf(nil, "unknown header type %v", ctxt.HeadType)
fallthrough
case objabi.Hplan9:
break
case objabi.Hdarwin:
ld.Flag8 = true /* 64-bit addresses */
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hdragonfly,
objabi.Hsolaris:
ld.Flag8 = true /* 64-bit addresses */
case objabi.Hwindows:
break
}
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
symo := int64(0)
if !*ld.FlagS {
switch ctxt.HeadType {
default:
case objabi.Hplan9:
*ld.FlagS = true
symo = int64(ld.Segdata.Fileoff + ld.Segdata.Filelen)
case objabi.Hdarwin:
symo = int64(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hdragonfly,
objabi.Hsolaris:
symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
symo = ld.Rnd(symo, int64(*ld.FlagRound))
case objabi.Hwindows:
symo = int64(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
symo = ld.Rnd(symo, ld.PEFILEALIGN)
}
ctxt.Out.SeekSet(symo)
switch ctxt.HeadType {
default:
if ctxt.IsELF {
ctxt.Out.SeekSet(symo)
ld.Asmelfsym(ctxt)
ctxt.Out.Flush()
ctxt.Out.Write(ld.Elfstrdat)
if ctxt.LinkMode == ld.LinkExternal {
ld.Elfemitreloc(ctxt)
}
}
case objabi.Hplan9:
ld.Asmplan9sym(ctxt)
ctxt.Out.Flush()
sym := ctxt.Syms.Lookup("pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
ctxt.Out.Write(sym.P)
ctxt.Out.Flush()
}
case objabi.Hwindows:
// Do nothing
case objabi.Hdarwin:
if ctxt.LinkMode == ld.LinkExternal {
ld.Machoemitreloc(ctxt)
}
}
}
ctxt.Out.SeekSet(0)
switch ctxt.HeadType {
default:
case objabi.Hplan9: /* plan9 */
magic := int32(4*26*26 + 7)
magic |= 0x00008000 /* fat header */
ctxt.Out.Write32b(uint32(magic)) /* magic */
ctxt.Out.Write32b(uint32(ld.Segtext.Filelen)) /* sizes */
ctxt.Out.Write32b(uint32(ld.Segdata.Filelen))
ctxt.Out.Write32b(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
ctxt.Out.Write32b(uint32(ld.Symsize)) /* nsyms */
vl := ld.Entryvalue(ctxt)
ctxt.Out.Write32b(PADDR(uint32(vl))) /* va of entry */
ctxt.Out.Write32b(uint32(ld.Spsize)) /* sp offsets */
ctxt.Out.Write32b(uint32(ld.Lcsize)) /* line offsets */
ctxt.Out.Write64b(uint64(vl)) /* va of entry */
case objabi.Hdarwin:
ld.Asmbmacho(ctxt)
case objabi.Hlinux,
objabi.Hfreebsd,
objabi.Hnetbsd,
objabi.Hopenbsd,
objabi.Hdragonfly,
objabi.Hsolaris:
ld.Asmbelf(ctxt, symo)
case objabi.Hwindows:
ld.Asmbpe(ctxt)
}
ctxt.Out.Flush()
}
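
// tlsIEtoLE rewrites the initial-exec (IE) TLS access whose relocated
// operand is at s.P[off] into the equivalent local-exec (LE) form by
// patching the instruction bytes in place.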
func tlsIEtoLE(s *sym.Symbol, off, size int) {
// Transform the PC-relative instruction into a constant load.
// That is,
//
// MOVQ X(IP), REG -> MOVQ $Y, REG
//
// To determine the instruction and register, we study the op codes.
// Consult an AMD64 instruction encoding guide to decipher this.
if off < 3 {
log.Fatal("R_X86_64_GOTTPOFF reloc not preceded by MOVQ or ADDQ instruction")
}
op := s.P[off-3 : off]
reg := op[2] >> 3
if op[1] == 0x8b || reg == 4 {
// MOVQ
if op[0] == 0x4c {
op[0] = 0x49
} else if size == 4 && op[0] == 0x44 {
op[0] = 0x41
}
if op[1] == 0x8b {
op[1] = 0xc7
} else {
op[1] = 0x81 // special case for SP
}
op[2] = 0xc0 | reg
} else {
// An alternate op is ADDQ. This is handled by GNU gold,
// but right now is not generated by the Go compiler:
// ADDQ X(IP), REG -> ADDQ $Y, REG
// Consider adding support for it here.
log.Fatalf("expected TLS IE op to be MOVQ, got %v", op)
}
}