Mirror of https://github.com/llvm/llvm-project.git
[ELF] Pass Ctx & to Arch/
commit 6d03a69034 (parent 37ce3c2208)
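The patch applies one mechanical change across the Arch/ backends: functions that used to reach for global linker state now take an explicit Ctx & parameter and read configuration through it, and every caller forwards the reference. The snippet below is not taken from the patch; it is a minimal, self-contained C++ sketch of that dependency-injection pattern, using hypothetical names (Ctx, isLE, readInsn) as stand-ins for lld's real types.

#include <cstdint>
#include <cstring>

// Hypothetical context object: configuration that would otherwise live in a
// global is carried explicitly.
struct Ctx {
  bool isLE; // target endianness
};

// The context is threaded through as a parameter, so the dependency is
// visible in the signature and the function is testable in isolation.
static uint32_t readInsn(const Ctx &ctx, const uint8_t *loc) {
  uint32_t v;
  // Illustrative only: pick a byte offset based on a context setting.
  std::memcpy(&v, ctx.isLE ? loc : loc + 2, sizeof(v));
  return v;
}

int main() {
  Ctx ctx{true};
  const uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  return readInsn(ctx, buf) != 0 ? 0 : 1;
}

Call sites change in the same way: for example, AArch64Relaxer relaxer(sec.relocs()) becomes AArch64Relaxer relaxer(ctx, sec.relocs()), as the hunks below show.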
@@ -91,9 +91,10 @@ private:
};

struct AArch64Relaxer {
Ctx &ctx;
bool safeToRelaxAdrpLdr = false;

AArch64Relaxer(ArrayRef<Relocation> relocs);
AArch64Relaxer(Ctx &ctx, ArrayRef<Relocation> relocs);
bool tryRelaxAdrpAdd(const Relocation &adrpRel, const Relocation &addRel,
uint64_t secAddr, uint8_t *buf) const;
bool tryRelaxAdrpLdr(const Relocation &adrpRel, const Relocation &ldrRel,
@@ -750,7 +751,8 @@ void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}

AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {
AArch64Relaxer::AArch64Relaxer(Ctx &ctx, ArrayRef<Relocation> relocs)
: ctx(ctx) {
if (!ctx.arg.relax)
return;
// Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
@@ -909,7 +911,7 @@ void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
secAddr += s->outSecOff;
else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
secAddr += ehIn->getParent()->outSecOff;
AArch64Relaxer relaxer(sec.relocs());
AArch64Relaxer relaxer(ctx, sec.relocs());
for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
const Relocation &rel = sec.relocs()[i];
uint8_t *loc = buf + rel.offset;
@@ -1149,13 +1151,13 @@ addTaggedSymbolReferences(InputSectionBase &sec,
// Ideally, this isn't a problem, as any TU that imports or exports tagged
// symbols should also be built with tagging. But, to handle these cases, we
// demote the symbol to be untagged.
void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
void elf::createTaggedSymbols(Ctx &ctx) {
assert(hasMemtag());

// First, collect all symbols that are marked as tagged, and count how many
// times they're marked as tagged.
DenseMap<Symbol *, unsigned> taggedSymbolReferenceCount;
for (InputFile* file : files) {
for (InputFile *file : ctx.objectFiles) {
if (file->kind() != InputFile::ObjKind)
continue;
for (InputSectionBase *section : file->getSections()) {
@@ -1171,7 +1173,7 @@ void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
// definitions to a symbol exceeds the amount of times they're marked as
// tagged, it means we have an objfile that uses the untagged variant of the
// symbol.
for (InputFile *file : files) {
for (InputFile *file : ctx.objectFiles) {
if (file->kind() != InputFile::BinaryKind &&
file->kind() != InputFile::ObjKind)
continue;
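Illustrative note, not part of the patch: under the counting scheme described in the comments above, if a symbol is defined or referenced by three object files but only two of them carry the tagged-symbol annotation, the tagged count (2) falls short of the number of definitions (3), so the symbol is demoted to untagged for the whole link.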
@@ -214,7 +214,7 @@ void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {

// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltHeaderLong(uint8_t *buf) {
static void writePltHeaderLong(Ctx &ctx, uint8_t *buf) {
write32(buf + 0, 0xe52de004); // str lr, [sp,#-4]!
write32(buf + 4, 0xe59fe004); // ldr lr, L2
write32(buf + 8, 0xe08fe00e); // L1: add lr, pc, lr
@@ -280,7 +280,7 @@ void ARM::writePltHeader(uint8_t *buf) const {
uint64_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA() - 4;
if (!llvm::isUInt<27>(offset)) {
// We cannot encode the Offset, use the long form.
writePltHeaderLong(buf);
writePltHeaderLong(ctx, buf);
return;
}
write32(buf + 0, pltData[0]);
@@ -1048,7 +1048,7 @@ void elf::sortArmMappingSymbols() {
}
}

void elf::addArmInputSectionMappingSymbols() {
void elf::addArmInputSectionMappingSymbols(Ctx &ctx) {
// Collect mapping symbols for every executable input sections.
// The linker generated mapping symbols for all the synthetic
// sections are adding into the sectionmap through the function
@@ -1327,7 +1327,7 @@ private:
const std::optional<uint64_t> entAddr;
};

ArmCmseSGSection::ArmCmseSGSection()
ArmCmseSGSection::ArmCmseSGSection(Ctx &ctx)
: SyntheticSection(llvm::ELF::SHF_ALLOC | llvm::ELF::SHF_EXECINSTR,
llvm::ELF::SHT_PROGBITS,
/*alignment=*/32, ".gnu.sgstubs") {
@@ -1440,7 +1440,7 @@ void ArmCmseSGSection::finalizeContents(Ctx &) {
// in the executable output by this link.
// See Arm® v8-M Security Extensions: Requirements on Development Tools
// https://developer.arm.com/documentation/ecm0359818/latest
template <typename ELFT> void elf::writeARMCmseImportLib() {
template <typename ELFT> void elf::writeARMCmseImportLib(Ctx &ctx) {
StringTableSection *shstrtab =
make<StringTableSection>(".shstrtab", /*dynamic=*/false);
StringTableSection *strtab =
@@ -1538,10 +1538,10 @@ TargetInfo *elf::getARMTargetInfo(Ctx &ctx) {
return &target;
}

template void elf::writeARMCmseImportLib<ELF32LE>();
template void elf::writeARMCmseImportLib<ELF32BE>();
template void elf::writeARMCmseImportLib<ELF64LE>();
template void elf::writeARMCmseImportLib<ELF64BE>();
template void elf::writeARMCmseImportLib<ELF32LE>(Ctx &);
template void elf::writeARMCmseImportLib<ELF32BE>(Ctx &);
template void elf::writeARMCmseImportLib<ELF64LE>(Ctx &);
template void elf::writeARMCmseImportLib<ELF64BE>(Ctx &);

template void ObjFile<ELF32LE>::importCmseSymbols();
template void ObjFile<ELF32BE>::importCmseSymbols();
@@ -212,7 +212,7 @@ LoongArch::LoongArch(Ctx &ctx) : TargetInfo(ctx) {
ipltEntrySize = 16;
}

static uint32_t getEFlags(const InputFile *f) {
static uint32_t getEFlags(Ctx &ctx, const InputFile *f) {
if (ctx.arg.is64)
return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
@@ -242,7 +242,7 @@ uint32_t LoongArch::calcEFlags() const {
continue;

// Take the first non-zero e_flags as the reference.
uint32_t flags = getEFlags(f);
uint32_t flags = getEFlags(ctx, f);
if (target == 0 && flags != 0) {
target = flags;
targetFile = f;
@@ -825,7 +825,7 @@ bool LoongArch::relaxOnce(int pass) const {
return false;

if (pass == 0)
initSymbolAnchors();
initSymbolAnchors(ctx);

SmallVector<InputSection *, 0> storage;
bool changed = false;
@@ -70,7 +70,7 @@ template <class ELFT> MIPS<ELFT>::MIPS(Ctx &ctx) : TargetInfo(ctx) {
}

template <class ELFT> uint32_t MIPS<ELFT>::calcEFlags() const {
return calcMipsEFlags<ELFT>();
return calcMipsEFlags<ELFT>(ctx);
}

template <class ELFT>
@@ -262,14 +262,14 @@ template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
// Overwrite trap instructions written by Writer::writeTrapInstr.
memset(buf, 0, pltHeaderSize);

write16(buf, isMipsR6() ? 0x7860 : 0x7980); // addiupc v1, (GOTPLT) - .
write16(buf, isMipsR6(ctx) ? 0x7860 : 0x7980); // addiupc v1, (GOTPLT) - .
write16(buf + 4, 0xff23); // lw $25, 0($3)
write16(buf + 8, 0x0535); // subu16 $2, $2, $3
write16(buf + 10, 0x2525); // srl16 $2, $2, 2
write16(buf + 12, 0x3302); // addiu $24, $2, -2
write16(buf + 14, 0xfffe);
write16(buf + 16, 0x0dff); // move $15, $31
if (isMipsR6()) {
if (isMipsR6(ctx)) {
write16(buf + 18, 0x0f83); // move $28, $3
write16(buf + 20, 0x472b); // jalrc $25
write16(buf + 22, 0x0c00); // nop
@@ -324,7 +324,7 @@ void MIPS<ELFT>::writePlt(uint8_t *buf, const Symbol &sym,
// Overwrite trap instructions written by Writer::writeTrapInstr.
memset(buf, 0, pltEntrySize);

if (isMipsR6()) {
if (isMipsR6(ctx)) {
write16(buf, 0x7840); // addiupc $2, (GOTPLT) - .
write16(buf + 4, 0xff22); // lw $25, 0($2)
write16(buf + 8, 0x0f02); // move $24, $2
@@ -341,8 +341,9 @@ void MIPS<ELFT>::writePlt(uint8_t *buf, const Symbol &sym,
}

uint32_t loadInst = ELFT::Is64Bits ? 0xddf90000 : 0x8df90000;
uint32_t jrInst = isMipsR6() ? (ctx.arg.zHazardplt ? 0x03200409 : 0x03200009)
: (ctx.arg.zHazardplt ? 0x03200408 : 0x03200008);
uint32_t jrInst = isMipsR6(ctx)
? (ctx.arg.zHazardplt ? 0x03200409 : 0x03200009)
: (ctx.arg.zHazardplt ? 0x03200408 : 0x03200008);
uint32_t addInst = ELFT::Is64Bits ? 0x65f80000 : 0x25f80000;

write32(buf, 0x3c0f0000); // lui $15, %hi(.got.plt entry)
@@ -62,7 +62,7 @@ static StringRef getNanName(bool isNan2008) {

static StringRef getFpName(bool isFp64) { return isFp64 ? "64" : "32"; }

static void checkFlags(ArrayRef<FileFlags> files) {
static void checkFlags(Ctx &ctx, ArrayRef<FileFlags> files) {
assert(!files.empty() && "expected non-empty file list");

uint32_t abi = files[0].flags & (EF_MIPS_ABI | EF_MIPS_ABI2);
@@ -293,7 +293,7 @@ static uint32_t getArchFlags(ArrayRef<FileFlags> files) {
return ret;
}

template <class ELFT> uint32_t elf::calcMipsEFlags() {
template <class ELFT> uint32_t elf::calcMipsEFlags(Ctx &ctx) {
std::vector<FileFlags> v;
for (InputFile *f : ctx.objectFiles)
v.push_back({f, cast<ObjFile<ELFT>>(f)->getObj().getHeader().e_flags});
@@ -305,7 +305,7 @@ template <class ELFT> uint32_t elf::calcMipsEFlags() {
return 0;
return ctx.arg.mipsN32Abi ? EF_MIPS_ABI2 : EF_MIPS_ABI_O32;
}
checkFlags(v);
checkFlags(ctx, v);
return getMiscFlags(v) | getPicFlags(v) | getArchFlags(v);
}

@@ -360,13 +360,13 @@ uint8_t elf::getMipsFpAbiFlag(uint8_t oldFlag, uint8_t newFlag,
return oldFlag;
}

template <class ELFT> static bool isN32Abi(const InputFile *f) {
if (auto *ef = dyn_cast<ELFFileBase>(f))
template <class ELFT> static bool isN32Abi(const InputFile &f) {
if (auto *ef = dyn_cast<ELFFileBase>(&f))
return ef->template getObj<ELFT>().getHeader().e_flags & EF_MIPS_ABI2;
return false;
}

bool elf::isMipsN32Abi(const InputFile *f) {
bool elf::isMipsN32Abi(Ctx &ctx, const InputFile &f) {
switch (ctx.arg.ekind) {
case ELF32LEKind:
return isN32Abi<ELF32LE>(f);
@@ -383,12 +383,12 @@ bool elf::isMipsN32Abi(const InputFile *f) {

bool elf::isMicroMips() { return ctx.arg.eflags & EF_MIPS_MICROMIPS; }

bool elf::isMipsR6() {
bool elf::isMipsR6(Ctx &ctx) {
uint32_t arch = ctx.arg.eflags & EF_MIPS_ARCH;
return arch == EF_MIPS_ARCH_32R6 || arch == EF_MIPS_ARCH_64R6;
}

template uint32_t elf::calcMipsEFlags<ELF32LE>();
template uint32_t elf::calcMipsEFlags<ELF32BE>();
template uint32_t elf::calcMipsEFlags<ELF64LE>();
template uint32_t elf::calcMipsEFlags<ELF64BE>();
template uint32_t elf::calcMipsEFlags<ELF32LE>(Ctx &);
template uint32_t elf::calcMipsEFlags<ELF32BE>(Ctx &);
template uint32_t elf::calcMipsEFlags<ELF64LE>(Ctx &);
template uint32_t elf::calcMipsEFlags<ELF64BE>(Ctx &);
@@ -64,15 +64,15 @@ private:
static uint16_t lo(uint32_t v) { return v; }
static uint16_t ha(uint32_t v) { return (v + 0x8000) >> 16; }

static uint32_t readFromHalf16(const uint8_t *loc) {
static uint32_t readFromHalf16(Ctx &ctx, const uint8_t *loc) {
return read32(ctx.arg.isLE ? loc : loc - 2);
}

static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
static void writeFromHalf16(Ctx &ctx, uint8_t *loc, uint32_t insn) {
write32(ctx.arg.isLE ? loc : loc - 2, insn);
}

void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) {
void elf::writePPC32GlinkSection(Ctx &ctx, uint8_t *buf, size_t numEntries) {
// Create canonical PLT entries for non-PIE code. Compilers don't generate
// non-GOT-non-PLT relocations referencing external functions for -fpie/-fPIE.
uint32_t glink = ctx.in.plt->getVA(); // VA of .glink
@@ -412,8 +412,8 @@ void PPC::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
switch (rel.type) {
case R_PPC_GOT_TLSGD16: {
// addi rT, rA, x@got@tlsgd --> lwz rT, x@got@tprel(rA)
uint32_t insn = readFromHalf16(loc);
writeFromHalf16(loc, 0x80000000 | (insn & 0x03ff0000));
uint32_t insn = readFromHalf16(ctx, loc);
writeFromHalf16(ctx, loc, 0x80000000 | (insn & 0x03ff0000));
relocateNoSym(loc, R_PPC_GOT_TPREL16, val);
break;
}
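Worked note, not part of the patch: in the rewrite above, 0x80000000 is the lwz primary opcode (32) placed in the top six bits, and insn & 0x03ff0000 keeps only the rT and rA register fields of the original addi, so the result is lwz rT, 0(rA) with the displacement left for the R_PPC_GOT_TPREL16 relocation to fill in.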
@@ -431,7 +431,7 @@ void PPC::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
switch (rel.type) {
case R_PPC_GOT_TLSGD16:
// addi r3, r31, x@got@tlsgd --> addis r3, r2, x@tprel@ha
writeFromHalf16(loc, 0x3c620000 | ha(val));
writeFromHalf16(ctx, loc, 0x3c620000 | ha(val));
break;
case R_PPC_TLSGD:
// bl __tls_get_addr(x@tldgd) --> add r3, r3, x@tprel@l
@@ -447,7 +447,7 @@ void PPC::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
switch (rel.type) {
case R_PPC_GOT_TLSLD16:
// addi r3, rA, x@got@tlsgd --> addis r3, r2, 0
writeFromHalf16(loc, 0x3c620000);
writeFromHalf16(ctx, loc, 0x3c620000);
break;
case R_PPC_TLSLD:
// r3+x@dtprel computes r3+x-0x8000, while we want it to compute r3+x@tprel
@@ -471,8 +471,8 @@ void PPC::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
switch (rel.type) {
case R_PPC_GOT_TPREL16: {
// lwz rT, x@got@tprel(rA) --> addis rT, r2, x@tprel@ha
uint32_t rt = readFromHalf16(loc) & 0x03e00000;
writeFromHalf16(loc, 0x3c020000 | rt | ha(val));
uint32_t rt = readFromHalf16(ctx, loc) & 0x03e00000;
writeFromHalf16(ctx, loc, 0x3c020000 | rt | ha(val));
break;
}
case R_PPC_TLS: {
@@ -205,7 +205,7 @@ private:
};
} // namespace

uint64_t elf::getPPC64TocBase() {
uint64_t elf::getPPC64TocBase(Ctx &ctx) {
// The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
// TOC starts where the first of these sections starts. We always create a
// .got when we see a relocation that uses it, so for us the start is always
@@ -244,12 +244,12 @@ unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther) {
return 0;
}

void elf::writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
void elf::writePrefixedInst(Ctx &ctx, uint8_t *loc, uint64_t insn) {
insn = ctx.arg.isLE ? insn << 32 | insn >> 32 : insn;
write64(loc, insn);
}

static bool addOptional(StringRef name, uint64_t value,
static bool addOptional(Ctx &ctx, StringRef name, uint64_t value,
std::vector<Defined *> &defined) {
Symbol *sym = ctx.symtab->find(name);
if (!sym || sym->isDefined())
@@ -264,8 +264,8 @@ static bool addOptional(StringRef name, uint64_t value,
// If from is 14, write ${prefix}14: firstInsn; ${prefix}15:
// firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail
// The labels are defined only if they exist in the symbol table.
static void writeSequence(MutableArrayRef<uint32_t> buf, const char *prefix,
int from, uint32_t firstInsn,
static void writeSequence(Ctx &ctx, MutableArrayRef<uint32_t> buf,
const char *prefix, int from, uint32_t firstInsn,
ArrayRef<uint32_t> tail) {
std::vector<Defined *> defined;
char name[16];
@@ -273,7 +273,7 @@ static void writeSequence(MutableArrayRef<uint32_t> buf, const char *prefix,
uint32_t *ptr = buf.data();
for (int r = from; r < 32; ++r) {
format("%s%d", prefix, r).snprint(name, sizeof(name));
if (addOptional(name, 4 * (r - from), defined) && defined.size() == 1)
if (addOptional(ctx, name, 4 * (r - from), defined) && defined.size() == 1)
first = r - from;
write32(ptr++, firstInsn + 0x200008 * (r - from));
}
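Worked example, not from the patch: with firstInsn = 0xe9c1ff70 (ld r14, -144(r1)), each step of +0x200008 adds 1 << 21 to the RT register field and 8 to the displacement, so the next slot encodes ld r15, -136(r1), and so on up through r31.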
@@ -308,23 +308,23 @@ static void writeSequence(MutableArrayRef<uint32_t> buf, const char *prefix,
// avoid long branch thunks. However, we don't consider the advantage
// significant enough to complicate our trunk implementation, so we take the
// simple approach and synthesize .text sections providing the implementation.
void elf::addPPC64SaveRestore() {
void elf::addPPC64SaveRestore(Ctx &ctx) {
static uint32_t savegpr0[20], restgpr0[21], savegpr1[19], restgpr1[19];
constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6;

// _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ...
// Tail: ld 0, 16(1); mtlr 0; blr
writeSequence(restgpr0, "_restgpr0_", 14, 0xe9c1ff70,
writeSequence(ctx, restgpr0, "_restgpr0_", 14, 0xe9c1ff70,
{0xe8010010, mtlr_0, blr});
// _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ...
// Tail: blr
writeSequence(restgpr1, "_restgpr1_", 14, 0xe9ccff70, {blr});
writeSequence(ctx, restgpr1, "_restgpr1_", 14, 0xe9ccff70, {blr});
// _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ...
// Tail: std 0, 16(1); blr
writeSequence(savegpr0, "_savegpr0_", 14, 0xf9c1ff70, {0xf8010010, blr});
writeSequence(ctx, savegpr0, "_savegpr0_", 14, 0xf9c1ff70, {0xf8010010, blr});
// _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ...
// Tail: blr
writeSequence(savegpr1, "_savegpr1_", 14, 0xf9ccff70, {blr});
writeSequence(ctx, savegpr1, "_savegpr1_", 14, 0xf9ccff70, {blr});
}

// Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
@@ -377,7 +377,7 @@ getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
// ld/lwa 3, 0(3) # load the value from the address
//
// Returns true if the relaxation is performed.
static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
static bool tryRelaxPPC64TocIndirection(Ctx &ctx, const Relocation &rel,
uint8_t *bufLoc) {
assert(ctx.arg.tocOptimize);
if (rel.addend < 0)
@@ -404,7 +404,7 @@ static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
assert(!d->isGnuIFunc());

// Two instructions can materialize a 32-bit signed offset from the toc base.
uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase();
uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase(ctx);
if (!isInt<32>(tocRelative))
return false;
@@ -565,15 +565,15 @@ static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
// pointer is pointing into the middle of the word we want to extract, and on
// little-endian it is pointing to the start of the word. These 2 helpers are to
// simplify reading and writing in that context.
static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
static void writeFromHalf16(Ctx &ctx, uint8_t *loc, uint32_t insn) {
write32(ctx.arg.isLE ? loc : loc - 2, insn);
}

static uint32_t readFromHalf16(const uint8_t *loc) {
static uint32_t readFromHalf16(Ctx &ctx, const uint8_t *loc) {
return read32(ctx.arg.isLE ? loc : loc - 2);
}

static uint64_t readPrefixedInstruction(const uint8_t *loc) {
static uint64_t readPrefixedInst(Ctx &ctx, const uint8_t *loc) {
uint64_t fullInstr = read64(loc);
return ctx.arg.isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
}
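The comment above explains why these helpers read at loc on little-endian but at loc - 2 on big-endian. The following self-contained sketch of the byte layout is not part of the patch and uses a hypothetical load32 helper rather than lld's read32, purely for illustration.

#include <cstdint>
#include <cstring>

// Hypothetical helper; lld's own read32() additionally honors the target
// endianness recorded in the linker context.
static uint32_t load32(const uint8_t *p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

int main() {
  // "addis r3, r2, 0x1234" is the 32-bit word 0x3c621234; its 16-bit
  // immediate 0x1234 is what a half16 relocation points at.
  const uint8_t be[4] = {0x3c, 0x62, 0x12, 0x34}; // big-endian: immediate is the last 2 bytes
  const uint8_t le[4] = {0x34, 0x12, 0x62, 0x3c}; // little-endian: immediate is the first 2 bytes
  const uint8_t *locBE = be + 2; // where the relocation points on big-endian
  const uint8_t *locLE = le + 0; // where the relocation points on little-endian
  // Hence read32(isLE ? loc : loc - 2) recovers the whole instruction word.
  (void)load32(locBE - 2);
  (void)load32(locLE);
  return 0;
}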
@@ -658,24 +658,24 @@ void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
case R_PPC64_TOC16_LO_DS: {
// Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
// "addi reg, 2, var@toc".
uint32_t insn = readFromHalf16(loc);
uint32_t insn = readFromHalf16(ctx, loc);
if (getPrimaryOpCode(insn) != LD)
error("expected a 'ld' for got-indirect to toc-relative relaxing");
writeFromHalf16(loc, (insn & 0x03ffffff) | 0x38000000);
writeFromHalf16(ctx, loc, (insn & 0x03ffffff) | 0x38000000);
relocateNoSym(loc, R_PPC64_TOC16_LO, val);
break;
}
case R_PPC64_GOT_PCREL34: {
// Clear the first 8 bits of the prefix and the first 6 bits of the
// instruction (the primary opcode).
uint64_t insn = readPrefixedInstruction(loc);
uint64_t insn = readPrefixedInst(ctx, loc);
if ((insn & 0xfc000000) != 0xe4000000)
error("expected a 'pld' for got-indirect to pc-relative relaxing");
insn &= ~0xff000000fc000000;

// Replace the cleared bits with the values for PADDI (0x600000038000000);
insn |= 0x600000038000000;
writePrefixedInstruction(loc, insn);
writePrefixedInst(ctx, loc, insn);
relocate(loc, rel, val);
break;
}
@@ -683,7 +683,7 @@ void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
// We can only relax this if the R_PPC64_GOT_PCREL34 at this offset can
// be relaxed. The eligibility for the relaxation needs to be determined
// on that relocation since this one does not relocate a symbol.
uint64_t insn = readPrefixedInstruction(loc);
uint64_t insn = readPrefixedInst(ctx, loc);
uint32_t accessInsn = read32(loc + rel.addend);
uint64_t pcRelInsn = getPCRelativeForm(accessInsn);

@@ -702,9 +702,9 @@ void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const {
break; // Displacement doesn't fit.
// Convert the PADDI to the prefixed version of accessInsn and convert
// accessInsn to a nop.
writePrefixedInstruction(loc, pcRelInsn |
((totalDisp & 0x3ffff0000) << 16) |
(totalDisp & 0xffff));
writePrefixedInst(ctx, loc,
pcRelInsn | ((totalDisp & 0x3ffff0000) << 16) |
(totalDisp & 0xffff));
write32(loc + rel.addend, NOP); // nop accessInsn.
break;
}
@@ -732,17 +732,17 @@ void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,

switch (rel.type) {
case R_PPC64_GOT_TLSGD16_HA:
writeFromHalf16(loc, NOP);
writeFromHalf16(ctx, loc, NOP);
break;
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13
writeFromHalf16(ctx, loc, 0x3c6d0000); // addis r3, r13
relocateNoSym(loc, R_PPC64_TPREL16_HA, val);
break;
case R_PPC64_GOT_TLSGD_PCREL34:
// Relax from paddi r3, 0, x@got@tlsgd@pcrel, 1 to
// paddi r3, r13, x@tprel, 0
writePrefixedInstruction(loc, 0x06000000386d0000);
writePrefixedInst(ctx, loc, 0x06000000386d0000);
relocateNoSym(loc, R_PPC64_TPREL34, val);
break;
case R_PPC64_TLSGD: {
@@ -795,15 +795,15 @@ void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,

switch (rel.type) {
case R_PPC64_GOT_TLSLD16_HA:
writeFromHalf16(loc, NOP);
writeFromHalf16(ctx, loc, NOP);
break;
case R_PPC64_GOT_TLSLD16_LO:
writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13, 0
writeFromHalf16(ctx, loc, 0x3c6d0000); // addis r3, r13, 0
break;
case R_PPC64_GOT_TLSLD_PCREL34:
// Relax from paddi r3, 0, x1@got@tlsld@pcrel, 1 to
// paddi r3, r13, 0x1000, 0
writePrefixedInstruction(loc, 0x06000000386d1000);
writePrefixedInst(ctx, loc, 0x06000000386d1000);
break;
case R_PPC64_TLSLD: {
// PC Relative Relaxation:
@@ -922,9 +922,9 @@ void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
break;
}
case R_PPC64_GOT_TPREL_PCREL34: {
const uint64_t pldRT = readPrefixedInstruction(loc) & 0x0000000003e00000;
const uint64_t pldRT = readPrefixedInst(ctx, loc) & 0x0000000003e00000;
// paddi RT(from pld), r13, symbol@tprel, 0
writePrefixedInstruction(loc, 0x06000000380d0000 | pldRT);
writePrefixedInst(ctx, loc, 0x06000000380d0000 | pldRT);
relocateNoSym(loc, R_PPC64_TPREL34, val);
break;
}
@@ -1133,7 +1133,7 @@ int64_t PPC64::getImplicitAddend(const uint8_t *buf, RelType type) const {
}

void PPC64::writeGotHeader(uint8_t *buf) const {
write64(buf, getPPC64TocBase());
write64(buf, getPPC64TocBase(ctx));
}

void PPC64::writePltHeader(uint8_t *buf) const {
@@ -1168,7 +1168,7 @@ void PPC64::writePlt(uint8_t *buf, const Symbol &sym,

void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
uint64_t /*pltEntryAddr*/) const {
writePPC64LoadAndBranch(buf, sym.getGotPltVA() - getPPC64TocBase());
writePPC64LoadAndBranch(buf, sym.getGotPltVA() - getPPC64TocBase(ctx));
}

static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
@@ -1284,7 +1284,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
checkInt(loc, val, 16, rel);
// DQ-form instructions use bits 28-31 as part of the instruction encoding
// DS-form instructions only use bits 30-31.
uint16_t mask = isDQFormInstruction(readFromHalf16(loc)) ? 0xf : 0x3;
uint16_t mask = isDQFormInstruction(readFromHalf16(ctx, loc)) ? 0xf : 0x3;
checkAlignment(loc, lo(val), mask + 1, rel);
write16(loc, (read16(loc) & mask) | lo(val));
} break;
@@ -1292,7 +1292,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
case R_PPC64_REL16_HA:
case R_PPC64_TPREL16_HA:
if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0)
writeFromHalf16(loc, NOP);
writeFromHalf16(ctx, loc, NOP);
else {
checkInt(loc, val + 0x8000, 32, rel);
write16(loc, ha(val));
@@ -1330,12 +1330,12 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
// changed into a nop. The lo part then needs to be updated to use the
// toc-pointer register r2, as the base register.
if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
uint32_t insn = readFromHalf16(loc);
uint32_t insn = readFromHalf16(ctx, loc);
if (isInstructionUpdateForm(insn))
error(getErrorLocation(loc) +
"can't toc-optimize an update instruction: 0x" +
utohexstr(insn));
writeFromHalf16(loc, (insn & 0xffe00000) | 0x00020000 | lo(val));
writeFromHalf16(ctx, loc, (insn & 0xffe00000) | 0x00020000 | lo(val));
} else {
write16(loc, lo(val));
}
@@ -1344,7 +1344,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
case R_PPC64_TPREL16_LO_DS: {
// DQ-form instructions use bits 28-31 as part of the instruction encoding
// DS-form instructions only use bits 30-31.
uint32_t insn = readFromHalf16(loc);
uint32_t insn = readFromHalf16(ctx, loc);
uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3;
checkAlignment(loc, lo(val), mask + 1, rel);
if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
@@ -1356,7 +1356,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
"Can't toc-optimize an update instruction: 0x" +
Twine::utohexstr(insn));
insn &= 0xffe00000 | mask;
writeFromHalf16(loc, insn | 0x00020000 | lo(val));
writeFromHalf16(ctx, loc, insn | 0x00020000 | lo(val));
} else {
write16(loc, (read16(loc) & mask) | lo(val));
}
@@ -1409,9 +1409,9 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
const uint64_t fullMask = 0x0003ffff0000ffff;
checkInt(loc, val, 34, rel);

uint64_t instr = readPrefixedInstruction(loc) & ~fullMask;
writePrefixedInstruction(loc, instr | ((val & si0Mask) << 16) |
(val & si1Mask));
uint64_t instr = readPrefixedInst(ctx, loc) & ~fullMask;
writePrefixedInst(ctx, loc,
instr | ((val & si0Mask) << 16) | (val & si1Mask));
break;
}
// If we encounter a PCREL_OPT relocation that we won't optimize.
@@ -1486,7 +1486,7 @@ RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
ctx.arg.pcRelOptimize) {
// It only makes sense to optimize pld since paddi means that the address
// of the object in the GOT is required rather than the object itself.
if ((readPrefixedInstruction(loc) & 0xfc000000) == 0xe4000000)
if ((readPrefixedInst(ctx, loc) & 0xfc000000) == 0xe4000000)
return R_PPC64_RELAX_GOT_PC;
}
return R_GOT_PC;
@@ -1521,15 +1521,15 @@ void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
case R_PPC64_GOT_TLSGD16_LO: {
// Relax from addi r3, rA, sym@got@tlsgd@l to
// ld r3, sym@got@tprel@l(rA)
uint32_t ra = (readFromHalf16(loc) & (0x1f << 16));
writeFromHalf16(loc, 0xe8600000 | ra);
uint32_t ra = (readFromHalf16(ctx, loc) & (0x1f << 16));
writeFromHalf16(ctx, loc, 0xe8600000 | ra);
relocateNoSym(loc, R_PPC64_GOT_TPREL16_LO_DS, val);
return;
}
case R_PPC64_GOT_TLSGD_PCREL34: {
// Relax from paddi r3, 0, sym@got@tlsgd@pcrel, 1 to
// pld r3, sym@got@tprel@pcrel
writePrefixedInstruction(loc, 0x04100000e4600000);
writePrefixedInst(ctx, loc, 0x04100000e4600000);
relocateNoSym(loc, R_PPC64_GOT_TPREL_PCREL34, val);
return;
}
@@ -1594,7 +1594,7 @@ void PPC64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
// R_PPC64_TOC16_LO_DS. Don't relax. This loses some relaxation
// opportunities but is safe.
if (ctx.ppc64noTocRelax.count({rel.sym, rel.addend}) ||
!tryRelaxPPC64TocIndirection(rel, loc))
!tryRelaxPPC64TocIndirection(ctx, rel, loc))
relocate(loc, rel, val);
break;
case R_PPC64_CALL:
@@ -137,7 +137,7 @@ RISCV::RISCV(Ctx &ctx) : TargetInfo(ctx) {
ipltEntrySize = 16;
}

static uint32_t getEFlags(InputFile *f) {
static uint32_t getEFlags(Ctx &ctx, InputFile *f) {
if (ctx.arg.is64)
return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
@@ -149,10 +149,9 @@ uint32_t RISCV::calcEFlags() const {
if (ctx.objectFiles.empty())
return 0;

uint32_t target = getEFlags(ctx.objectFiles.front());

uint32_t target = getEFlags(ctx, ctx.objectFiles.front());
for (InputFile *f : ctx.objectFiles) {
uint32_t eflags = getEFlags(f);
uint32_t eflags = getEFlags(ctx, f);
if (eflags & EF_RISCV_RVC)
target |= EF_RISCV_RVC;

@@ -547,7 +546,8 @@ static bool relaxable(ArrayRef<Relocation> relocs, size_t i) {
return i + 1 != relocs.size() && relocs[i + 1].type == R_RISCV_RELAX;
}

static void tlsdescToIe(uint8_t *loc, const Relocation &rel, uint64_t val) {
static void tlsdescToIe(Ctx &ctx, uint8_t *loc, const Relocation &rel,
uint64_t val) {
switch (rel.type) {
case R_RISCV_TLSDESC_HI20:
case R_RISCV_TLSDESC_LOAD_LO12:
@@ -627,7 +627,7 @@ void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
isToLe = false;
tlsdescRelax = relaxable(relocs, i);
if (!tlsdescRelax)
tlsdescToIe(loc, rel, val);
tlsdescToIe(ctx, loc, rel, val);
continue;
case R_RELAX_TLS_GD_TO_LE:
// See the comment in handleTlsRelocation. For TLSDESC=>IE,
@@ -652,7 +652,7 @@ void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
if (isToLe)
tlsdescToLe(loc, rel, val);
else
tlsdescToIe(loc, rel, val);
tlsdescToIe(ctx, loc, rel, val);
continue;
case R_RISCV_LEB128:
if (i + 1 < size) {
@@ -678,7 +678,7 @@ void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
}
}

void elf::initSymbolAnchors() {
void elf::initSymbolAnchors(Ctx &ctx) {
SmallVector<InputSection *, 0> storage;
for (OutputSection *osec : ctx.outputSections) {
if (!(osec->flags & SHF_EXECINSTR))
@@ -732,9 +732,9 @@ void elf::initSymbolAnchors() {
}

// Relax R_RISCV_CALL/R_RISCV_CALL_PLT auipc+jalr to c.j, c.jal, or jal.
static void relaxCall(const InputSection &sec, size_t i, uint64_t loc,
static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc,
Relocation &r, uint32_t &remove) {
const bool rvc = getEFlags(sec.file) & EF_RISCV_RVC;
const bool rvc = getEFlags(ctx, sec.file) & EF_RISCV_RVC;
const Symbol &sym = *r.sym;
const uint64_t insnPair = read64le(sec.content().data() + r.offset);
const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7);
@@ -787,8 +787,8 @@ static void relaxTlsLe(const InputSection &sec, size_t i, uint64_t loc,
}
}

static void relaxHi20Lo12(const InputSection &sec, size_t i, uint64_t loc,
Relocation &r, uint32_t &remove) {
static void relaxHi20Lo12(Ctx &ctx, const InputSection &sec, size_t i,
uint64_t loc, Relocation &r, uint32_t &remove) {
const Defined *gp = ctx.sym.riscvGlobalPointer;
if (!gp)
return;
@@ -811,7 +811,7 @@ static void relaxHi20Lo12(const InputSection &sec, size_t i, uint64_t loc,
}
}

static bool relax(InputSection &sec) {
static bool relax(Ctx &ctx, InputSection &sec) {
const uint64_t secAddr = sec.getVA();
const MutableArrayRef<Relocation> relocs = sec.relocs();
auto &aux = *sec.relaxAux;
@@ -844,7 +844,7 @@ static bool relax(InputSection &sec) {
case R_RISCV_CALL:
case R_RISCV_CALL_PLT:
if (relaxable(relocs, i))
relaxCall(sec, i, loc, r, remove);
relaxCall(ctx, sec, i, loc, r, remove);
break;
case R_RISCV_TPREL_HI20:
case R_RISCV_TPREL_ADD:
@@ -857,7 +857,7 @@ static bool relax(InputSection &sec) {
case R_RISCV_LO12_I:
case R_RISCV_LO12_S:
if (relaxable(relocs, i))
relaxHi20Lo12(sec, i, loc, r, remove);
relaxHi20Lo12(ctx, sec, i, loc, r, remove);
break;
case R_RISCV_TLSDESC_HI20:
// For TLSDESC=>LE, we can use the short form if hi20 is zero.
@@ -918,7 +918,7 @@ bool RISCV::relaxOnce(int pass) const {
return false;

if (pass == 0)
initSymbolAnchors();
initSymbolAnchors(ctx);

SmallVector<InputSection *, 0> storage;
bool changed = false;
@@ -926,7 +926,7 @@ bool RISCV::relaxOnce(int pass) const {
if (!(osec->flags & SHF_EXECINSTR))
continue;
for (InputSection *sec : getInputSections(*osec, storage))
changed |= relax(*sec);
changed |= relax(ctx, *sec);
}
return changed;
}
@@ -1170,7 +1170,8 @@ static void mergeAtomic(DenseMap<unsigned, unsigned>::iterator it,
}

static RISCVAttributesSection *
mergeAttributesSection(const SmallVector<InputSectionBase *, 0> &sections) {
mergeAttributesSection(Ctx &ctx,
const SmallVector<InputSectionBase *, 0> &sections) {
using RISCVAttrs::RISCVAtomicAbiTag;
RISCVISAUtils::OrderedExtensionMap exts;
const InputSectionBase *firstStackAlign = nullptr;
@@ -1305,7 +1306,7 @@ void RISCVAttributesSection::writeTo(Ctx &ctx, uint8_t *buf) {
}
}

void elf::mergeRISCVAttributesSections(Ctx &) {
void elf::mergeRISCVAttributesSections(Ctx &ctx) {
// Find the first input SHT_RISCV_ATTRIBUTES; return if not found.
size_t place =
llvm::find_if(ctx.inputSections,
@@ -1325,7 +1326,7 @@ void elf::mergeRISCVAttributesSections(Ctx &) {

// Add the merged section.
ctx.inputSections.insert(ctx.inputSections.begin() + place,
mergeAttributesSection(sections));
mergeAttributesSection(ctx, sections));
}

TargetInfo *elf::getRISCVTargetInfo(Ctx &ctx) {
@@ -2059,7 +2059,7 @@ void LinkerDriver::inferMachineType() {
inferred = true;
ctx.arg.ekind = f->ekind;
ctx.arg.emachine = f->emachine;
ctx.arg.mipsN32Abi = ctx.arg.emachine == EM_MIPS && isMipsN32Abi(f);
ctx.arg.mipsN32Abi = ctx.arg.emachine == EM_MIPS && isMipsN32Abi(ctx, *f);
}
ctx.arg.osabi = f->osabi;
if (f->osabi != ELFOSABI_NONE)
@@ -3155,7 +3155,7 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {

if (canHaveMemtagGlobals()) {
llvm::TimeTraceScope timeScope("Process memory tagged symbols");
createTaggedSymbols(ctx.objectFiles);
createTaggedSymbols(ctx);
}

// Create synthesized sections such as .got and .plt. This is called before
@@ -272,7 +272,7 @@ static bool isCompatible(Ctx &ctx, InputFile *file) {
if (file->ekind == ctx.arg.ekind && file->emachine == ctx.arg.emachine) {
if (ctx.arg.emachine != EM_MIPS)
return true;
if (isMipsN32Abi(file) == ctx.arg.mipsN32Abi)
if (isMipsN32Abi(ctx, *file) == ctx.arg.mipsN32Abi)
return true;
}
@@ -893,7 +893,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(sym.stOther);
}
case R_PPC64_TOCBASE:
return getPPC64TocBase() + a;
return getPPC64TocBase(ctx) + a;
case R_RELAX_GOT_PC:
case R_PPC64_RELAX_GOT_PC:
return sym.getVA(a) - p;
@@ -1480,7 +1480,7 @@ void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) {
if (LLVM_UNLIKELY(ctx.arg.emachine == EM_MIPS))
addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal());
else if (ctx.arg.emachine == EM_PPC64 && ctx.arg.isPic && type == R_PPC64_TOC)
addend += getPPC64TocBase();
addend += getPPC64TocBase(ctx);

// Ignore R_*_NONE and other marker relocations.
if (expr == R_NONE)
@@ -2645,7 +2645,7 @@ PPC32GlinkSection::PPC32GlinkSection() {
}

void PPC32GlinkSection::writeTo(Ctx &ctx, uint8_t *buf) {
writePPC32GlinkSection(buf, entries.size());
writePPC32GlinkSection(ctx, buf, entries.size());
}

size_t PPC32GlinkSection::getSize(Ctx &ctx) const {
@@ -4870,7 +4870,7 @@ template <class ELFT> void elf::createSyntheticSections(Ctx &ctx) {
}

if (ctx.arg.emachine == EM_ARM) {
ctx.in.armCmseSGSection = std::make_unique<ArmCmseSGSection>();
ctx.in.armCmseSGSection = std::make_unique<ArmCmseSGSection>(ctx);
add(*ctx.in.armCmseSGSection);
}
@@ -1311,7 +1311,7 @@ class ArmCmseSGVeneer;

class ArmCmseSGSection final : public SyntheticSection {
public:
ArmCmseSGSection();
ArmCmseSGSection(Ctx &ctx);
bool isNeeded() const override { return !entries.empty(); }
size_t getSize(Ctx &) const override;
void writeTo(Ctx &, uint8_t *buf) override;
@@ -210,7 +210,7 @@ static inline std::string getErrorLocation(const uint8_t *loc) {

void processArmCmseSymbols(Ctx &);

void writePPC32GlinkSection(uint8_t *buf, size_t numEntries);
void writePPC32GlinkSection(Ctx &, uint8_t *buf, size_t numEntries);

unsigned getPPCDFormOp(unsigned secondaryOp);
unsigned getPPCDSFormOp(unsigned secondaryOp);
@@ -227,22 +227,22 @@ unsigned getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther);
// Write a prefixed instruction, which is a 4-byte prefix followed by a 4-byte
// instruction (regardless of endianness). Therefore, the prefix is always in
// lower memory than the instruction.
void writePrefixedInstruction(uint8_t *loc, uint64_t insn);
void writePrefixedInst(Ctx &, uint8_t *loc, uint64_t insn);

void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
void addPPC64SaveRestore(Ctx &);
uint64_t getPPC64TocBase(Ctx &ctx);
uint64_t getAArch64Page(uint64_t expr);
bool isAArch64BTILandingPad(Symbol &s, int64_t a);
template <typename ELFT> void writeARMCmseImportLib();
template <typename ELFT> void writeARMCmseImportLib(Ctx &);
uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type);
void riscvFinalizeRelax(int passes);
void mergeRISCVAttributesSections(Ctx &);
void addArmInputSectionMappingSymbols();
void addArmInputSectionMappingSymbols(Ctx &);
void addArmSyntheticSectionMappingSymbol(Defined *);
void sortArmMappingSymbols();
void convertArmInstructionstoBE8(InputSection *sec, uint8_t *buf);
void createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files);
void initSymbolAnchors();
void createTaggedSymbols(Ctx &);
void initSymbolAnchors(Ctx &);

TargetInfo *getTarget(Ctx &);
@@ -1231,7 +1231,7 @@ void elf::writePPC64LoadAndBranch(uint8_t *buf, int64_t offset) {
}

void PPC64PltCallStub::writeTo(uint8_t *buf) {
int64_t offset = destination.getGotPltVA() - getPPC64TocBase();
int64_t offset = destination.getGotPltVA() - getPPC64TocBase(ctx);
// Save the TOC pointer to the save-slot reserved in the call frame.
write32(buf + 0, 0xf8410018); // std r2,24(r1)
writePPC64LoadAndBranch(buf + 4, offset);
@@ -1257,7 +1257,7 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) {
write32(buf + 4, 0x48000000 | (offset & 0x03fffffc)); // b <offset>
} else if (isInt<34>(offset)) {
int nextInstOffset;
uint64_t tocOffset = destination.getVA() - getPPC64TocBase();
uint64_t tocOffset = destination.getVA() - getPPC64TocBase(ctx);
if (tocOffset >> 16 > 0) {
const uint64_t addi = ADDI_R12_TO_R12_NO_DISP | (tocOffset & 0xffff);
const uint64_t addis =
@@ -1276,7 +1276,7 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) {
ctx.in.ppc64LongBranchTarget->addEntry(&destination, addend);
const int64_t offsetFromTOC =
ctx.in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
getPPC64TocBase();
getPPC64TocBase(ctx);
writePPC64LoadAndBranch(buf + 4, offsetFromTOC);
}
}
@@ -1303,8 +1303,8 @@ void PPC64R12SetupStub::writeTo(uint8_t *buf) {
if (ctx.arg.power10Stubs) {
const uint64_t imm = (((offset >> 16) & 0x3ffff) << 32) | (offset & 0xffff);
// pld 12, func@plt@pcrel or paddi r12, 0, func@pcrel
writePrefixedInstruction(
buf, (gotPlt ? PLD_R12_NO_DISP : PADDI_R12_NO_DISP) | imm);
writePrefixedInst(ctx, buf,
(gotPlt ? PLD_R12_NO_DISP : PADDI_R12_NO_DISP) | imm);
nextInstOffset = 8;
} else {
uint32_t off = offset - 8;
@@ -1338,7 +1338,7 @@ bool PPC64R12SetupStub::isCompatibleWith(const InputSection &isec,
void PPC64LongBranchThunk::writeTo(uint8_t *buf) {
int64_t offset =
ctx.in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
getPPC64TocBase();
getPPC64TocBase(ctx);
writePPC64LoadAndBranch(buf, offset);
}

@@ -1514,7 +1514,7 @@ static Thunk *addThunkAVR(Ctx &ctx, RelType type, Symbol &s, int64_t a) {
}

static Thunk *addThunkMips(Ctx &ctx, RelType type, Symbol &s) {
if ((s.stOther & STO_MIPS_MICROMIPS) && isMipsR6())
if ((s.stOther & STO_MIPS_MICROMIPS) && isMipsR6(ctx))
return make<MicroMipsR6Thunk>(ctx, s);
if (s.stOther & STO_MIPS_MICROMIPS)
return make<MicroMipsThunk>(ctx, s);
@@ -186,7 +186,7 @@ void elf::addReservedSymbols(Ctx &ctx) {
// support Small Data Area, define it arbitrarily as 0.
addOptionalRegular(ctx, "_SDA_BASE_", nullptr, 0, STV_HIDDEN);
} else if (ctx.arg.emachine == EM_PPC64) {
addPPC64SaveRestore();
addPPC64SaveRestore(ctx);
}

// The Power Architecture 64-bit v2 ABI defines a TableOfContents (TOC) which
@@ -377,7 +377,7 @@ template <class ELFT> void Writer<ELFT>::run() {
"': " + toString(std::move(e)));

if (!ctx.arg.cmseOutputLib.empty())
writeARMCmseImportLib<ELFT>();
writeARMCmseImportLib<ELFT>(ctx);
}
}

@@ -2082,7 +2082,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
ctx.script->checkFinalScriptConditions();

if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8) {
addArmInputSectionMappingSymbols();
addArmInputSectionMappingSymbols(ctx);
sortArmMappingSymbols();
}
}
@@ -48,14 +48,14 @@ void addReservedSymbols(Ctx &ctx);
bool includeInSymtab(const Symbol &b);
unsigned getSectionRank(Ctx &, OutputSection &osec);

template <class ELFT> uint32_t calcMipsEFlags();
template <class ELFT> uint32_t calcMipsEFlags(Ctx &);

uint8_t getMipsFpAbiFlag(uint8_t oldFlag, uint8_t newFlag,
llvm::StringRef fileName);

bool isMipsN32Abi(const InputFile *f);
bool isMipsN32Abi(Ctx &, const InputFile &f);
bool isMicroMips();
bool isMipsR6();
bool isMipsR6(Ctx &);

} // namespace lld::elf