//===- OutputSections.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Config.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/Arrays.h"
#include "lld/Common/Memory.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h" // LLVM_ENABLE_ZLIB, LLVM_ENABLE_ZSTD
#include "llvm/Support/Compression.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
#undef in
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
#include <zlib.h>
#endif
#if LLVM_ENABLE_ZSTD
#include <zstd.h>
#endif

using namespace llvm;
using namespace llvm::dwarf;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

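// Compute the p_flags (PF_*) for the program header covering this section.
// ARM and AArch64 purecode sections are execute-only: PF_R is dropped so the
// containing segment can be mapped without read permission.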
uint32_t OutputSection::getPhdrFlags() const {
  uint32_t ret = 0;
  bool purecode =
      (ctx.arg.emachine == EM_ARM && (flags & SHF_ARM_PURECODE)) ||
      (ctx.arg.emachine == EM_AARCH64 && (flags & SHF_AARCH64_PURECODE));
  if (!purecode)
    ret |= PF_R;
  if (flags & SHF_WRITE)
    ret |= PF_W;
  if (flags & SHF_EXECINSTR)
    ret |= PF_X;
  return ret;
}

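// Copy the finalized fields of this output section into the on-disk ELF
// section header.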
template <class ELFT>
void OutputSection::writeHeaderTo(typename ELFT::Shdr *shdr) {
  shdr->sh_entsize = entsize;
  shdr->sh_addralign = addralign;
  shdr->sh_type = type;
  shdr->sh_offset = offset;
  shdr->sh_flags = flags;
  shdr->sh_info = info;
  shdr->sh_link = link;
  shdr->sh_addr = addr;
  shdr->sh_size = size;
  shdr->sh_name = shName;
}

OutputSection::OutputSection(Ctx &ctx, StringRef name, uint32_t type,
                             uint64_t flags)
    : SectionBase(Output, ctx.internalFile, name, type, flags, /*link=*/0,
                  /*info=*/0, /*addralign=*/1, /*entsize=*/0),
      ctx(ctx) {}

uint64_t OutputSection::getLMA() const {
  return ptLoad ? addr + ptLoad->lmaOffset : addr;
}

// We allow sections of the types listed below to be merged into a
// single progbits section. This is typically done by linker
// scripts. Merging nobits and progbits will force disk space
// to be allocated for nobits sections. Other ones don't require
// any special treatment on top of progbits, so there doesn't
// seem to be a harm in merging them.
//
// NOTE: clang since rL252300 emits SHT_X86_64_UNWIND .eh_frame sections. Allow
// them to be merged into SHT_PROGBITS .eh_frame (GNU as .cfi_*).
static bool canMergeToProgbits(Ctx &ctx, unsigned type) {
  return type == SHT_NOBITS || type == SHT_PROGBITS || type == SHT_INIT_ARRAY ||
         type == SHT_PREINIT_ARRAY || type == SHT_FINI_ARRAY ||
         type == SHT_NOTE ||
         (type == SHT_X86_64_UNWIND && ctx.arg.emachine == EM_X86_64);
}

// Record that isec will be placed in the OutputSection. isec does not become
// permanent until finalizeInputSections() is called. The function should not
// be used after finalizeInputSections() is called. If you need to add an
// InputSection post finalizeInputSections(), then you must do the following:
//
// 1. Find or create an InputSectionDescription to hold the InputSection.
// 2. Add the InputSection to InputSectionDescription::sections.
// 3. Call commitSection(isec).
void OutputSection::recordSection(InputSectionBase *isec) {
  partition = isec->partition;
  isec->parent = this;
  if (commands.empty() || !isa<InputSectionDescription>(commands.back()))
    commands.push_back(make<InputSectionDescription>(""));
  auto *isd = cast<InputSectionDescription>(commands.back());
  isd->sectionBases.push_back(isec);
}

// Update fields (type, flags, alignment, etc) according to the InputSection
// isec. Also check whether the InputSection flags and type are consistent with
// other InputSections.
void OutputSection::commitSection(InputSection *isec) {
  if (LLVM_UNLIKELY(type != isec->type)) {
    if (!hasInputSections && !typeIsSet) {
      type = isec->type;
    } else if (isStaticRelSecType(type) && isStaticRelSecType(isec->type) &&
               (type == SHT_CREL) != (isec->type == SHT_CREL)) {
      // Combine mixed SHT_REL[A] and SHT_CREL to SHT_CREL.
      type = SHT_CREL;
      if (type == SHT_REL) {
        if (name.consume_front(".rel"))
          name = ctx.saver.save(".crel" + name);
      } else if (name.consume_front(".rela")) {
        name = ctx.saver.save(".crel" + name);
      }
    } else {
      if (typeIsSet || !canMergeToProgbits(ctx, type) ||
          !canMergeToProgbits(ctx, isec->type)) {
        // The (NOLOAD) directive changes the section type to SHT_NOBITS; the
        // intention is that the contents at that address are provided by some
        // other means. Some projects (e.g.
        // https://github.com/ClangBuiltLinux/linux/issues/1597) rely on the
        // behavior. Other types get an error.
        if (type != SHT_NOBITS) {
          Err(ctx) << "section type mismatch for " << isec->name << "\n>>> "
                   << isec << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, isec->type)
                   << "\n>>> output section " << name << ": "
                   << getELFSectionTypeName(ctx.arg.emachine, type);
        }
      }
      if (!typeIsSet)
        type = SHT_PROGBITS;
    }
  }
  if (!hasInputSections) {
    // If isec is the first section to be added to this section,
    // initialize type, entsize and flags from isec.
    hasInputSections = true;
    entsize = isec->entsize;
    flags = isec->flags;
  } else {
    // Otherwise, check if new type or flags are compatible with existing ones.
    if ((flags ^ isec->flags) & SHF_TLS)
      ErrAlways(ctx) << "incompatible section flags for " << name << "\n>>> "
                     << isec << ": 0x" << utohexstr(isec->flags, true)
                     << "\n>>> output section " << name << ": 0x"
                     << utohexstr(flags, true);
  }

  isec->parent = this;
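  // SHF_ARM_PURECODE / SHF_AARCH64_PURECODE are "AND" flags: the output
  // section remains execute-only only if every merged input section carries
  // the flag. All other flags are OR'ed together.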
  uint64_t andMask = 0;
  if (ctx.arg.emachine == EM_ARM)
    andMask |= (uint64_t)SHF_ARM_PURECODE;
  if (ctx.arg.emachine == EM_AARCH64)
    andMask |= (uint64_t)SHF_AARCH64_PURECODE;
  uint64_t orMask = ~andMask;
  uint64_t andFlags = (flags & isec->flags) & andMask;
  uint64_t orFlags = (flags | isec->flags) & orMask;
  flags = andFlags | orFlags;
  if (nonAlloc)
    flags &= ~(uint64_t)SHF_ALLOC;

  addralign = std::max(addralign, isec->addralign);

  // If this section contains a table of fixed-size entries, sh_entsize
  // holds the element size. If it contains elements of different size we
  // set sh_entsize to 0.
  if (entsize != isec->entsize)
    entsize = 0;
}

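// Pick the synthetic section kind used to merge SHF_MERGE input pieces. At
// -O2, SHF_STRINGS sections also get tail merging (sharing of common string
// suffixes); otherwise only identical pieces are deduplicated.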
static MergeSyntheticSection *createMergeSynthetic(Ctx &ctx, StringRef name,
                                                   uint32_t type,
                                                   uint64_t flags,
                                                   uint32_t addralign) {
  if ((flags & SHF_STRINGS) && ctx.arg.optimize >= 2)
    return make<MergeTailSection>(ctx, name, type, flags, addralign);
  return make<MergeNoTailSection>(ctx, name, type, flags, addralign);
}

// This function scans over the InputSectionBase list sectionBases to create
// InputSectionDescription::sections.
//
// It removes MergeInputSections from the input section array and adds
// new synthetic sections at the location of the first input section
// that it replaces. It then finalizes each synthetic section in order
// to compute an output offset for each piece of each input section.
void OutputSection::finalizeInputSections() {
  auto *script = ctx.script;
  std::vector<MergeSyntheticSection *> mergeSections;
  for (SectionCommand *cmd : commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    isd->sections.reserve(isd->sectionBases.size());
    for (InputSectionBase *s : isd->sectionBases) {
      MergeInputSection *ms = dyn_cast<MergeInputSection>(s);
      if (!ms) {
        isd->sections.push_back(cast<InputSection>(s));
        continue;
      }

      // We do not want to handle sections that are not alive, so just remove
      // them instead of trying to merge.
      if (!ms->isLive())
        continue;

      auto i = llvm::find_if(mergeSections, [=](MergeSyntheticSection *sec) {
        // While we could create a single synthetic section for two different
        // values of Entsize, it is better to take Entsize into consideration.
        //
        // With a single synthetic section no two pieces with different Entsize
        // could be equal, so we may as well have two sections.
        //
        // Using Entsize here also allows us to propagate it to the synthetic
        // section.
        //
        // SHF_STRINGS sections with different alignments should not be merged.
        return sec->flags == ms->flags && sec->entsize == ms->entsize &&
               (sec->addralign == ms->addralign || !(sec->flags & SHF_STRINGS));
      });
      if (i == mergeSections.end()) {
        MergeSyntheticSection *syn = createMergeSynthetic(
            ctx, s->name, ms->type, ms->flags, ms->addralign);
        mergeSections.push_back(syn);
        i = std::prev(mergeSections.end());
        syn->entsize = ms->entsize;
        isd->sections.push_back(syn);
        // The merge synthetic section inherits the potential spill locations
        // of its first contained section.
        auto it = script->potentialSpillLists.find(ms);
        if (it != script->potentialSpillLists.end())
          script->potentialSpillLists.try_emplace(syn, it->second);
      }
      (*i)->addSection(ms);
    }

    // sectionBases should not be used from this point onwards. Clear it to
    // catch misuses.
    isd->sectionBases.clear();

    // Some input sections may be removed from the list after ICF.
    for (InputSection *s : isd->sections)
      commitSection(s);
  }
  for (auto *ms : mergeSections) {
    // Merging may have increased the alignment of a spillable section. Update
    // the alignment of potential spill sections and their containing output
    // sections.
    if (auto it = script->potentialSpillLists.find(ms);
        it != script->potentialSpillLists.end()) {
      for (PotentialSpillSection *s = it->second.head; s; s = s->next) {
        s->addralign = std::max(s->addralign, ms->addralign);
        s->parent->addralign = std::max(s->parent->addralign, s->addralign);
      }
    }

    ms->finalizeContents();
  }
}

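// Stable-sort `in` by the priority computed by `order`; sections with equal
// priority keep their current relative order.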
static void sortByOrder(MutableArrayRef<InputSection *> in,
                        llvm::function_ref<int(InputSectionBase *s)> order) {
  std::vector<std::pair<int, InputSection *>> v;
  for (InputSection *s : in)
    v.emplace_back(order(s), s);
  llvm::stable_sort(v, less_first());

  for (size_t i = 0; i < v.size(); ++i)
    in[i] = v[i].second;
}

uint64_t elf::getHeaderSize(Ctx &ctx) {
  if (ctx.arg.oFormatBinary)
    return 0;
  return ctx.out.elfHeader->size + ctx.out.programHeaders->size;
}

void OutputSection::sort(llvm::function_ref<int(InputSectionBase *s)> order) {
  assert(isLive());
  for (SectionCommand *b : commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(b))
      sortByOrder(isd->sections, order);
}

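// Pad a gap in an executable output section with the target's NOP encodings
// instead of zero bytes. nopInstrs provides one NOP sequence per length
// (indexed by length - 1); the largest one is repeated and a shorter one
// covers the remainder.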
static void nopInstrFill(Ctx &ctx, uint8_t *buf, size_t size) {
  if (size == 0)
    return;
  unsigned i = 0;
  std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
  unsigned num = size / nopFiller.back().size();
  for (unsigned c = 0; c < num; ++c) {
    memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
    i += nopFiller.back().size();
  }
  unsigned remaining = size - i;
  if (!remaining)
    return;
  assert(nopFiller[remaining - 1].size() == remaining);
  memcpy(buf + i, nopFiller[remaining - 1].data(), remaining);
}

// Fill [buf, buf + size) with filler.
// This is used for the linker script "=fillexp" command.
static void fill(uint8_t *buf, size_t size,
                 const std::array<uint8_t, 4> &filler) {
  size_t i = 0;
  for (; i + 4 < size; i += 4)
    memcpy(buf + i, filler.data(), 4);
  memcpy(buf + i, filler.data(), size - i);
}

#if LLVM_ENABLE_ZLIB
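// Debug sections are compressed in parallel as independent shards of raw
// DEFLATE data. Every shard but the last is flushed with Z_SYNC_FLUSH so that
// it ends on a byte boundary and can simply be concatenated with the next
// shard; the last shard uses Z_FINISH to emit the final (BFINAL) block. The
// per-shard Adler-32 checksums are combined later with adler32_combine().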
static SmallVector<uint8_t, 0> deflateShard(Ctx &ctx, ArrayRef<uint8_t> in,
                                            int level, int flush) {
  // 15 and 8 are the defaults. windowBits=-15 is negative to generate raw
  // deflate data with no zlib header or trailer.
  z_stream s = {};
  auto res = deflateInit2(&s, level, Z_DEFLATED, -15, 8, Z_DEFAULT_STRATEGY);
  if (res != 0) {
    Err(ctx) << "--compress-sections: deflateInit2 returned " << res;
    return {};
  }
  s.next_in = const_cast<uint8_t *>(in.data());
  s.avail_in = in.size();

  // Allocate a buffer of half of the input size, and grow it by 1.5x if
  // insufficient.
  SmallVector<uint8_t, 0> out;
  size_t pos = 0;
  out.resize_for_overwrite(std::max<size_t>(in.size() / 2, 64));
  do {
    if (pos == out.size())
      out.resize_for_overwrite(out.size() * 3 / 2);
    s.next_out = out.data() + pos;
    s.avail_out = out.size() - pos;
    (void)deflate(&s, flush);
    pos = s.next_out - out.data();
  } while (s.avail_out == 0);
  assert(s.avail_in == 0);

  out.truncate(pos);
  deflateEnd(&s);
  return out;
}
#endif

// Compress certain non-SHF_ALLOC sections:
//
// * (if --compress-debug-sections is specified) non-empty .debug_* sections
// * (if --compress-sections is specified) matched sections
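//
// ctx.arg.compressSections holds (glob, format, level) tuples parsed from
// --compress-sections; the last matching entry wins and can override the
// --compress-debug-sections choice for a particular section.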
template <class ELFT> void OutputSection::maybeCompress(Ctx &ctx) {
  using Elf_Chdr = typename ELFT::Chdr;
  (void)sizeof(Elf_Chdr);

  DebugCompressionType ctype = DebugCompressionType::None;
  size_t compressedSize = sizeof(Elf_Chdr);
  unsigned level = 0; // default compression level
  if (!(flags & SHF_ALLOC) && ctx.arg.compressDebugSections &&
      name.starts_with(".debug_"))
    ctype = *ctx.arg.compressDebugSections;
  for (auto &[glob, t, l] : ctx.arg.compressSections)
    if (glob.match(name))
      std::tie(ctype, level) = {t, l};
  if (ctype == DebugCompressionType::None)
    return;
  if (flags & SHF_ALLOC) {
    Err(ctx) << "--compress-sections: section '" << name
             << "' with the SHF_ALLOC flag cannot be compressed";
    return;
  }

  llvm::TimeTraceScope timeScope("Compress sections");
  auto buf = std::make_unique<uint8_t[]>(size);
  // Write uncompressed data to a temporary zero-initialized buffer.
  {
    parallel::TaskGroup tg;
    writeTo<ELFT>(ctx, buf.get(), tg);
  }
  // The generic ABI specifies "The sh_size and sh_addralign fields of the
  // section header for a compressed section reflect the requirements of the
  // compressed section." However, 1-byte alignment has been widely accepted
  // and utilized for a long time. Removing alignment padding is particularly
  // useful when there are many compressed output sections.
  addralign = 1;

  // Split input into 1-MiB shards.
  [[maybe_unused]] constexpr size_t shardSize = 1 << 20;
  auto shardsIn = split(ArrayRef<uint8_t>(buf.get(), size), shardSize);
  const size_t numShards = shardsIn.size();
  auto shardsOut = std::make_unique<SmallVector<uint8_t, 0>[]>(numShards);

#if LLVM_ENABLE_ZSTD
  // Use ZSTD's streaming compression API. See
  // http://facebook.github.io/zstd/zstd_manual.html "Streaming compression -
  // HowTo".
  if (ctype == DebugCompressionType::Zstd) {
    parallelFor(0, numShards, [&](size_t i) {
      SmallVector<uint8_t, 0> out;
      ZSTD_CCtx *cctx = ZSTD_createCCtx();
      ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
      ZSTD_inBuffer zib = {shardsIn[i].data(), shardsIn[i].size(), 0};
      ZSTD_outBuffer zob = {nullptr, 0, 0};
      size_t size;
      do {
        // Allocate a buffer of half of the input size, and grow it by 1.5x if
        // insufficient.
        if (zob.pos == zob.size) {
          out.resize_for_overwrite(
              zob.size ? zob.size * 3 / 2 : std::max<size_t>(zib.size / 4, 64));
          zob = {out.data(), out.size(), zob.pos};
        }
        size = ZSTD_compressStream2(cctx, &zob, &zib, ZSTD_e_end);
        assert(!ZSTD_isError(size));
      } while (size != 0);
      out.truncate(zob.pos);
      ZSTD_freeCCtx(cctx);
      shardsOut[i] = std::move(out);
    });
    compressed.type = ELFCOMPRESS_ZSTD;
    for (size_t i = 0; i != numShards; ++i)
      compressedSize += shardsOut[i].size();
  }
#endif

#if LLVM_ENABLE_ZLIB
  // We chose 1 (Z_BEST_SPEED) as the default compression level because it is
  // fast and provides decent compression ratios.
  if (ctype == DebugCompressionType::Zlib) {
    if (!level)
      level = Z_BEST_SPEED;

    // Compress shards and compute Adler-32 checksums. Use Z_SYNC_FLUSH for all
    // shards but the last to flush the output to a byte boundary to be
    // concatenated with the next shard.
    auto shardsAdler = std::make_unique<uint32_t[]>(numShards);
    parallelFor(0, numShards, [&](size_t i) {
      shardsOut[i] = deflateShard(ctx, shardsIn[i], level,
                                  i != numShards - 1 ? Z_SYNC_FLUSH : Z_FINISH);
      shardsAdler[i] = adler32(1, shardsIn[i].data(), shardsIn[i].size());
    });

    // Update section size and combine Adler-32 checksums.
    uint32_t checksum = 1; // Initial Adler-32 value
    compressedSize += 2;   // zlib header (CMF and FLG bytes)
    for (size_t i = 0; i != numShards; ++i) {
      compressedSize += shardsOut[i].size();
      checksum = adler32_combine(checksum, shardsAdler[i], shardsIn[i].size());
    }
    compressedSize += 4; // checksum
    compressed.type = ELFCOMPRESS_ZLIB;
    compressed.checksum = checksum;
  }
#endif

  if (compressedSize >= size)
    return;
  compressed.uncompressedSize = size;
  compressed.shards = std::move(shardsOut);
  compressed.numShards = numShards;
  size = compressedSize;
  flags |= SHF_COMPRESSED;
}

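// Write an integer of the given byte width using the output's endianness.
// This backs the linker script BYTE/SHORT/LONG/QUAD data commands.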
static void writeInt(Ctx &ctx, uint8_t *buf, uint64_t data, uint64_t size) {
  if (size == 1)
    *buf = data;
  else if (size == 2)
    write16(ctx, buf, data);
  else if (size == 4)
    write32(ctx, buf, data);
  else if (size == 8)
    write64(ctx, buf, data);
  else
    llvm_unreachable("unsupported Size argument");
}

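// Write this output section's contents into buf (its slot in the output file
// image). Pre-compressed sections copy their shards; otherwise each input
// section writes itself, potentially in parallel via the task group.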
2022-08-24 09:40:03 -07:00
|
|
|
template <class ELFT>
|
2024-10-03 20:56:09 -07:00
|
|
|
void OutputSection::writeTo(Ctx &ctx, uint8_t *buf, parallel::TaskGroup &tg) {
|
2021-12-20 10:51:24 -08:00
|
|
|
llvm::TimeTraceScope timeScope("Write sections", name);
|
2017-07-27 19:22:43 +00:00
|
|
|
if (type == SHT_NOBITS)
|
|
|
|
return;
|
2024-08-01 10:22:03 -07:00
|
|
|
if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
|
|
|
|
buf += encodeULEB128(crelHeader, buf);
|
|
|
|
memcpy(buf, crelBody.data(), crelBody.size());
|
|
|
|
return;
|
|
|
|
}
|
2017-07-27 19:22:43 +00:00
|
|
|
|
2024-03-12 10:56:14 -07:00
|
|
|
// If the section is compressed due to
|
|
|
|
// --compress-debug-section/--compress-sections, the content is already known.
|
[ELF] Parallelize --compress-debug-sections=zlib
When linking a Debug build clang (265MiB SHF_ALLOC sections, 920MiB uncompressed
debug info), in a --threads=1 link "Compress debug sections" takes 2/3 time and
in a --threads=8 link "Compress debug sections" takes ~70% time.
This patch splits a section into 1MiB shards and calls zlib `deflake` parallelly.
DEFLATE blocks are a bit sequence. We need to ensure every shard starts
at a byte boundary for concatenation. We use Z_SYNC_FLUSH for all shards
but the last to flush the output to a byte boundary. (Z_FULL_FLUSH can
be used as well, but Z_FULL_FLUSH clears the hash table which just
wastes time.)
The last block requires the BFINAL flag. We call deflate with Z_FINISH
to set the flag as well as flush the output to a byte boundary. Under
the hood, all of Z_SYNC_FLUSH, Z_FULL_FLUSH, and Z_FINISH emit a
non-compressed block (called stored block in zlib). RFC1951 says "Any
bits of input up to the next byte boundary are ignored."
In a --threads=8 link, "Compress debug sections" is 5.7x as fast and the total
speed is 2.54x. Because the hash table for one shard is not shared with the next
shard, the output is slightly larger. Better compression ratio can be achieved
by preloading the window size from the previous shard as dictionary
(`deflateSetDictionary`), but that is overkill.
```
# 1MiB shards
% bloaty clang.new -- clang.old
FILE SIZE VM SIZE
-------------- --------------
+0.3% +129Ki [ = ] 0 .debug_str
+0.1% +105Ki [ = ] 0 .debug_info
+0.3% +101Ki [ = ] 0 .debug_line
+0.2% +2.66Ki [ = ] 0 .debug_abbrev
+0.0% +1.19Ki [ = ] 0 .debug_ranges
+0.1% +341Ki [ = ] 0 TOTAL
# 2MiB shards
% bloaty clang.new -- clang.old
FILE SIZE VM SIZE
-------------- --------------
+0.2% +74.2Ki [ = ] 0 .debug_line
+0.1% +72.3Ki [ = ] 0 .debug_str
+0.0% +69.9Ki [ = ] 0 .debug_info
+0.1% +976 [ = ] 0 .debug_abbrev
+0.0% +882 [ = ] 0 .debug_ranges
+0.0% +218Ki [ = ] 0 TOTAL
```
Bonus in not using zlib::compress
* we can compress a debug section larger than 4GiB
* peak memory usage is lower because for most shards the output size is less
than 50% input size (all less than 55% for a large binary I tested, but
decreasing the initial output size does not decrease memory usage)
Reviewed By: ikudrin
Differential Revision: https://reviews.llvm.org/D117853
2022-01-25 10:29:04 -08:00
|
|
|
if (compressed.shards) {
|
2022-01-26 10:23:56 -08:00
|
|
|
auto *chdr = reinterpret_cast<typename ELFT::Chdr *>(buf);
|
2024-03-12 10:56:14 -07:00
|
|
|
chdr->ch_type = compressed.type;
|
2022-01-26 10:23:56 -08:00
|
|
|
chdr->ch_size = compressed.uncompressedSize;
|
2022-12-01 16:19:56 +00:00
|
|
|
chdr->ch_addralign = addralign;
|
2022-01-26 10:23:56 -08:00
|
|
|
buf += sizeof(*chdr);
|
2024-04-29 22:05:35 -07:00
|
|
|
|
|
|
|
auto offsets = std::make_unique<size_t[]>(compressed.numShards);
|
|
|
|
if (compressed.type == ELFCOMPRESS_ZLIB) {
|
|
|
|
buf[0] = 0x78; // CMF
|
|
|
|
buf[1] = 0x01; // FLG: best speed
|
|
|
|
offsets[0] = 2; // zlib header
|
|
|
|
write32be(buf + (size - sizeof(*chdr) - 4), compressed.checksum);
|
2022-09-09 10:30:18 -07:00
|
|
|
}
|
[ELF] Parallelize --compress-debug-sections=zlib
When linking a Debug build clang (265MiB SHF_ALLOC sections, 920MiB uncompressed
debug info), in a --threads=1 link "Compress debug sections" takes 2/3 time and
in a --threads=8 link "Compress debug sections" takes ~70% time.
This patch splits a section into 1MiB shards and calls zlib `deflake` parallelly.
DEFLATE blocks are a bit sequence. We need to ensure every shard starts
at a byte boundary for concatenation. We use Z_SYNC_FLUSH for all shards
but the last to flush the output to a byte boundary. (Z_FULL_FLUSH can
be used as well, but Z_FULL_FLUSH clears the hash table which just
wastes time.)
The last block requires the BFINAL flag. We call deflate with Z_FINISH
to set the flag as well as flush the output to a byte boundary. Under
the hood, all of Z_SYNC_FLUSH, Z_FULL_FLUSH, and Z_FINISH emit a
non-compressed block (called stored block in zlib). RFC1951 says "Any
bits of input up to the next byte boundary are ignored."
In a --threads=8 link, "Compress debug sections" is 5.7x as fast and the total
speed is 2.54x. Because the hash table for one shard is not shared with the next
shard, the output is slightly larger. Better compression ratio can be achieved
by preloading the window size from the previous shard as dictionary
(`deflateSetDictionary`), but that is overkill.
```
# 1MiB shards
% bloaty clang.new -- clang.old
FILE SIZE VM SIZE
-------------- --------------
+0.3% +129Ki [ = ] 0 .debug_str
+0.1% +105Ki [ = ] 0 .debug_info
+0.3% +101Ki [ = ] 0 .debug_line
+0.2% +2.66Ki [ = ] 0 .debug_abbrev
+0.0% +1.19Ki [ = ] 0 .debug_ranges
+0.1% +341Ki [ = ] 0 TOTAL
# 2MiB shards
% bloaty clang.new -- clang.old
FILE SIZE VM SIZE
-------------- --------------
+0.2% +74.2Ki [ = ] 0 .debug_line
+0.1% +72.3Ki [ = ] 0 .debug_str
+0.0% +69.9Ki [ = ] 0 .debug_info
+0.1% +976 [ = ] 0 .debug_abbrev
+0.0% +882 [ = ] 0 .debug_ranges
+0.0% +218Ki [ = ] 0 TOTAL
```
Bonus in not using zlib::compress
* we can compress a debug section larger than 4GiB
* peak memory usage is lower because for most shards the output size is less
than 50% input size (all less than 55% for a large binary I tested, but
decreasing the initial output size does not decrease memory usage)
Reviewed By: ikudrin
Differential Revision: https://reviews.llvm.org/D117853
2022-01-25 10:29:04 -08:00
|
|
|
|
|
|
|
// Compute shard offsets.
|
|
|
|
for (size_t i = 1; i != compressed.numShards; ++i)
|
|
|
|
offsets[i] = offsets[i - 1] + compressed.shards[i - 1].size();
|
2022-06-19 12:30:06 -04:00
|
|
|
parallelFor(0, compressed.numShards, [&](size_t i) {
|
      memcpy(buf + offsets[i], compressed.shards[i].data(),
             compressed.shards[i].size());
    });
    return;
  }

  // Write leading padding.
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  std::array<uint8_t, 4> filler = getFiller(ctx);
  bool nonZeroFiller = read32(ctx, filler.data()) != 0;
  if (nonZeroFiller)
    fill(buf, sections.empty() ? size : sections[0]->outSecOff, filler);
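
  // A non-alloc CREL section is emitted as the ULEB128-encoded header word
  // followed by the body pre-encoded in finalizeNonAllocCrel.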
  if (type == SHT_CREL && !(flags & SHF_ALLOC)) {
    buf += encodeULEB128(crelHeader, buf);
    memcpy(buf, crelBody.data(), crelBody.size());
    return;
  }

  auto fn = [=, &ctx](size_t begin, size_t end) {
    size_t numSections = sections.size();
    for (size_t i = begin; i != end; ++i) {
      InputSection *isec = sections[i];
      if (auto *s = dyn_cast<SyntheticSection>(isec))
        s->writeTo(buf + isec->outSecOff);
      else
        isec->writeTo<ELFT>(ctx, buf + isec->outSecOff);

      // When in Arm BE8 mode, the linker has to convert the big-endian
      // instructions to little-endian, leaving the data big-endian.
      if (ctx.arg.emachine == EM_ARM && !ctx.arg.isLE && ctx.arg.armBe8 &&
          (flags & SHF_EXECINSTR))
        convertArmInstructionstoBE8(ctx, isec, buf + isec->outSecOff);

      // Fill gaps between sections.
      if (nonZeroFiller) {
        uint8_t *start = buf + isec->outSecOff + isec->getSize();
        uint8_t *end;
        if (i + 1 == numSections)
          end = buf + size;
        else
          end = buf + sections[i + 1]->outSecOff;
        if (isec->nopFiller) {
          assert(ctx.target->nopInstrs);
          nopInstrFill(ctx, start, end - start);
        } else
          fill(start, end - start, filler);
      }
    }
  };

  // If there is any BYTE()-family command (rare), write the section content
  // first, then process BYTE to overwrite the filler content. The write is
  // serial due to the limitation of llvm/Support/Parallel.h.
  bool written = false;
  size_t numSections = sections.size();
  for (SectionCommand *cmd : commands)
    if (auto *data = dyn_cast<ByteCommand>(cmd)) {
      if (!std::exchange(written, true))
        fn(0, numSections);
      writeInt(ctx, buf + data->offset, data->expression().getValue(),
               data->size);
    }
  if (written || !numSections)
    return;

  // There is no data command. Write content asynchronously to overlap the
  // write time with other output sections. Note, if a linker script specifies
  // overlapping output sections (needs --noinhibit-exec or --no-check-sections
  // to suppress the error), the output may be non-deterministic.
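  // Group consecutive input sections into tasks of roughly taskSizeLimit
  // bytes and spawn one task per group.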
  const size_t taskSizeLimit = 4 << 20;
  for (size_t begin = 0, i = 0, taskSize = 0;;) {
    taskSize += sections[i]->getSize();
    bool done = ++i == numSections;
    if (done || taskSize >= taskSizeLimit) {
      tg.spawn([=] { fn(begin, i); });
      if (done)
        break;
      begin = i;
      taskSize = 0;
    }
  }
}

static void finalizeShtGroup(Ctx &ctx, OutputSection *os,
                             InputSection *section) {
  // The sh_link field for SHT_GROUP sections should contain the section index
  // of the symbol table.
  os->link = ctx.in.symTab->getParent()->sectionIndex;

  if (!section)
    return;

  // sh_info should contain the index of the entry in the symbol table section
  // that provides the signature of the section group.
  ArrayRef<Symbol *> symbols = section->file->getSymbols();
  os->info = ctx.in.symTab->getSymbolIndex(*symbols[section->info]);

  // Some group members may be combined or discarded, so we need to compute the
  // new size. The content will be rewritten in InputSection::copyShtGroup.
  DenseSet<uint32_t> seen;
  ArrayRef<InputSectionBase *> sections = section->file->getSections();
  for (const uint32_t &idx : section->getDataAs<uint32_t>().slice(1))
    if (OutputSection *osec = sections[read32(ctx, &idx)]->getOutputSection())
      seen.insert(osec->sectionIndex);
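  // The rewritten content is the leading flag word plus one section index per
  // surviving output section.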
  os->size = (1 + seen.size()) * sizeof(uint32_t);
}

template <class uint>
LLVM_ATTRIBUTE_ALWAYS_INLINE static void
encodeOneCrel(Ctx &ctx, raw_svector_ostream &os,
              Elf_Crel<sizeof(uint) == 8> &out, uint offset, const Symbol &sym,
              uint32_t type, uint addend) {
  const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
  out.r_offset = offset;
  int64_t symidx = ctx.in.symTab->getSymbolIndex(sym);
  if (sym.type == STT_SECTION) {
    auto *d = dyn_cast<Defined>(&sym);
    if (d) {
      SectionBase *section = d->section;
      assert(section->isLive());
      addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr;
    } else {
      // Encode R_*_NONE(symidx=0).
      symidx = type = addend = 0;
    }
  }

  // Similar to llvm::ELF::encodeCrel.
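  // The leading byte packs the offset delta with three presence bits: bit 0
  // marks a symbol index delta, bit 1 a type delta, bit 2 an addend delta.
  // Bits 3-6 hold the low four bits of the offset delta; bit 7 signals that
  // the remaining offset delta bits follow as a ULEB128.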
  uint8_t b = deltaOffset * 8 + (out.r_symidx != symidx) +
              (out.r_type != type ? 2 : 0) +
              (uint(out.r_addend) != addend ? 4 : 0);
  if (deltaOffset < 0x10) {
    os << char(b);
  } else {
    os << char(b | 0x80);
    encodeULEB128(deltaOffset >> 4, os);
  }
  if (b & 1) {
    encodeSLEB128(static_cast<int32_t>(symidx - out.r_symidx), os);
    out.r_symidx = symidx;
  }
  if (b & 2) {
    encodeSLEB128(static_cast<int32_t>(type - out.r_type), os);
    out.r_type = type;
  }
  if (b & 4) {
    encodeSLEB128(std::make_signed_t<uint>(addend - out.r_addend), os);
    out.r_addend = addend;
  }
}

template <class ELFT>
static size_t relToCrel(Ctx &ctx, raw_svector_ostream &os,
                        Elf_Crel<ELFT::Is64Bits> &out, InputSection *relSec,
                        InputSectionBase *sec) {
  const auto &file = *cast<ELFFileBase>(relSec->file);
  if (relSec->type == SHT_REL) {
    // REL conversion is complex and not supported yet.
    Err(ctx) << relSec << ": REL cannot be converted to CREL";
    return 0;
  }
  auto rels = relSec->getDataAs<typename ELFT::Rela>();
  for (auto rel : rels) {
    encodeOneCrel<typename ELFT::uint>(
        ctx, os, out, sec->getVA(rel.r_offset), file.getRelocTargetSym(rel),
        rel.getType(ctx.arg.isMips64EL), getAddend<ELFT>(rel));
  }
  return rels.size();
}

// Compute the content of a non-alloc CREL section due to -r or --emit-relocs.
// Input CREL sections are decoded while REL[A] sections need to be converted.
template <bool is64> void OutputSection::finalizeNonAllocCrel(Ctx &ctx) {
  using uint = typename Elf_Crel_Impl<is64>::uint;
  raw_svector_ostream os(crelBody);
  uint64_t totalCount = 0;
  Elf_Crel<is64> out{};
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  for (InputSection *relSec : isd->sections) {
    const auto &file = *cast<ELFFileBase>(relSec->file);
    InputSectionBase *sec = relSec->getRelocatedSection();
    if (relSec->type == SHT_CREL) {
      RelocsCrel<is64> entries(relSec->content_);
      totalCount += entries.size();
      for (Elf_Crel_Impl<is64> r : entries) {
        encodeOneCrel<uint>(ctx, os, out, uint(sec->getVA(r.r_offset)),
                            file.getSymbol(r.r_symidx), r.r_type, r.r_addend);
      }
      continue;
    }

    // Convert REL[A] to CREL.
    if constexpr (is64) {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF64LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF64BE>(ctx, os, out, relSec, sec);
    } else {
      totalCount += ctx.arg.isLE
                        ? relToCrel<ELF32LE>(ctx, os, out, relSec, sec)
                        : relToCrel<ELF32BE>(ctx, os, out, relSec, sec);
    }
  }

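  // The ULEB128 header word encodes the relocation count scaled by 8 plus
  // flag bits; the 4 records that explicit addends are present.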
  crelHeader = totalCount * 8 + 4;
  size = getULEB128Size(crelHeader) + crelBody.size();
}

void OutputSection::finalize(Ctx &ctx) {
  InputSection *first = getFirstInputSection(this);

  if (flags & SHF_LINK_ORDER) {
    // We must preserve the link order dependency of sections with the
    // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field.
    // We need to translate the InputSection sh_link to the OutputSection
    // sh_link; all InputSections in the OutputSection have the same dependency.
    if (auto *ex = dyn_cast<ARMExidxSyntheticSection>(first))
      link = ex->getLinkOrderDep()->getParent()->sectionIndex;
    else if (first->flags & SHF_LINK_ORDER)
      if (auto *d = first->getLinkOrderDep())
        link = d->getParent()->sectionIndex;
  }

  if (type == SHT_GROUP) {
    finalizeShtGroup(ctx, this, first);
    return;
  }

  if (!ctx.arg.copyRelocs || !isStaticRelSecType(type))
    return;

  // Skip if 'first' is synthetic, i.e. not a section created by --emit-relocs.
  // Normally 'type' was changed by 'first' so 'first' should be non-null.
  // However, if the output section is .rela.dyn, 'type' can be set by the
  // empty synthetic .rela.plt and 'first' can be null.
  if (!first || isa<SyntheticSection>(first))
    return;

  link = ctx.in.symTab->getParent()->sectionIndex;
  // sh_info for SHT_REL[A] sections should contain the section header index of
  // the section to which the relocation applies.
  InputSectionBase *s = first->getRelocatedSection();
  info = s->getOutputSection()->sectionIndex;
  flags |= SHF_INFO_LINK;
  // Finalize the content of non-alloc CREL.
  if (type == SHT_CREL) {
    if (ctx.arg.is64)
      finalizeNonAllocCrel<true>(ctx);
    else
      finalizeNonAllocCrel<false>(ctx);
  }
}

// Returns true if s matches one of the many forms in which the compiler
// driver may pass crtbegin files.
//
// GCC uses any of crtbegin[<empty>|S|T].o.
// Clang uses GCC's plus clang_rt.crtbegin[-<arch>|<empty>].o.
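// For example, crtbegin.o, crtbeginS.o, and clang_rt.crtbegin-x86_64.o all
// match "crtbegin", while a name such as mycrtbegin.o does not.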
static bool isCrt(StringRef s, StringRef beginEnd) {
  s = sys::path::filename(s);
  if (!s.consume_back(".o"))
    return false;
  if (s.consume_front("clang_rt."))
    return s.consume_front(beginEnd);
  return s.consume_front(beginEnd) && s.size() <= 1;
}

// .ctors and .dtors are sorted in this order:
//
// 1. .ctors/.dtors in crtbegin (which contains a sentinel value -1).
// 2. The section is named ".ctors" or ".dtors" (priority: 65536).
// 3. The section has an optional priority value in the form of ".ctors.N" or
//    ".dtors.N" where N is a number in the form of %05u (priority: 65535-N).
// 4. .ctors/.dtors in crtend (which contains a sentinel value 0).
//
// For 2 and 3, the sections are sorted by priority from high to low, e.g.
// .ctors (65536), .ctors.00100 (65435), .ctors.00200 (65335). In GNU ld's
// internal linker scripts, the sorting is by string comparison, which can
// achieve the same goal given that the optional priority values have the same
// length.
//
// In an ideal world, we don't need this function because .init_array and
// .ctors are duplicate features (and .init_array is newer). However, there
// are too many real-world use cases of .ctors, so we have no choice but to
// support them with these rather ad hoc semantics.
static bool compCtors(const InputSection *a, const InputSection *b) {
  bool beginA = isCrt(a->file->getName(), "crtbegin");
  bool beginB = isCrt(b->file->getName(), "crtbegin");
  if (beginA != beginB)
    return beginA;
  bool endA = isCrt(a->file->getName(), "crtend");
  bool endB = isCrt(b->file->getName(), "crtend");
  if (endA != endB)
    return endB;
  return getPriority(a->name) > getPriority(b->name);
}

// Sorts input sections by the special rules for .ctors and .dtors.
// Unfortunately, the rules are different from the ones for .{init,fini}_array.
// Read the comment above.
void OutputSection::sortCtorsDtors() {
  assert(commands.size() == 1);
  auto *isd = cast<InputSectionDescription>(commands[0]);
  llvm::stable_sort(isd->sections, compCtors);
}

// If an input string is in the form of "foo.N" where N is a number, returns N
// (65535-N if .ctors.N or .dtors.N). Otherwise, returns 65536, which is one
// greater than the lowest priority.
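// For example, getPriority(".init_array.00050") returns 50,
// getPriority(".ctors.00050") returns 65485, and getPriority(".text") returns
// 65536.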
int elf::getPriority(StringRef s) {
  size_t pos = s.rfind('.');
  if (pos == StringRef::npos)
    return 65536;
  int v = 65536;
  if (to_integer(s.substr(pos + 1), v, 10) &&
      (pos == 6 && (s.starts_with(".ctors") || s.starts_with(".dtors"))))
    v = 65535 - v;
  return v;
}

InputSection *elf::getFirstInputSection(const OutputSection *os) {
  for (SectionCommand *cmd : os->commands)
    if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
      if (!isd->sections.empty())
        return isd->sections[0];
  return nullptr;
}
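
// Returns the input sections of `os` as one flat list. If the output section
// has a single InputSectionDescription, its section list is returned directly;
// otherwise the lists are concatenated into `storage`.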
ArrayRef<InputSection *>
elf::getInputSections(const OutputSection &os,
                      SmallVector<InputSection *, 0> &storage) {
  ArrayRef<InputSection *> ret;
  storage.clear();
  for (SectionCommand *cmd : os.commands) {
    auto *isd = dyn_cast<InputSectionDescription>(cmd);
    if (!isd)
      continue;
    if (ret.empty()) {
      ret = isd->sections;
    } else {
      if (storage.empty())
        storage.assign(ret.begin(), ret.end());
      storage.insert(storage.end(), isd->sections.begin(), isd->sections.end());
    }
  }
  return storage.empty() ? ret : ArrayRef(storage);
}

// Sorts input sections by section name suffixes, so that .foo.N comes
// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
// We want to keep the original order if the priorities are the same
// because the compiler keeps the original initialization order in a
// translation unit and we need to respect that.
// For more detail, read the section of the GCC manual about init_priority.
void OutputSection::sortInitFini() {
  // Sort sections by priority.
  sort([](InputSectionBase *s) { return getPriority(s->name); });
}
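
// Returns the four-byte pattern used to fill gaps: the linker script's
// explicit fill value if one was given, the target's trap instruction for
// executable sections, and zeros otherwise.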
std::array<uint8_t, 4> OutputSection::getFiller(Ctx &ctx) {
  if (filler)
    return *filler;
  if (flags & SHF_EXECINSTR)
    return ctx.target->trapInstr;
  return {0, 0, 0, 0};
}
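
// Verify that the addends written into the output buffer match the addend
// values recorded in the DynamicReloc entries; a mismatch is reported as an
// internal error.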
void OutputSection::checkDynRelAddends(Ctx &ctx) {
  assert(ctx.arg.writeAddends && ctx.arg.checkDynamicRelocs);
  assert(isStaticRelSecType(type));
  SmallVector<InputSection *, 0> storage;
  ArrayRef<InputSection *> sections = getInputSections(*this, storage);
  parallelFor(0, sections.size(), [&](size_t i) {
    // When linking with -r or --emit-relocs we might also call this function
    // for input .rel[a].<sec> sections which we simply pass through to the
    // output. We skip over those and only look at the synthetic relocation
    // sections created during linking.
    if (!SyntheticSection::classof(sections[i]) ||
        !is_contained({ELF::SHT_REL, ELF::SHT_RELA, ELF::SHT_RELR},
                      sections[i]->type))
      return;
    const auto *sec = cast<RelocationBaseSection>(sections[i]);
    if (!sec)
      return;
    for (const DynamicReloc &rel : sec->relocs) {
      int64_t addend = rel.addend;
      const OutputSection *relOsec = rel.inputSec->getOutputSection();
      assert(relOsec != nullptr && "missing output section for relocation");
      // Some targets have NOBITS synthetic sections with dynamic relocations
      // with non-zero addends. Skip such sections.
      if (is_contained({EM_PPC, EM_PPC64}, ctx.arg.emachine) &&
          (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() ||
           rel.inputSec == ctx.in.igotPlt.get()))
        continue;
      const uint8_t *relocTarget = ctx.bufferStart + relOsec->offset +
                                   rel.inputSec->getOffset(rel.offsetInSec);
      // For SHT_NOBITS the written addend is always zero.
      int64_t writtenAddend =
          relOsec->type == SHT_NOBITS
              ? 0
              : ctx.target->getImplicitAddend(relocTarget, rel.type);
      if (addend != writtenAddend)
        InternalErr(ctx, relocTarget)
            << "wrote incorrect addend value 0x" << utohexstr(writtenAddend)
            << " instead of 0x" << utohexstr(addend)
            << " for dynamic relocation " << rel.type << " at offset 0x"
            << utohexstr(rel.getOffset())
            << (rel.sym ? " against symbol " + rel.sym->getName() : "");
    }
  });
}

template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);

template void OutputSection::writeTo<ELF32LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF32BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64LE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);
template void OutputSection::writeTo<ELF64BE>(Ctx &, uint8_t *,
                                              llvm::parallel::TaskGroup &);

template void OutputSection::maybeCompress<ELF32LE>(Ctx &);
template void OutputSection::maybeCompress<ELF32BE>(Ctx &);
template void OutputSection::maybeCompress<ELF64LE>(Ctx &);
template void OutputSection::maybeCompress<ELF64BE>(Ctx &);