2017-06-16 17:32:43 +00:00
|
|
|
//===- AMDGPU.cpp ---------------------------------------------------------===//
|
|
|
|
//
|
2019-01-19 08:50:56 +00:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2017-06-16 17:32:43 +00:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-23 20:44:34 -08:00
|
|
|
#include "InputFiles.h"
|
2017-06-16 17:32:43 +00:00
|
|
|
#include "Symbols.h"
|
|
|
|
#include "Target.h"
|
[lld] unified COFF and ELF error handling on new Common/ErrorHandler
Summary:
The COFF linker and the ELF linker have long had similar but separate
Error.h and Error.cpp files to implement error handling. This change
introduces new error handling code in Common/ErrorHandler.h, changes the
COFF and ELF linkers to use it, and removes the old, separate
implementations.
Reviewers: ruiu
Reviewed By: ruiu
Subscribers: smeenai, jyknight, emaste, sdardis, nemanjai, nhaehnle, mgorny, javed.absar, kbarton, fedor.sergeev, llvm-commits
Differential Revision: https://reviews.llvm.org/D39259
llvm-svn: 316624
2017-10-25 22:28:38 +00:00
|
|
|
#include "lld/Common/ErrorHandler.h"
|
2022-02-07 21:53:34 -08:00
|
|
|
#include "llvm/BinaryFormat/ELF.h"
|
2017-06-16 17:32:43 +00:00
|
|
|
#include "llvm/Support/Endian.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
using namespace llvm::object;
|
|
|
|
using namespace llvm::support::endian;
|
|
|
|
using namespace llvm::ELF;
|
2020-05-14 22:18:58 -07:00
|
|
|
using namespace lld;
|
|
|
|
using namespace lld::elf;
|
2017-06-16 17:32:43 +00:00
|
|
|
|
|
|
|
namespace {
// lld TargetInfo implementation for the AMDGPU (amdgcn) ELF target.
class AMDGPU final : public TargetInfo {
private:
  // Merge e_flags for HSA code-object V2/V3 inputs (all files must match
  // exactly).
  uint32_t calcEFlagsV3() const;
  // Merge e_flags for V4/V5 inputs: mach must match; the XNACK and SRAMECC
  // feature fields are merged (ANY combines with ON/OFF).
  uint32_t calcEFlagsV4() const;
  // V6: V4 merging plus the generic-version field, which must match.
  uint32_t calcEFlagsV6() const;

public:
  AMDGPU(Ctx &);
  // Compute the output ELF header e_flags from all input object files.
  uint32_t calcEFlags() const override;
  // Apply a relocation: write the computed value val at loc.
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  // Map a relocation type to the expression kind used to compute its value.
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  // Relocation type usable as a dynamic relocation for an absolute
  // relocation, or R_AMDGPU_NONE if there is none.
  RelType getDynRel(RelType type) const override;
  // Read the addend implicit in the section contents at buf.
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
};
} // namespace
|
|
|
|
|
2024-09-28 21:48:26 -07:00
|
|
|
AMDGPU::AMDGPU(Ctx &ctx) : TargetInfo(ctx) {
  // GOT entries and symbolic dynamic relocations both use the 64-bit
  // absolute relocation; relative dynamic relocations use the dedicated
  // 64-bit relative type.
  gotRel = R_AMDGPU_ABS64;
  symbolicRel = R_AMDGPU_ABS64;
  relativeRel = R_AMDGPU_RELATIVE64;
}
|
|
|
|
|
2017-10-24 19:40:03 +00:00
|
|
|
// Return the ELF header e_flags of an input object. The cast encodes the
// assumption that AMDGPU inputs are ELF64LE object files.
static uint32_t getEFlags(InputFile *file) {
  const auto *obj = cast<ObjFile<ELF64LE>>(file);
  return obj->getObj().getHeader().e_flags;
}
|
2017-10-24 19:05:32 +00:00
|
|
|
|
2021-03-24 13:39:47 -04:00
|
|
|
uint32_t AMDGPU::calcEFlagsV3() const {
|
2022-10-01 12:06:33 -07:00
|
|
|
uint32_t ret = getEFlags(ctx.objectFiles[0]);
|
2017-10-24 19:05:32 +00:00
|
|
|
|
|
|
|
// Verify that all input files have the same e_flags.
|
2023-01-09 18:11:07 +01:00
|
|
|
for (InputFile *f : ArrayRef(ctx.objectFiles).slice(1)) {
|
2017-10-24 19:40:03 +00:00
|
|
|
if (ret == getEFlags(f))
|
|
|
|
continue;
|
|
|
|
error("incompatible e_flags: " + toString(f));
|
|
|
|
return 0;
|
2017-10-24 19:05:32 +00:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-03-24 13:39:47 -04:00
|
|
|
uint32_t AMDGPU::calcEFlagsV4() const {
|
2022-10-01 12:06:33 -07:00
|
|
|
uint32_t retMach = getEFlags(ctx.objectFiles[0]) & EF_AMDGPU_MACH;
|
2022-06-29 18:53:38 -07:00
|
|
|
uint32_t retXnack =
|
2022-10-01 12:06:33 -07:00
|
|
|
getEFlags(ctx.objectFiles[0]) & EF_AMDGPU_FEATURE_XNACK_V4;
|
2021-03-24 13:39:47 -04:00
|
|
|
uint32_t retSramEcc =
|
2022-10-01 12:06:33 -07:00
|
|
|
getEFlags(ctx.objectFiles[0]) & EF_AMDGPU_FEATURE_SRAMECC_V4;
|
2021-03-24 13:39:47 -04:00
|
|
|
|
|
|
|
// Verify that all input files have compatible e_flags (same mach, all
|
|
|
|
// features in the same category are either ANY, ANY and ON, or ANY and OFF).
|
2023-01-09 18:11:07 +01:00
|
|
|
for (InputFile *f : ArrayRef(ctx.objectFiles).slice(1)) {
|
2021-03-24 13:39:47 -04:00
|
|
|
if (retMach != (getEFlags(f) & EF_AMDGPU_MACH)) {
|
|
|
|
error("incompatible mach: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (retXnack == EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4 ||
|
|
|
|
(retXnack != EF_AMDGPU_FEATURE_XNACK_ANY_V4 &&
|
|
|
|
(getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4)
|
|
|
|
!= EF_AMDGPU_FEATURE_XNACK_ANY_V4)) {
|
|
|
|
if (retXnack != (getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4)) {
|
|
|
|
error("incompatible xnack: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (retXnack == EF_AMDGPU_FEATURE_XNACK_ANY_V4)
|
|
|
|
retXnack = getEFlags(f) & EF_AMDGPU_FEATURE_XNACK_V4;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (retSramEcc == EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4 ||
|
|
|
|
(retSramEcc != EF_AMDGPU_FEATURE_SRAMECC_ANY_V4 &&
|
|
|
|
(getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4) !=
|
|
|
|
EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)) {
|
|
|
|
if (retSramEcc != (getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4)) {
|
|
|
|
error("incompatible sramecc: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (retSramEcc == EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)
|
|
|
|
retSramEcc = getEFlags(f) & EF_AMDGPU_FEATURE_SRAMECC_V4;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return retMach | retXnack | retSramEcc;
|
|
|
|
}
|
|
|
|
|
2024-02-05 08:19:53 +01:00
|
|
|
uint32_t AMDGPU::calcEFlagsV6() const {
|
|
|
|
uint32_t flags = calcEFlagsV4();
|
|
|
|
|
|
|
|
uint32_t genericVersion =
|
|
|
|
getEFlags(ctx.objectFiles[0]) & EF_AMDGPU_GENERIC_VERSION;
|
|
|
|
|
|
|
|
// Verify that all input files have compatible generic version.
|
|
|
|
for (InputFile *f : ArrayRef(ctx.objectFiles).slice(1)) {
|
|
|
|
if (genericVersion != (getEFlags(f) & EF_AMDGPU_GENERIC_VERSION)) {
|
|
|
|
error("incompatible generic version: " + toString(f));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
flags |= genericVersion;
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
2021-03-24 13:39:47 -04:00
|
|
|
uint32_t AMDGPU::calcEFlags() const {
|
2022-10-01 12:06:33 -07:00
|
|
|
if (ctx.objectFiles.empty())
|
2022-04-08 13:22:23 -04:00
|
|
|
return 0;
|
2021-03-24 13:39:47 -04:00
|
|
|
|
2022-10-01 12:06:33 -07:00
|
|
|
uint8_t abiVersion = cast<ObjFile<ELF64LE>>(ctx.objectFiles[0])
|
2022-06-29 18:53:38 -07:00
|
|
|
->getObj()
|
|
|
|
.getHeader()
|
|
|
|
.e_ident[EI_ABIVERSION];
|
2021-03-24 13:39:47 -04:00
|
|
|
switch (abiVersion) {
|
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V2:
|
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V3:
|
|
|
|
return calcEFlagsV3();
|
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V4:
|
2022-03-21 13:54:08 -07:00
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V5:
|
2021-03-24 13:39:47 -04:00
|
|
|
return calcEFlagsV4();
|
2024-02-05 08:19:53 +01:00
|
|
|
case ELFABIVERSION_AMDGPU_HSA_V6:
|
|
|
|
return calcEFlagsV6();
|
2021-03-24 13:39:47 -04:00
|
|
|
default:
|
|
|
|
error("unknown abi version: " + Twine(abiVersion));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-22 21:39:16 -08:00
|
|
|
// Write the computed relocation value val into the instruction/data bytes at
// loc, with the width and shift dictated by the relocation type.
void AMDGPU::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  // 32-bit fields taking the low 32 bits of the value.
  case R_AMDGPU_ABS32:
  case R_AMDGPU_GOTPCREL:
  case R_AMDGPU_GOTPCREL32_LO:
  case R_AMDGPU_REL32:
  case R_AMDGPU_REL32_LO:
    write32le(loc, val);
    break;
  // Full 64-bit fields.
  case R_AMDGPU_ABS64:
  case R_AMDGPU_REL64:
    write64le(loc, val);
    break;
  // *_HI variants store the high 32 bits of the 64-bit value.
  case R_AMDGPU_GOTPCREL32_HI:
  case R_AMDGPU_REL32_HI:
    write32le(loc, val >> 32);
    break;
  case R_AMDGPU_REL16: {
    // Encoded as a signed 16-bit count of 4-byte units, biased by -4 bytes
    // (presumably measured from the end of the 4-byte instruction word —
    // confirm against the AMDGPU processor ABI).
    int64_t simm = (static_cast<int64_t>(val) - 4) / 4;
    // Diagnose overflow of the 16-bit signed field before writing.
    checkInt(ctx, loc, simm, 16, rel);
    write16le(loc, simm);
    break;
  }
  default:
    // getRelExpr rejects unknown types earlier, so reaching here is a bug.
    llvm_unreachable("unknown relocation");
  }
}
|
|
|
|
|
2017-11-03 21:21:47 +00:00
|
|
|
// Classify each AMDGPU relocation by how its value is computed: absolute,
// PC-relative, or PC-relative through a GOT entry.
RelExpr AMDGPU::getRelExpr(RelType type, const Symbol &s,
                           const uint8_t *loc) const {
  if (type == R_AMDGPU_ABS32 || type == R_AMDGPU_ABS64)
    return R_ABS;

  if (type == R_AMDGPU_REL32 || type == R_AMDGPU_REL32_LO ||
      type == R_AMDGPU_REL32_HI || type == R_AMDGPU_REL64 ||
      type == R_AMDGPU_REL16)
    return R_PC;

  if (type == R_AMDGPU_GOTPCREL || type == R_AMDGPU_GOTPCREL32_LO ||
      type == R_AMDGPU_GOTPCREL32_HI)
    return R_GOT_PC;

  error(getErrorLoc(ctx, loc) + "unknown relocation (" + Twine(type) +
        ") against symbol " + toString(s));
  return R_NONE;
}
|
|
|
|
|
[ELF][ARM][AARCH64][MIPS][PPC] Simplify the logic to create R_*_RELATIVE for absolute relocation types in writable sections
Summary:
Our rule to create R_*_RELATIVE for absolute relocation types were
loose. D63121 made it stricter but it failed to create R_*_RELATIVE for
R_ARM_TARGET1 and R_PPC64_TOC. rLLD363236 worked around that by
reinstating the original behavior for ARM and PPC64.
This patch is an attempt to simplify the logic.
Note, in ld.bfd, R_ARM_TARGET2 --target2=abs also creates
R_ARM_RELATIVE. This seems a very uncommon scenario (moreover,
--target2=got-rel is the default), so I do not implement any logic
related to it.
Also, delete R_AARCH64_ABS32 from AArch64::getDynRel. We don't have
working ILP32 support yet. Allowing it would create an incorrect
R_AARCH64_RELATIVE.
For MIPS, the (if SymbolRel, then RelativeRel) code is to keep its
behavior unchanged.
Note, in ppc64-abs64-dyn.s, R_PPC64_TOC gets an incorrect addend because
computeAddend() doesn't compute the correct address. We seem to have the
wrong behavior for a long time. The important thing seems that a dynamic
relocation R_PPC64_TOC should not be created as the dynamic loader will
error R_PPC64_TOC is not supported.
Reviewers: atanasyan, grimar, peter.smith, ruiu, sfertile, espindola
Reviewed By: ruiu
Differential Revision: https://reviews.llvm.org/D63383
llvm-svn: 363928
2019-06-20 14:00:08 +00:00
|
|
|
// Only the 64-bit absolute relocation can be passed through as a dynamic
// relocation; any other type yields R_AMDGPU_NONE.
RelType AMDGPU::getDynRel(RelType type) const {
  if (type != R_AMDGPU_ABS64)
    return R_AMDGPU_NONE;
  return type;
}
|
|
|
|
|
2023-09-15 22:59:08 -07:00
|
|
|
int64_t AMDGPU::getImplicitAddend(const uint8_t *buf, RelType type) const {
|
|
|
|
switch (type) {
|
|
|
|
case R_AMDGPU_NONE:
|
|
|
|
return 0;
|
|
|
|
case R_AMDGPU_ABS64:
|
|
|
|
case R_AMDGPU_RELATIVE64:
|
2024-10-13 10:47:18 -07:00
|
|
|
return read64(ctx, buf);
|
2023-09-15 22:59:08 -07:00
|
|
|
default:
|
2024-10-06 19:36:21 -07:00
|
|
|
internalLinkerError(getErrorLoc(ctx, buf),
|
2023-09-15 22:59:08 -07:00
|
|
|
"cannot read addend for relocation " + toString(type));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-07 23:14:02 -07:00
|
|
|
// Install the AMDGPU target implementation into the link context.
void elf::setAMDGPUTargetInfo(Ctx &ctx) {
  ctx.target.reset(new AMDGPU(ctx));
}
|