llvm-project/clang/lib/CodeGen/TargetInfo.cpp


//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "ABIInfoImpl.h"
#include "CodeGenFunction.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace CodeGen;

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
OS << "Direct Type=";
if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
OS << "Indirect Align=" << getIndirectAlign().getQuantity()
<< " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
  case IndirectAliased:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " AddrSpace=" << getIndirectAddrSpace()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}
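
// Example (illustrative): for a Direct argument coerced to i64, the dump
// above prints something like:
//   (ABIArgInfo Kind=Direct Type=i64)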

TargetCodeGenInfo::TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info)
    : Info(std::move(Info)) {}

TargetCodeGenInfo::~TargetCodeGenInfo() = default;

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(
    const CallArgList &args, const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}
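
// For instance, with this default implementation a dependent library named
// "rt" becomes the linker option "-lrt"; targets whose linkers expect a
// different spelling are expected to override this hook.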

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list. This enables a feasible implementation of clSetKernelArg() with
  // aggregates etc. If we used the default C calling convention here,
  // clSetKernelArg() might break, depending on the target-specific
  // conventions; different targets might split structs passed by value
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make the IR more readable.
  return CGF.Builder.CreateAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

void TargetCodeGenInfo::addStackProbeTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
    if (CGM.getCodeGenOpts().StackProbeSize != 4096)
      Fn->addFnAttr("stack-probe-size",
                    llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
    if (CGM.getCodeGenOpts().NoStackArgProbe)
      Fn->addFnAttr("no-stack-arg-probe");
  }
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
llvm::Value *TargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto *InvokeFT = Invoke->getFunctionType();
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C),
                                     InvokeFT->params(), false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
                                   &CGF.CGM.getModule());
  llvm::CallingConv::ID KernelCC =
      CGF.getTypes().ClangCallConvToLLVMCallConv(CallingConv::CC_OpenCLKernel);
  F->setCallingConv(KernelCC);
  llvm::AttrBuilder KernelAttrs(C);
  // FIXME: This is missing setTargetAttributes
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  F->addFnAttrs(KernelAttrs);
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args(llvm::make_pointer_range(F->args()));
  llvm::CallInst *Call = Builder.CreateCall(Invoke, Args);
  Call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
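
// A rough sketch of the wrapper this produces, assuming a void block invoke
// function @foo_block_invoke taking a single pointer, on a target where the
// OpenCL kernel convention lowers to spir_kernel:
//
//   define spir_kernel void @foo_block_invoke_kernel(ptr %block) {
//   entry:
//     call void @foo_block_invoke(ptr %block)
//     ret void
//   }
//
// The inner call keeps the invoke function's own calling convention.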

namespace {
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createDefaultTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<DefaultTargetCodeGenInfo>(CGM.getTypes());
}