2017-10-19 21:21:30 +00:00
|
|
|
//===- ArgumentPromotion.cpp - Promote by-reference arguments -------------===//
|
2005-04-21 23:48:37 +00:00
|
|
|
//
|
2019-01-19 08:50:56 +00:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2005-04-21 23:48:37 +00:00
|
|
|
//
|
2004-03-07 21:29:54 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass promotes "by reference" arguments to be "by value" arguments. In
|
|
|
|
// practice, this means looking for internal functions that have pointer
|
2007-10-26 03:03:51 +00:00
|
|
|
// arguments. If it can prove, through the use of alias analysis, that an
|
|
|
|
// argument is *only* loaded, then it can pass the value into the function
|
2004-03-07 21:29:54 +00:00
|
|
|
// instead of the address of the value. This can cause recursive simplification
|
2004-05-23 21:21:17 +00:00
|
|
|
// of code and lead to the elimination of allocas (especially in C++ template
|
|
|
|
// code like the STL).
|
2004-03-07 21:29:54 +00:00
|
|
|
//
|
2004-03-08 01:04:36 +00:00
|
|
|
// This pass also handles aggregate arguments that are passed into a function,
|
|
|
|
// scalarizing them if the elements of the aggregate are only loaded. Note that
|
2008-07-29 10:00:13 +00:00
|
|
|
// by default it refuses to scalarize aggregates which would require passing in
|
|
|
|
// more than three operands to the function, because passing thousands of
|
2008-09-07 09:54:09 +00:00
|
|
|
// operands for a large array or structure is unprofitable! This limit can be
|
2008-07-29 10:00:13 +00:00
|
|
|
// configured or disabled, however.
|
2004-03-08 01:04:36 +00:00
|
|
|
//
|
2004-03-07 21:29:54 +00:00
|
|
|
// Note that this transformation could also be done for arguments that are only
|
2007-10-26 03:03:51 +00:00
|
|
|
// stored to (returning the value instead), but does not currently. This case
|
|
|
|
// would be best handled when and if LLVM begins supporting multiple return
|
|
|
|
// values from functions.
|
2004-03-07 21:29:54 +00:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-02-09 23:46:27 +00:00
|
|
|
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
|
2012-12-03 16:50:05 +00:00
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/ADT/None.h"
|
2017-02-09 23:46:27 +00:00
|
|
|
#include "llvm/ADT/Optional.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2020-08-03 19:18:13 +01:00
|
|
|
#include "llvm/ADT/ScopeExit.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2012-12-03 16:50:05 +00:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/ADT/Twine.h"
|
2016-12-19 08:22:17 +00:00
|
|
|
#include "llvm/Analysis/AssumptionCache.h"
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-09 17:55:00 +00:00
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/Analysis/CGSCCPassManager.h"
|
2012-12-03 16:50:05 +00:00
|
|
|
#include "llvm/Analysis/CallGraph.h"
|
2013-01-07 15:26:48 +00:00
|
|
|
#include "llvm/Analysis/CallGraphSCCPass.h"
|
2017-02-09 23:46:27 +00:00
|
|
|
#include "llvm/Analysis/LazyCallGraph.h"
|
2016-02-24 12:49:04 +00:00
|
|
|
#include "llvm/Analysis/Loads.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/Analysis/MemoryLocation.h"
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-09 17:55:00 +00:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2019-01-16 05:15:31 +00:00
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
2022-03-21 21:53:28 +01:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/IR/Argument.h"
|
|
|
|
#include "llvm/IR/Attributes.h"
|
|
|
|
#include "llvm/IR/BasicBlock.h"
|
2014-03-04 11:45:46 +00:00
|
|
|
#include "llvm/IR/CFG.h"
|
2013-01-02 11:36:10 +00:00
|
|
|
#include "llvm/IR/Constants.h"
|
2014-07-10 05:27:53 +00:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2013-01-02 11:36:10 +00:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/IR/Function.h"
|
2019-04-02 17:42:17 +00:00
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
2013-01-02 11:36:10 +00:00
|
|
|
#include "llvm/IR/Instructions.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/IR/Metadata.h"
|
2013-01-02 11:36:10 +00:00
|
|
|
#include "llvm/IR/Module.h"
|
2019-04-02 17:42:17 +00:00
|
|
|
#include "llvm/IR/NoFolder.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/IR/PassManager.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
|
|
|
#include "llvm/IR/User.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-13 13:15:01 -08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2004-09-01 22:55:40 +00:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-25 00:23:56 +00:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-01-29 08:03:19 +00:00
|
|
|
#include "llvm/Transforms/IPO.h"
|
2017-10-19 21:21:30 +00:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstdint>
|
|
|
|
#include <utility>
|
|
|
|
#include <vector>
|
|
|
|
|
2004-03-07 21:29:54 +00:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 02:55:47 +00:00
|
|
|
#define DEBUG_TYPE "argpromotion"
|
|
|
|
|
2017-01-29 08:03:19 +00:00
|
|
|
STATISTIC(NumArgumentsPromoted, "Number of pointer arguments promoted");
|
|
|
|
STATISTIC(NumByValArgsPromoted, "Number of byval arguments promoted");
|
|
|
|
STATISTIC(NumArgumentsDead, "Number of dead pointer args eliminated");
|
2004-03-07 21:29:54 +00:00
|
|
|
|
2022-04-28 09:51:39 -07:00
|
|
|
namespace {

/// Describes one promotable piece of a pointer argument: the scalar type that
/// is loaded from the argument, the alignment of that load, and a
/// representative load instruction used as a metadata source.
struct ArgPart {
  // Type of the value loaded from this part of the argument.
  Type *Ty;
  // Known alignment of the load for this part.
  Align Alignment;
  /// A representative guaranteed-executed load instruction for use by
  /// metadata transfer.
  LoadInst *MustExecLoad;
};

/// Pairs a part's byte offset within the original pointer argument with its
/// ArgPart description.
using OffsetAndArgPart = std::pair<int64_t, ArgPart>;

} // end anonymous namespace
|
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
/// Create a pointer of type \p ResElemTy* pointing \p Offset bytes past
/// \p Ptr.
///
/// For typed (non-opaque) pointers this first attempts to express the offset
/// as a structured GEP over the pointee type (optionally padded with trailing
/// zero indices to land on \p ResElemTy); if that is not possible it falls
/// back to bitcasting to i8* and emitting a byte-offset GEP. The returned
/// value is always bitcast to ResElemTy in the original address space.
static Value *createByteGEP(IRBuilderBase &IRB, const DataLayout &DL,
                            Value *Ptr, Type *ResElemTy, int64_t Offset) {
  // For non-opaque pointers, try to create a "nice" GEP if possible, otherwise
  // fall back to an i8 GEP to a specific offset.
  unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
  APInt OrigOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
  if (!Ptr->getType()->isOpaquePointerTy()) {
    Type *OrigElemTy = Ptr->getType()->getNonOpaquePointerElementType();
    // No offset and already the right pointee type: nothing to do.
    if (OrigOffset == 0 && OrigElemTy == ResElemTy)
      return Ptr;

    if (OrigElemTy->isSized()) {
      // Ask DataLayout for structured indices covering as much of the offset
      // as possible; TmpOffset is left holding any unrepresentable remainder.
      APInt TmpOffset = OrigOffset;
      Type *TmpTy = OrigElemTy;
      SmallVector<APInt> IntIndices =
          DL.getGEPIndicesForOffset(TmpTy, TmpOffset);
      if (TmpOffset == 0) {
        // Try to add trailing zero indices to reach the right type.
        while (TmpTy != ResElemTy) {
          Type *NextTy = GetElementPtrInst::getTypeAtIndex(TmpTy, (uint64_t)0);
          if (!NextTy)
            break;

          // Struct indices must be i32; array/vector indices use the index
          // type's native width.
          IntIndices.push_back(APInt::getZero(
              isa<StructType>(TmpTy) ? 32 : OrigOffset.getBitWidth()));
          TmpTy = NextTy;
        }

        SmallVector<Value *> Indices;
        for (const APInt &Index : IntIndices)
          Indices.push_back(IRB.getInt(Index));

        // Emit the structured GEP when it does useful work: either the
        // offset is nonzero (the GEP is needed regardless), or the trailing
        // zeros reached ResElemTy exactly. Otherwise fall through to the
        // plain bitcast below.
        if (OrigOffset != 0 || TmpTy == ResElemTy) {
          Ptr = IRB.CreateGEP(OrigElemTy, Ptr, Indices);
          return IRB.CreateBitCast(Ptr, ResElemTy->getPointerTo(AddrSpace));
        }
      }
    }
  }

  // Fallback: byte-addressed GEP through i8* for any nonzero offset.
  if (OrigOffset != 0) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(AddrSpace));
    Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(OrigOffset));
  }
  return IRB.CreateBitCast(Ptr, ResElemTy->getPointerTo(AddrSpace));
}
|
2016-07-02 18:59:51 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
/// DoPromotion - This method actually performs the promotion of the specified
|
|
|
|
/// arguments, and returns the new function. At this point, we know that it's
|
|
|
|
/// safe to do so.
|
2022-01-28 17:22:58 +01:00
|
|
|
static Function *doPromotion(
|
|
|
|
Function *F,
|
|
|
|
const DenseMap<Argument *, SmallVector<OffsetAndArgPart, 4>> &ArgsToPromote,
|
|
|
|
SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
|
|
|
|
Optional<function_ref<void(CallBase &OldCS, CallBase &NewCS)>>
|
|
|
|
ReplaceCallSite) {
|
2017-01-29 08:03:16 +00:00
|
|
|
// Start by computing a new prototype for the function, which is the same as
|
|
|
|
// the old function, but has modified arguments.
|
|
|
|
FunctionType *FTy = F->getFunctionType();
|
2017-01-29 08:03:19 +00:00
|
|
|
std::vector<Type *> Params;
|
2016-07-02 18:59:51 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Attribute - Keep track of the parameter attributes for the arguments
|
|
|
|
// that we are *not* promoting. For the ones that we do promote, the parameter
|
|
|
|
// attributes are lost
|
2017-04-13 00:58:09 +00:00
|
|
|
SmallVector<AttributeSet, 8> ArgAttrVec;
|
|
|
|
AttributeList PAL = F->getAttributes();
|
2014-08-28 22:42:00 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// First, determine the new argument list
|
2017-04-28 18:37:16 +00:00
|
|
|
unsigned ArgNo = 0;
|
2017-01-29 08:03:16 +00:00
|
|
|
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
|
2017-04-28 18:37:16 +00:00
|
|
|
++I, ++ArgNo) {
|
2017-01-29 08:03:16 +00:00
|
|
|
if (ByValArgsToTransform.count(&*I)) {
|
|
|
|
// Simple byval argument? Just add all the struct element types.
|
2021-07-13 09:29:53 -07:00
|
|
|
Type *AgTy = I->getParamByValType();
|
2017-01-29 08:03:16 +00:00
|
|
|
StructType *STy = cast<StructType>(AgTy);
|
2020-12-27 09:57:28 -08:00
|
|
|
llvm::append_range(Params, STy->elements());
|
2017-04-13 00:58:09 +00:00
|
|
|
ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
|
|
|
|
AttributeSet());
|
2017-01-29 08:03:16 +00:00
|
|
|
++NumByValArgsPromoted;
|
|
|
|
} else if (!ArgsToPromote.count(&*I)) {
|
|
|
|
// Unchanged argument
|
|
|
|
Params.push_back(I->getType());
|
2021-08-13 11:16:52 -07:00
|
|
|
ArgAttrVec.push_back(PAL.getParamAttrs(ArgNo));
|
2017-01-29 08:03:16 +00:00
|
|
|
} else if (I->use_empty()) {
|
|
|
|
// Dead argument (which are always marked as promotable)
|
|
|
|
++NumArgumentsDead;
|
|
|
|
} else {
|
2022-01-28 17:22:58 +01:00
|
|
|
const auto &ArgParts = ArgsToPromote.find(&*I)->second;
|
|
|
|
for (const auto &Pair : ArgParts) {
|
|
|
|
Params.push_back(Pair.second.Ty);
|
2017-04-13 00:58:09 +00:00
|
|
|
ArgAttrVec.push_back(AttributeSet());
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
2022-01-28 17:22:58 +01:00
|
|
|
++NumArgumentsPromoted;
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
2014-08-28 22:42:00 +00:00
|
|
|
}
|
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
Type *RetTy = FTy->getReturnType();
|
2014-08-28 22:42:00 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Construct the new function type using the new arguments.
|
|
|
|
FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
|
2014-08-28 22:42:00 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Create the new function body and insert it into the module.
|
2018-12-18 09:52:52 +00:00
|
|
|
Function *NF = Function::Create(NFTy, F->getLinkage(), F->getAddressSpace(),
|
|
|
|
F->getName());
|
2017-01-29 08:03:16 +00:00
|
|
|
NF->copyAttributesFrom(F);
|
2020-09-10 13:08:57 -04:00
|
|
|
NF->copyMetadata(F, 0);
|
2019-02-28 01:11:12 +00:00
|
|
|
|
2020-09-10 13:08:57 -04:00
|
|
|
// The new function will have the !dbg metadata copied from the original
|
|
|
|
// function. The original function may not be deleted, and dbg metadata need
|
2022-04-28 15:31:00 +02:00
|
|
|
// to be unique, so we need to drop it.
|
2019-02-28 01:11:12 +00:00
|
|
|
F->setSubprogram(nullptr);
|
2014-08-28 22:42:00 +00:00
|
|
|
|
2018-05-14 12:53:11 +00:00
|
|
|
LLVM_DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
|
|
|
|
<< "From: " << *F);
|
2017-01-29 08:03:19 +00:00
|
|
|
|
2022-05-02 13:29:34 +08:00
|
|
|
uint64_t LargestVectorWidth = 0;
|
|
|
|
for (auto *I : Params)
|
|
|
|
if (auto *VT = dyn_cast<llvm::VectorType>(I))
|
|
|
|
LargestVectorWidth = std::max(
|
|
|
|
LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize());
|
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Recompute the parameter attributes list based on the new arguments for
|
|
|
|
// the function.
|
2021-08-13 11:16:52 -07:00
|
|
|
NF->setAttributes(AttributeList::get(F->getContext(), PAL.getFnAttrs(),
|
|
|
|
PAL.getRetAttrs(), ArgAttrVec));
|
2022-05-02 13:29:34 +08:00
|
|
|
AttributeFuncs::updateMinLegalVectorWidthAttr(*NF, LargestVectorWidth);
|
2017-04-13 00:58:09 +00:00
|
|
|
ArgAttrVec.clear();
|
2014-08-28 22:42:00 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
F->getParent()->getFunctionList().insert(F->getIterator(), NF);
|
|
|
|
NF->takeName(F);
|
2014-08-28 22:42:00 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Loop over all of the callers of the function, transforming the call sites
|
|
|
|
// to pass in the loaded pointers.
|
|
|
|
//
|
2017-01-29 08:03:19 +00:00
|
|
|
SmallVector<Value *, 16> Args;
|
2020-10-20 13:08:07 -07:00
|
|
|
const DataLayout &DL = F->getParent()->getDataLayout();
|
2017-01-29 08:03:16 +00:00
|
|
|
while (!F->use_empty()) {
|
2020-04-20 18:14:13 -07:00
|
|
|
CallBase &CB = cast<CallBase>(*F->user_back());
|
|
|
|
assert(CB.getCalledFunction() == F);
|
|
|
|
const AttributeList &CallPAL = CB.getAttributes();
|
|
|
|
IRBuilder<NoFolder> IRB(&CB);
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-09 17:55:00 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Loop over the operands, inserting GEP and loads in the caller as
|
|
|
|
// appropriate.
|
2022-04-28 15:31:00 +02:00
|
|
|
auto *AI = CB.arg_begin();
|
2017-04-28 18:37:16 +00:00
|
|
|
ArgNo = 0;
|
2017-01-29 08:03:19 +00:00
|
|
|
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
|
2017-04-28 18:37:16 +00:00
|
|
|
++I, ++AI, ++ArgNo)
|
2017-01-29 08:03:16 +00:00
|
|
|
if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
|
2017-01-29 08:03:19 +00:00
|
|
|
Args.push_back(*AI); // Unmodified argument
|
2021-08-13 11:16:52 -07:00
|
|
|
ArgAttrVec.push_back(CallPAL.getParamAttrs(ArgNo));
|
2017-01-29 08:03:16 +00:00
|
|
|
} else if (ByValArgsToTransform.count(&*I)) {
|
|
|
|
// Emit a GEP and load for each element of the struct.
|
2021-07-13 09:29:53 -07:00
|
|
|
Type *AgTy = I->getParamByValType();
|
2017-01-29 08:03:16 +00:00
|
|
|
StructType *STy = cast<StructType>(AgTy);
|
|
|
|
Value *Idxs[2] = {
|
2017-01-29 08:03:19 +00:00
|
|
|
ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr};
|
2020-10-20 13:08:07 -07:00
|
|
|
const StructLayout *SL = DL.getStructLayout(STy);
|
|
|
|
Align StructAlign = *I->getParamAlign();
|
2022-04-28 15:31:00 +02:00
|
|
|
for (unsigned J = 0, Elems = STy->getNumElements(); J != Elems; ++J) {
|
|
|
|
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), J);
|
2019-04-02 17:42:17 +00:00
|
|
|
auto *Idx =
|
2022-04-28 15:31:00 +02:00
|
|
|
IRB.CreateGEP(STy, *AI, Idxs, (*AI)->getName() + "." + Twine(J));
|
2017-01-29 08:03:16 +00:00
|
|
|
// TODO: Tell AA about the new values?
|
2020-10-20 13:08:07 -07:00
|
|
|
Align Alignment =
|
2022-04-28 15:31:00 +02:00
|
|
|
commonAlignment(StructAlign, SL->getElementOffset(J));
|
2020-10-20 13:08:07 -07:00
|
|
|
Args.push_back(IRB.CreateAlignedLoad(
|
2022-04-28 15:31:00 +02:00
|
|
|
STy->getElementType(J), Idx, Alignment, Idx->getName() + ".val"));
|
2017-04-13 00:58:09 +00:00
|
|
|
ArgAttrVec.push_back(AttributeSet());
|
2008-01-11 22:31:41 +00:00
|
|
|
}
|
2017-01-29 08:03:16 +00:00
|
|
|
} else if (!I->use_empty()) {
|
2022-01-28 17:22:58 +01:00
|
|
|
Value *V = *AI;
|
|
|
|
const auto &ArgParts = ArgsToPromote.find(&*I)->second;
|
|
|
|
for (const auto &Pair : ArgParts) {
|
|
|
|
LoadInst *LI = IRB.CreateAlignedLoad(
|
|
|
|
Pair.second.Ty,
|
|
|
|
createByteGEP(IRB, DL, V, Pair.second.Ty, Pair.first),
|
|
|
|
Pair.second.Alignment, V->getName() + ".val");
|
|
|
|
if (Pair.second.MustExecLoad) {
|
|
|
|
LI->setAAMetadata(Pair.second.MustExecLoad->getAAMetadata());
|
2022-02-10 11:26:26 +01:00
|
|
|
LI->copyMetadata(*Pair.second.MustExecLoad,
|
|
|
|
{LLVMContext::MD_range, LLVMContext::MD_nonnull,
|
|
|
|
LLVMContext::MD_dereferenceable,
|
|
|
|
LLVMContext::MD_dereferenceable_or_null,
|
|
|
|
LLVMContext::MD_align, LLVMContext::MD_noundef});
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
2022-01-28 17:22:58 +01:00
|
|
|
Args.push_back(LI);
|
2017-04-13 00:58:09 +00:00
|
|
|
ArgAttrVec.push_back(AttributeSet());
|
2011-01-16 08:09:24 +00:00
|
|
|
}
|
2008-05-27 11:50:51 +00:00
|
|
|
}
|
2008-09-07 09:54:09 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Push any varargs arguments on the list.
|
2020-04-20 18:14:13 -07:00
|
|
|
for (; AI != CB.arg_end(); ++AI, ++ArgNo) {
|
2017-01-29 08:03:16 +00:00
|
|
|
Args.push_back(*AI);
|
2021-08-13 11:16:52 -07:00
|
|
|
ArgAttrVec.push_back(CallPAL.getParamAttrs(ArgNo));
|
2011-01-16 08:09:24 +00:00
|
|
|
}
|
2008-09-07 09:54:09 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
SmallVector<OperandBundleDef, 1> OpBundles;
|
2020-04-20 18:14:13 -07:00
|
|
|
CB.getOperandBundlesAsDefs(OpBundles);
|
2004-03-07 21:29:54 +00:00
|
|
|
|
2020-04-20 18:14:13 -07:00
|
|
|
CallBase *NewCS = nullptr;
|
|
|
|
if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
|
2017-04-13 00:58:09 +00:00
|
|
|
NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
|
2020-04-20 18:14:13 -07:00
|
|
|
Args, OpBundles, "", &CB);
|
2017-01-29 08:03:16 +00:00
|
|
|
} else {
|
2020-04-20 18:14:13 -07:00
|
|
|
auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", &CB);
|
|
|
|
NewCall->setTailCallKind(cast<CallInst>(&CB)->getTailCallKind());
|
2017-04-13 00:58:09 +00:00
|
|
|
NewCS = NewCall;
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
2020-04-20 18:14:13 -07:00
|
|
|
NewCS->setCallingConv(CB.getCallingConv());
|
2021-08-13 11:16:52 -07:00
|
|
|
NewCS->setAttributes(AttributeList::get(F->getContext(),
|
|
|
|
CallPAL.getFnAttrs(),
|
|
|
|
CallPAL.getRetAttrs(), ArgAttrVec));
|
2020-06-04 14:30:58 +07:00
|
|
|
NewCS->copyMetadata(CB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
|
2017-01-29 08:03:16 +00:00
|
|
|
Args.clear();
|
2017-04-13 00:58:09 +00:00
|
|
|
ArgAttrVec.clear();
|
2004-11-13 23:31:34 +00:00
|
|
|
|
2022-05-02 13:29:34 +08:00
|
|
|
AttributeFuncs::updateMinLegalVectorWidthAttr(*CB.getCaller(),
|
|
|
|
LargestVectorWidth);
|
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Update the callgraph to know that the callsite has been transformed.
|
2017-02-09 23:46:27 +00:00
|
|
|
if (ReplaceCallSite)
|
2020-04-20 18:14:13 -07:00
|
|
|
(*ReplaceCallSite)(CB, *NewCS);
|
2004-11-13 23:31:34 +00:00
|
|
|
|
2020-04-20 18:14:13 -07:00
|
|
|
if (!CB.use_empty()) {
|
|
|
|
CB.replaceAllUsesWith(NewCS);
|
|
|
|
NewCS->takeName(&CB);
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
2004-11-13 23:31:34 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Finally, remove the old call from the program, reducing the use-count of
|
|
|
|
// F.
|
2020-04-20 18:14:13 -07:00
|
|
|
CB.eraseFromParent();
|
2004-11-13 23:31:34 +00:00
|
|
|
}
|
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Since we have now created the new function, splice the body of the old
|
|
|
|
// function right into the new function, leaving the old rotting hulk of the
|
|
|
|
// function empty.
|
|
|
|
NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());
|
|
|
|
|
|
|
|
// Loop over the argument list, transferring uses of the old arguments over to
|
|
|
|
// the new arguments, also transferring over the names as well.
|
2022-02-01 10:32:58 +01:00
|
|
|
Function::arg_iterator I2 = NF->arg_begin();
|
|
|
|
for (Argument &Arg : F->args()) {
|
|
|
|
if (!ArgsToPromote.count(&Arg) && !ByValArgsToTransform.count(&Arg)) {
|
2017-01-29 08:03:16 +00:00
|
|
|
// If this is an unmodified argument, move the name and users over to the
|
|
|
|
// new version.
|
2022-02-01 10:32:58 +01:00
|
|
|
Arg.replaceAllUsesWith(&*I2);
|
|
|
|
I2->takeName(&Arg);
|
2017-01-29 08:03:16 +00:00
|
|
|
++I2;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2022-02-01 10:32:58 +01:00
|
|
|
if (ByValArgsToTransform.count(&Arg)) {
|
2017-01-29 08:03:16 +00:00
|
|
|
// In the callee, we create an alloca, and store each of the new incoming
|
|
|
|
// arguments into the alloca.
|
|
|
|
Instruction *InsertPt = &NF->begin()->front();
|
|
|
|
|
|
|
|
// Just add all the struct element types.
|
2022-02-01 10:32:58 +01:00
|
|
|
Type *AgTy = Arg.getParamByValType();
|
|
|
|
Align StructAlign = *Arg.getParamAlign();
|
2020-10-20 13:08:07 -07:00
|
|
|
Value *TheAlloca = new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr,
|
|
|
|
StructAlign, "", InsertPt);
|
2017-01-29 08:03:16 +00:00
|
|
|
StructType *STy = cast<StructType>(AgTy);
|
2017-01-29 08:03:19 +00:00
|
|
|
Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0),
|
|
|
|
nullptr};
|
2020-10-20 13:08:07 -07:00
|
|
|
const StructLayout *SL = DL.getStructLayout(STy);
|
2017-01-29 08:03:16 +00:00
|
|
|
|
2022-04-28 15:31:00 +02:00
|
|
|
for (unsigned J = 0, Elems = STy->getNumElements(); J != Elems; ++J) {
|
|
|
|
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), J);
|
2017-01-29 08:03:16 +00:00
|
|
|
Value *Idx = GetElementPtrInst::Create(
|
2022-04-28 15:31:00 +02:00
|
|
|
AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(J),
|
2017-01-29 08:03:16 +00:00
|
|
|
InsertPt);
|
2022-04-28 15:31:00 +02:00
|
|
|
I2->setName(Arg.getName() + "." + Twine(J));
|
|
|
|
Align Alignment = commonAlignment(StructAlign, SL->getElementOffset(J));
|
2020-10-20 13:08:07 -07:00
|
|
|
new StoreInst(&*I2++, Idx, false, Alignment, InsertPt);
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Anything that used the arg should now use the alloca.
|
2022-02-01 10:32:58 +01:00
|
|
|
Arg.replaceAllUsesWith(TheAlloca);
|
|
|
|
TheAlloca->takeName(&Arg);
|
2017-01-29 08:03:16 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-08-03 19:18:13 +01:00
|
|
|
// There potentially are metadata uses for things like llvm.dbg.value.
|
|
|
|
// Replace them with undef, after handling the other regular uses.
|
|
|
|
auto RauwUndefMetadata = make_scope_exit(
|
2022-02-01 10:32:58 +01:00
|
|
|
[&]() { Arg.replaceAllUsesWith(UndefValue::get(Arg.getType())); });
|
2020-08-03 19:18:13 +01:00
|
|
|
|
2022-02-01 10:32:58 +01:00
|
|
|
if (Arg.use_empty())
|
2017-01-29 08:03:16 +00:00
|
|
|
continue;
|
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
SmallDenseMap<int64_t, Argument *> OffsetToArg;
|
|
|
|
for (const auto &Pair : ArgsToPromote.find(&Arg)->second) {
|
|
|
|
Argument &NewArg = *I2++;
|
|
|
|
NewArg.setName(Arg.getName() + "." + Twine(Pair.first) + ".val");
|
|
|
|
OffsetToArg.insert({Pair.first, &NewArg});
|
|
|
|
}
|
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Otherwise, if we promoted this argument, then all users are load
|
2022-01-28 17:22:58 +01:00
|
|
|
// instructions (with possible casts and GEPs in between).
|
|
|
|
|
|
|
|
SmallVector<Value *, 16> Worklist;
|
|
|
|
SmallVector<Instruction *, 16> DeadInsts;
|
|
|
|
append_range(Worklist, Arg.users());
|
|
|
|
while (!Worklist.empty()) {
|
|
|
|
Value *V = Worklist.pop_back_val();
|
|
|
|
if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V)) {
|
|
|
|
DeadInsts.push_back(cast<Instruction>(V));
|
|
|
|
append_range(Worklist, V->users());
|
|
|
|
continue;
|
|
|
|
}
|
2017-01-29 08:03:16 +00:00
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
if (auto *LI = dyn_cast<LoadInst>(V)) {
|
|
|
|
Value *Ptr = LI->getPointerOperand();
|
|
|
|
APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
|
|
|
|
Ptr =
|
|
|
|
Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
|
|
|
|
/* AllowNonInbounds */ true);
|
|
|
|
assert(Ptr == &Arg && "Not constant offset from arg?");
|
|
|
|
LI->replaceAllUsesWith(OffsetToArg[Offset.getSExtValue()]);
|
|
|
|
DeadInsts.push_back(LI);
|
|
|
|
continue;
|
|
|
|
}
|
2017-01-29 08:03:16 +00:00
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
llvm_unreachable("Unexpected user");
|
|
|
|
}
|
2017-01-29 08:03:16 +00:00
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
for (Instruction *I : DeadInsts) {
|
|
|
|
I->replaceAllUsesWith(UndefValue::get(I->getType()));
|
|
|
|
I->eraseFromParent();
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-09 23:46:27 +00:00
|
|
|
return NF;
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
|
|
|
|
2019-07-09 11:35:35 +00:00
|
|
|
/// Return true if we can prove that all callees pass in a valid pointer for the
|
|
|
|
/// specified function argument.
|
2022-01-28 17:22:58 +01:00
|
|
|
static bool allCallersPassValidPointerForArgument(Argument *Arg,
|
|
|
|
Align NeededAlign,
|
|
|
|
uint64_t NeededDerefBytes) {
|
2017-01-29 08:03:16 +00:00
|
|
|
Function *Callee = Arg->getParent();
|
|
|
|
const DataLayout &DL = Callee->getParent()->getDataLayout();
|
2022-01-28 17:22:58 +01:00
|
|
|
APInt Bytes(64, NeededDerefBytes);
|
2017-01-29 08:03:16 +00:00
|
|
|
|
2022-02-08 10:29:51 +01:00
|
|
|
// Check if the argument itself is marked dereferenceable and aligned.
|
|
|
|
if (isDereferenceableAndAlignedPointer(Arg, NeededAlign, Bytes, DL))
|
|
|
|
return true;
|
2017-01-29 08:03:16 +00:00
|
|
|
|
|
|
|
// Look at all call sites of the function. At this point we know we only have
|
|
|
|
// direct callees.
|
2022-02-08 10:29:51 +01:00
|
|
|
return all_of(Callee->users(), [&](User *U) {
|
2020-04-20 18:14:13 -07:00
|
|
|
CallBase &CB = cast<CallBase>(*U);
|
2022-02-08 10:29:51 +01:00
|
|
|
return isDereferenceableAndAlignedPointer(
|
|
|
|
CB.getArgOperand(Arg->getArgNo()), NeededAlign, Bytes, DL);
|
|
|
|
});
|
2017-01-29 08:03:16 +00:00
|
|
|
}
|
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
/// Determine that this argument is safe to promote, and find the argument
/// parts it can be promoted into.
///
/// \param Arg          The pointer argument being considered for promotion.
/// \param MaxElements  Cap on the number of distinct promoted parts; 0 means
///                     no limit.
/// \param IsRecursive  True if the function is self-recursive; loads of
///                     pointer type are then rejected, since promoting them
///                     could trigger further rounds of promotion.
/// \param ArgPartsVec  On success, filled with (offset, ArgPart) pairs sorted
///                     by offset.
/// \returns true if the argument can be promoted (possibly with zero parts,
///          for a dead argument).
static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
                         unsigned MaxElements, bool IsRecursive,
                         SmallVectorImpl<OffsetAndArgPart> &ArgPartsVec) {
  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads at known
  // offsets.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (ie, there is a load in the entry
  // block) or the pointer passed in at every call site is guaranteed to be
  // valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway, in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.

  SmallDenseMap<int64_t, ArgPart, 4> ArgParts;
  Align NeededAlign(1);
  uint64_t NeededDerefBytes = 0;

  // Returns None if this load is not based on the argument. Return true if
  // we can promote the load, false otherwise.
  auto HandleLoad = [&](LoadInst *LI,
                        bool GuaranteedToExecute) -> Optional<bool> {
    // Don't promote volatile or atomic loads.
    if (!LI->isSimple())
      return false;

    Value *Ptr = LI->getPointerOperand();
    APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
    Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
                                                 /* AllowNonInbounds */ true);
    // Not a load of this argument (after stripping constant offsets).
    if (Ptr != Arg)
      return None;

    // Offsets that don't fit in int64_t can't be represented as a part key.
    if (Offset.getSignificantBits() >= 64)
      return false;

    Type *Ty = LI->getType();
    TypeSize Size = DL.getTypeStoreSize(Ty);
    // Don't try to promote scalable types.
    if (Size.isScalable())
      return false;

    // If this is a recursive function and one of the types is a pointer,
    // then promoting it might lead to recursive promotion.
    if (IsRecursive && Ty->isPointerTy())
      return false;

    int64_t Off = Offset.getSExtValue();
    // Record a part for this offset; remember the load itself only when it is
    // guaranteed to execute, so dereferenceability needn't be proven for it.
    auto Pair = ArgParts.try_emplace(
        Off, ArgPart{Ty, LI->getAlign(), GuaranteedToExecute ? LI : nullptr});
    ArgPart &Part = Pair.first->second;
    bool OffsetNotSeenBefore = Pair.second;

    // We limit promotion to only promoting up to a fixed number of elements of
    // the aggregate.
    if (MaxElements > 0 && ArgParts.size() > MaxElements) {
      LLVM_DEBUG(dbgs() << "ArgPromotion of " << *Arg << " failed: "
                        << "more than " << MaxElements << " parts\n");
      return false;
    }

    // For now, we only support loading one specific type at a given offset.
    if (Part.Ty != Ty) {
      LLVM_DEBUG(dbgs() << "ArgPromotion of " << *Arg << " failed: "
                        << "loaded via both " << *Part.Ty << " and " << *Ty
                        << " at offset " << Off << "\n");
      return false;
    }

    // If this load is not guaranteed to execute, and we haven't seen a load at
    // this offset before (or it had lower alignment), then we need to remember
    // that requirement.
    // Note that skipping loads of previously seen offsets is only correct
    // because we only allow a single type for a given offset, which also means
    // that the number of accessed bytes will be the same.
    if (!GuaranteedToExecute &&
        (OffsetNotSeenBefore || Part.Alignment < LI->getAlign())) {
      // We won't be able to prove dereferenceability for negative offsets.
      if (Off < 0)
        return false;

      // If the offset is not aligned, an aligned base pointer won't help.
      if (!isAligned(LI->getAlign(), Off))
        return false;

      NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
      NeededAlign = std::max(NeededAlign, LI->getAlign());
    }

    Part.Alignment = std::max(Part.Alignment, LI->getAlign());
    return true;
  };

  // Look for loads that are guaranteed to execute on entry.
  for (Instruction &I : Arg->getParent()->getEntryBlock()) {
    if (LoadInst *LI = dyn_cast<LoadInst>(&I))
      if (Optional<bool> Res = HandleLoad(LI, /* GuaranteedToExecute */ true))
        if (!*Res)
          return false;

    // Stop at the first instruction that might not transfer execution to its
    // successor; later loads are no longer guaranteed to execute.
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
  }

  // Now look at all loads of the argument. Remember the load instructions
  // for the aliasing check below.
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<LoadInst *, 16> Loads;
  auto AppendUsers = [&](Value *V) {
    for (User *U : V->users())
      if (Visited.insert(U).second)
        Worklist.push_back(U);
  };
  AppendUsers(Arg);
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (isa<BitCastInst>(V)) {
      AppendUsers(V);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
      if (!GEP->hasAllConstantIndices())
        return false;
      AppendUsers(V);
      continue;
    }

    if (auto *LI = dyn_cast<LoadInst>(V)) {
      // The deref of the Optional is safe: every value on the worklist is
      // reached from Arg through bitcasts and all-constant GEPs, so stripping
      // constant offsets always lands back on Arg and HandleLoad cannot
      // return None here.
      if (!*HandleLoad(LI, /* GuaranteedToExecute */ false))
        return false;
      Loads.push_back(LI);
      continue;
    }

    // Unknown user.
    LLVM_DEBUG(dbgs() << "ArgPromotion of " << *Arg << " failed: "
                      << "unknown user " << *V << "\n");
    return false;
  }

  if (NeededDerefBytes || NeededAlign > 1) {
    // Try to prove a required deref / aligned requirement.
    if (!allCallersPassValidPointerForArgument(Arg, NeededAlign,
                                               NeededDerefBytes)) {
      LLVM_DEBUG(dbgs() << "ArgPromotion of " << *Arg << " failed: "
                        << "not dereferenceable or aligned\n");
      return false;
    }
  }

  if (ArgParts.empty())
    return true; // No users, this is a dead argument.

  // Sort parts by offset.
  append_range(ArgPartsVec, ArgParts);
  sort(ArgPartsVec,
       [](const auto &A, const auto &B) { return A.first < B.first; });

  // Make sure the parts are non-overlapping.
  // TODO: As we're doing pure load promotion here, overlap should be fine from
  // a correctness perspective. Profitability is less obvious though.
  int64_t Offset = ArgPartsVec[0].first;
  for (const auto &Pair : ArgPartsVec) {
    if (Pair.first < Offset)
      return false; // Overlap with previous part.

    Offset = Pair.first + DL.getTypeStoreSize(Pair.second.Ty);
  }

  // Okay, now we know that the argument is only used by load instructions, and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  df_iterator_default_set<BasicBlock *, 16> TranspBlocks;

  for (LoadInst *Load : Loads) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    BasicBlock *BB = Load->getParent();

    MemoryLocation Loc = MemoryLocation::get(Load);
    if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
      return false; // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block.
    for (BasicBlock *P : predecessors(BB)) {
      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
        if (AAR.canBasicBlockModify(*TranspBB, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}
|
|
|
|
|
2022-04-28 15:31:00 +02:00
|
|
|
/// Return true if \p Ty contains no padding bytes: neither within the type's
/// own allocation nor between the fields of any contained aggregate.
bool ArgumentPromotionPass::isDenselyPacked(Type *Ty, const DataLayout &DL) {
  // Without size information we must be conservative.
  if (!Ty->isSized())
    return false;

  // A mismatch between the storage size and the alloc size means the type
  // itself carries padding (e.g. x86_fp80 on x86-64: size 80, alloc size 128).
  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return isDenselyPacked(VTy->getElementType(), DL);

  // An array is densely packed exactly when its element type is.
  if (auto *ATy = dyn_cast<ArrayType>(Ty))
    return isDenselyPacked(ATy->getElementType(), DL);

  // Structs need a field-by-field check; every other sized type that passed
  // the checks above is dense.
  auto *StructTy = dyn_cast<StructType>(Ty);
  if (!StructTy)
    return true;

  // Each field must itself be densely packed and must begin exactly where the
  // previous field's allocation ended (no inter-field padding).
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t ExpectedOffsetInBits = 0;
  for (unsigned Idx = 0, NumElts = StructTy->getNumElements(); Idx < NumElts;
       ++Idx) {
    Type *FieldTy = StructTy->getElementType(Idx);
    if (!isDenselyPacked(FieldTy, DL))
      return false;
    if (Layout->getElementOffsetInBits(Idx) != ExpectedOffsetInBits)
      return false;
    ExpectedOffsetInBits += DL.getTypeAllocSizeInBits(FieldTy);
  }

  return true;
}
|
2008-07-29 10:00:13 +00:00
|
|
|
|
2018-05-01 15:54:18 +00:00
|
|
|
/// Checks if the padding bytes of an argument could be accessed.
|
2022-04-28 15:31:00 +02:00
|
|
|
static bool canPaddingBeAccessed(Argument *Arg) {
|
|
|
|
assert(Arg->hasByValAttr());
|
2004-03-08 01:04:36 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
// Track all the pointers to the argument to make sure they are not captured.
|
|
|
|
SmallPtrSet<Value *, 16> PtrValues;
|
2022-04-28 15:31:00 +02:00
|
|
|
PtrValues.insert(Arg);
|
2017-01-29 08:03:16 +00:00
|
|
|
|
|
|
|
// Track all of the stores.
|
|
|
|
SmallVector<StoreInst *, 16> Stores;
|
|
|
|
|
|
|
|
// Scan through the uses recursively to make sure the pointer is always used
|
|
|
|
// sanely.
|
2022-04-28 15:31:00 +02:00
|
|
|
SmallVector<Value *, 16> WorkList(Arg->users());
|
2017-01-29 08:03:16 +00:00
|
|
|
while (!WorkList.empty()) {
|
2021-01-23 10:56:33 -08:00
|
|
|
Value *V = WorkList.pop_back_val();
|
2017-01-29 08:03:16 +00:00
|
|
|
if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
|
|
|
|
if (PtrValues.insert(V).second)
|
2022-05-12 16:39:26 +02:00
|
|
|
append_range(WorkList, V->users());
|
2017-01-29 08:03:16 +00:00
|
|
|
} else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
|
|
|
|
Stores.push_back(Store);
|
|
|
|
} else if (!isa<LoadInst>(V)) {
|
|
|
|
return true;
|
2004-03-07 21:29:54 +00:00
|
|
|
}
|
2008-01-11 22:31:41 +00:00
|
|
|
}
|
2004-03-07 21:29:54 +00:00
|
|
|
|
2017-01-29 08:03:19 +00:00
|
|
|
// Check to make sure the pointers aren't captured
|
2017-01-29 08:03:16 +00:00
|
|
|
for (StoreInst *Store : Stores)
|
|
|
|
if (PtrValues.count(Store->getValueOperand()))
|
|
|
|
return true;
|
2004-03-07 21:29:54 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
return false;
|
|
|
|
}
|
2005-04-21 23:48:37 +00:00
|
|
|
|
2022-01-28 17:22:58 +01:00
|
|
|
/// Check if callers and callee agree on how promoted arguments would be
|
|
|
|
/// passed.
|
|
|
|
static bool areTypesABICompatible(ArrayRef<Type *> Types, const Function &F,
|
|
|
|
const TargetTransformInfo &TTI) {
|
|
|
|
return all_of(F.uses(), [&](const Use &U) {
|
2020-04-20 18:14:13 -07:00
|
|
|
CallBase *CB = dyn_cast<CallBase>(U.getUser());
|
|
|
|
if (!CB)
|
2019-10-30 17:20:20 -05:00
|
|
|
return false;
|
2022-01-28 17:22:58 +01:00
|
|
|
|
2020-04-20 18:14:13 -07:00
|
|
|
const Function *Caller = CB->getCaller();
|
|
|
|
const Function *Callee = CB->getCalledFunction();
|
2022-01-28 17:22:58 +01:00
|
|
|
return TTI.areTypesABICompatible(Caller, Callee, Types);
|
|
|
|
});
|
2019-01-16 05:15:31 +00:00
|
|
|
}
|
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
/// PromoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct). If safe to promote some arguments, it
/// calls the DoPromotion method.
///
/// \param F the candidate function (must have local linkage to be promoted).
/// \param AARGetter returns the alias-analysis results for \p F.
/// \param MaxElements cap on how many pieces an aggregate may be split into
///        (0 means unlimited).
/// \param ReplaceCallSite optional callback invoked for each rewritten call
///        site so the legacy call graph can be kept in sync.
/// \param IsRecursive whether \p F participates in a recursive cycle; this
///        restricts which argument parts are considered safe.
/// \returns the new, promoted function, or nullptr if nothing was changed.
static Function *
promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
                 unsigned MaxElements,
                 Optional<function_ref<void(CallBase &OldCS, CallBase &NewCS)>>
                     ReplaceCallSite,
                 const TargetTransformInfo &TTI, bool IsRecursive) {
  // Don't perform argument promotion for naked functions; otherwise we can end
  // up removing parameters that are seemingly 'not used' as they are referred
  // to in the assembly.
  if(F->hasFnAttribute(Attribute::Naked))
    return nullptr;

  // Make sure that it is local to this module.
  if (!F->hasLocalLinkage())
    return nullptr;

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters. Frontends encode that classification at the call site in the
  // IR, while in the callee the classification is determined dynamically based
  // on the number of registers consumed so far.
  if (F->isVarArg())
    return nullptr;

  // Don't transform functions that receive inallocas, as the transformation may
  // not be safe depending on calling convention.
  if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca))
    return nullptr;

  // First check: see if there are any pointer arguments!  If not, quick exit.
  SmallVector<Argument *, 16> PointerArgs;
  for (Argument &I : F->args())
    if (I.getType()->isPointerTy())
      PointerArgs.push_back(&I);
  if (PointerArgs.empty())
    return nullptr;

  // Second check: make sure that all callers are direct callers.  We can't
  // transform functions that have indirect callers.  Also see if the function
  // is self-recursive.
  for (Use &U : F->uses()) {
    CallBase *CB = dyn_cast<CallBase>(U.getUser());
    // Must be a direct call; a mismatched function type would make the
    // rewritten call site malformed.
    if (CB == nullptr || !CB->isCallee(&U) ||
        CB->getFunctionType() != F->getFunctionType())
      return nullptr;

    // Can't change signature of musttail callee
    if (CB->isMustTailCall())
      return nullptr;

    if (CB->getFunction() == F)
      IsRecursive = true;
  }

  // Can't change signature of musttail caller
  // FIXME: Support promoting whole chain of musttail functions
  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return nullptr;

  const DataLayout &DL = F->getParent()->getDataLayout();

  AAResults &AAR = AARGetter(*F);

  // Check to see which arguments are promotable.  If an argument is promotable,
  // add it to ArgsToPromote.
  DenseMap<Argument *, SmallVector<OffsetAndArgPart, 4>> ArgsToPromote;
  SmallPtrSet<Argument *, 8> ByValArgsToTransform;
  for (Argument *PtrArg : PointerArgs) {
    // Replace sret attribute with noalias. This reduces register pressure by
    // avoiding a register copy.
    if (PtrArg->hasStructRetAttr()) {
      unsigned ArgNo = PtrArg->getArgNo();
      F->removeParamAttr(ArgNo, Attribute::StructRet);
      F->addParamAttr(ArgNo, Attribute::NoAlias);
      // Mirror the attribute change on every (direct) call site so the IR
      // stays consistent between callers and callee.
      for (Use &U : F->uses()) {
        CallBase &CB = cast<CallBase>(*U.getUser());
        CB.removeParamAttr(ArgNo, Attribute::StructRet);
        CB.addParamAttr(ArgNo, Attribute::NoAlias);
      }
    }

    // If we can promote the pointer to its value.
    SmallVector<OffsetAndArgPart, 4> ArgParts;
    if (findArgParts(PtrArg, DL, AAR, MaxElements, IsRecursive, ArgParts)) {
      SmallVector<Type *, 4> Types;
      for (const auto &Pair : ArgParts)
        Types.push_back(Pair.second.Ty);

      if (areTypesABICompatible(Types, *F, TTI)) {
        ArgsToPromote.insert({PtrArg, std::move(ArgParts)});
        continue;
      }
    }

    // Otherwise, if this is a byval argument, and if the aggregate type is
    // small, just pass the elements, which is always safe, if the passed value
    // is densely packed or if we can prove the padding bytes are never
    // accessed.
    //
    // Only handle arguments with specified alignment; if it's unspecified, the
    // actual alignment of the argument is target-specific.
    Type *ByValTy = PtrArg->getParamByValType();
    bool IsSafeToPromote =
        ByValTy && PtrArg->getParamAlign() &&
        (ArgumentPromotionPass::isDenselyPacked(ByValTy, DL) ||
         !canPaddingBeAccessed(PtrArg));
    if (!IsSafeToPromote) {
      LLVM_DEBUG(dbgs() << "ArgPromotion disables passing the elements of"
                        << " the argument '" << PtrArg->getName()
                        << "' because it is not safe.\n");
      continue;
    }
    if (StructType *STy = dyn_cast<StructType>(ByValTy)) {
      // Respect the element-count limit: scalarizing a huge struct would
      // add an unprofitable number of parameters.
      if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
        LLVM_DEBUG(dbgs() << "ArgPromotion disables passing the elements of"
                          << " the argument '" << PtrArg->getName()
                          << "' because it would require adding more"
                          << " than " << MaxElements
                          << " arguments to the function.\n");
        continue;
      }
      SmallVector<Type *, 4> Types;
      append_range(Types, STy->elements());

      // If all the elements are single-value types, we can promote it.
      bool AllSimple =
          all_of(Types, [](Type *Ty) { return Ty->isSingleValueType(); });

      // Safe to transform. Passing the elements as a scalar will allow sroa to
      // hack on the new alloca we introduce.
      if (AllSimple && areTypesABICompatible(Types, *F, TTI))
        ByValArgsToTransform.insert(PtrArg);
    }
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  return doPromotion(F, ArgsToPromote, ByValArgsToTransform, ReplaceCallSite);
}
|
|
|
|
|
|
|
|
/// New-pass-manager entry point: repeatedly attempt argument promotion on
/// every function in the SCC until a fixed point is reached, keeping the
/// lazy call graph and the function analysis caches in sync with each
/// replaced function.
PreservedAnalyses ArgumentPromotionPass::run(LazyCallGraph::SCC &C,
                                             CGSCCAnalysisManager &AM,
                                             LazyCallGraph &CG,
                                             CGSCCUpdateResult &UR) {
  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;

    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

    // An SCC with more than one node is mutually recursive by construction.
    // (Self-recursion within a single function is detected inside
    // promoteArguments.)
    bool IsRecursive = C.size() > 1;
    for (LazyCallGraph::Node &N : C) {
      Function &OldF = N.getFunction();

      // FIXME: This lambda must only be used with this function. We should
      // skip the lambda and just get the AA results directly.
      auto AARGetter = [&](Function &F) -> AAResults & {
        assert(&F == &OldF && "Called with an unexpected function!");
        return FAM.getResult<AAManager>(F);
      };

      const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(OldF);
      // No ReplaceCallSite callback: the new PM does not maintain a legacy
      // CallGraph, so per-call-site updates are unnecessary here.
      Function *NewF = promoteArguments(&OldF, AARGetter, MaxElements, None,
                                        TTI, IsRecursive);
      if (!NewF)
        continue;
      LocalChange = true;

      // Directly substitute the functions in the call graph. Note that this
      // requires the old function to be completely dead and completely
      // replaced by the new function. It does no call graph updates, it merely
      // swaps out the particular function mapped to a particular node in the
      // graph.
      C.getOuterRefSCC().replaceNodeFunction(N, *NewF);
      // Drop cached analyses for the old function before erasing it; the
      // analysis manager must not hold results keyed on a deleted Function.
      FAM.clear(OldF, OldF.getName());
      OldF.eraseFromParent();

      // Callers of the promoted function were rewritten in place; invalidate
      // their non-CFG analyses (the call rewrite does not change their CFG).
      PreservedAnalyses FuncPA;
      FuncPA.preserveSet<CFGAnalyses>();
      for (auto *U : NewF->users()) {
        auto *UserF = cast<CallBase>(U)->getFunction();
        FAM.invalidate(*UserF, FuncPA);
      }
    }

    Changed |= LocalChange;
  } while (LocalChange);

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  // We've cleared out analyses for deleted functions.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We've manually invalidated analyses for functions we've modified.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}
|
2008-09-08 11:07:35 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
namespace {

/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
///
/// Legacy-pass-manager wrapper around the argument promotion transformation;
/// the actual work happens in runOnSCC (defined below, out of line).
struct ArgPromotion : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  /// \param MaxElements maximum number of pieces an aggregate argument may be
  /// split into (0 disables the limit); defaults to 3.
  explicit ArgPromotion(unsigned MaxElements = 3)
      : CallGraphSCCPass(ID), MaxElements(MaxElements) {
    initializeArgPromotionPass(*PassRegistry::getPassRegistry());
  }

  /// Declare the analyses this pass needs so the legacy pass manager can
  /// schedule them before running us.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // Pull in whatever alias-analysis passes are available.
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

private:
  // Re-expose the base-class overload set hidden by the override below.
  using llvm::Pass::doInitialization;

  bool doInitialization(CallGraph &CG) override;

  /// The maximum number of elements to expand, or 0 for unlimited.
  unsigned MaxElements;
};

} // end anonymous namespace
|
2008-09-07 09:54:09 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
char ArgPromotion::ID = 0;

// Register the legacy pass under the "argpromotion" name, declaring its
// analysis dependencies so -argpromotion works from opt's command line.
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                      "Promote 'by reference' arguments to scalars", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                    "Promote 'by reference' arguments to scalars", false, false)
|
2008-01-11 22:31:41 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
/// Factory for the legacy-PM argument promotion pass.
///
/// \param MaxElements maximum number of pieces an aggregate argument may be
/// split into; 0 means unlimited.
Pass *llvm::createArgumentPromotionPass(unsigned MaxElements) {
  auto *ArgPromoPass = new ArgPromotion(MaxElements);
  return ArgPromoPass;
}
|
2008-07-29 10:00:13 +00:00
|
|
|
|
2017-01-29 08:03:16 +00:00
|
|
|
/// Legacy-pass-manager entry point: iterate argument promotion over the SCC
/// to a fixed point, manually updating the legacy CallGraph (node creation,
/// edge replacement, dead-node removal) for each promoted function.
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  LegacyAARGetter AARGetter(*this);

  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    // A multi-node SCC is mutually recursive by construction.
    bool IsRecursive = SCC.size() > 1;
    for (CallGraphNode *OldNode : SCC) {
      Function *OldF = OldNode->getFunction();
      // External/indirect call-graph nodes have no function body; skip them.
      if (!OldF)
        continue;

      // Invoked by doPromotion for each rewritten call site so the legacy
      // call graph's caller edges track the new call instructions.
      auto ReplaceCallSite = [&](CallBase &OldCS, CallBase &NewCS) {
        Function *Caller = OldCS.getParent()->getParent();
        CallGraphNode *NewCalleeNode =
            CG.getOrInsertFunction(NewCS.getCalledFunction());
        CallGraphNode *CallerNode = CG[Caller];
        CallerNode->replaceCallEdge(cast<CallBase>(OldCS),
                                    cast<CallBase>(NewCS), NewCalleeNode);
      };

      const TargetTransformInfo &TTI =
          getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*OldF);
      if (Function *NewF =
              promoteArguments(OldF, AARGetter, MaxElements, {ReplaceCallSite},
                               TTI, IsRecursive)) {
        LocalChange = true;

        // Update the call graph for the newly promoted function.
        CallGraphNode *NewNode = CG.getOrInsertFunction(NewF);
        NewNode->stealCalledFunctionsFrom(OldNode);
        // If nothing references the old node anymore it can be deleted
        // outright; otherwise keep the stub alive with external linkage.
        if (OldNode->getNumReferences() == 0)
          delete CG.removeFunctionFromModule(OldNode);
        else
          OldF->setLinkage(Function::ExternalLinkage);

        // And update the SCC we're iterating as well.
        SCC.ReplaceNode(OldNode, NewNode);
      }
    }
    // Remember that we changed something.
    Changed |= LocalChange;
  } while (LocalChange);

  return Changed;
}
|
2014-07-01 21:13:37 +00:00
|
|
|
|
|
|
|
/// No pass-specific setup is required; simply delegate initialization to the
/// CallGraphSCCPass base class.
bool ArgPromotion::doInitialization(CallGraph &CG) {
  bool Modified = CallGraphSCCPass::doInitialization(CG);
  return Modified;
}
|