Mirror of https://github.com/llvm/llvm-project.git (synced 2025-04-21 18:06:50 +00:00)
[IR][NFC] Update IRBuilder to use InsertPosition (#96497)
Uses the new InsertPosition class (added in #94226) to simplify some of the IRBuilder interface, and removes the need to pass a BasicBlock alongside a BasicBlock::iterator, using the fact that we can now get the parent basic block from the iterator even if it points to the sentinel. This patch removes the BasicBlock argument from each constructor or call to setInsertPoint. This has no functional effect, but later on as we look to remove the `Instruction *InsertBefore` argument from instruction-creation (discussed [here](https://discourse.llvm.org/t/psa-instruction-constructors-changing-to-iterator-only-insertion/77845)), this will simplify the process by allowing us to deprecate the InsertPosition constructor directly and catch all the cases where we use instructions rather than iterators.
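For readers unfamiliar with the new interface, here is a minimal illustrative sketch of what the unified `InsertPosition` overload allows (not part of the patch; the helper function `positionBuilder` is hypothetical, and the snippet assumes the post-patch `IRBuilder.h`):

```cpp
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Hypothetical helper, shown only to contrast the old and new call shapes.
void positionBuilder(BasicBlock *BB, Instruction *I) {
  IRBuilder<> Builder(BB->getContext());

  // Before this patch, positioning at an iterator also required the block:
  //   Builder.SetInsertPoint(BB, I->getIterator());
  // With InsertPosition, the iterator alone suffices; the parent block is
  // recovered from the iterator itself (even for the sentinel/end iterator):
  Builder.SetInsertPoint(I->getIterator());

  // The other implicit conversions keep working: a BasicBlock* means
  // "insert at the end of the block", an Instruction* means "insert
  // immediately before the instruction" (the form slated for deprecation).
  Builder.SetInsertPoint(BB);
  Builder.SetInsertPoint(I);
}
```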
Parent: 317277e4f9
Commit: 6481dc5761
clang/lib/CodeGen
    CGBlocks.cpp, CGGPUBuiltin.cpp, CGHLSLRuntime.cpp, CGObjC.cpp, CGObjCMac.cpp,
    CGOpenMPRuntime.cpp, CGStmt.cpp, CodeGenABITypes.cpp, CodeGenFunction.cpp
llvm
    include/llvm
    lib
        Analysis
        CodeGen
            AtomicExpandPass.cpp, CodeGenPrepare.cpp, ExpandLargeFpConvert.cpp, ExpandMemCmp.cpp,
            ExpandVectorPredication.cpp, HardwareLoops.cpp, IntrinsicLowering.cpp,
            PreISelIntrinsicLowering.cpp, SafeStack.cpp, ShadowStackGCLowering.cpp,
            SjLjEHPrepare.cpp, WasmEHPrepare.cpp
        Frontend/OpenMP
        IR
        Target
            AArch64
            AMDGPU
                AMDGPUAtomicOptimizer.cpp, AMDGPUCodeGenPrepare.cpp, AMDGPULibCalls.cpp,
                AMDGPULowerKernelArguments.cpp, AMDGPULowerModuleLDSPass.cpp, SIAnnotateControlFlow.cpp
            ARM
            Hexagon
            PowerPC
            SPIRV
            WebAssembly
            X86
        Transforms
            AggressiveInstCombine
            Coroutines
            IPO
            InstCombine
                InstCombineAndOrXor.cpp, InstCombineCompares.cpp, InstCombinePHI.cpp,
                InstCombineSelect.cpp, InstCombineVectorOps.cpp
            Instrumentation
                AddressSanitizer.cpp, BoundsChecking.cpp, DataFlowSanitizer.cpp, GCOVProfiling.cpp,
                HWAddressSanitizer.cpp, MemProfiler.cpp, PGOInstrumentation.cpp
            ObjCARC
            Scalar
                ConstraintElimination.cpp, GuardWidening.cpp, LoopBoundSplit.cpp, LoopIdiomRecognize.cpp,
                LoopPredication.cpp, LowerMatrixIntrinsics.cpp, PartiallyInlineLibCalls.cpp,
                RewriteStatepointsForGC.cpp, SROA.cpp, ScalarizeMaskedMemIntrin.cpp, Scalarizer.cpp
            Utils
                AMDGPUEmitPrintf.cpp, BypassSlowDivision.cpp, CallPromotionUtils.cpp, FlattenCFG.cpp,
                InlineFunction.cpp, IntegerDivision.cpp, SSAUpdaterBulk.cpp, ScalarEvolutionExpander.cpp,
                SimplifyCFG.cpp, SimplifyIndVar.cpp, SimplifyLibCalls.cpp
            Vectorize
    unittests
        Analysis
        Frontend
        IR
        Transforms/Utils
mlir/lib/Target/LLVMIR/Dialect/OpenMP
polly/lib/CodeGen
@@ -1545,7 +1545,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
entry_ptr = entry_ptr->getNextNonDebugInstruction()->getIterator();
else
entry_ptr = entry->end();
Builder.SetInsertPoint(entry, entry_ptr);
Builder.SetInsertPoint(entry_ptr);
// Emit debug information for all the DeclRefExprs.
// FIXME: also for 'this'

@@ -202,13 +202,13 @@ RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
Args.push_back(Arg);
}
llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
llvm::IRBuilder<> IRB(Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
bool isBuffered = (CGM.getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
clang::TargetOptions::AMDGPUPrintfKind::Buffered);
auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args, isBuffered);
Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
Builder.SetInsertPoint(IRB.GetInsertPoint());
return RValue::get(Printf);
}

@@ -436,7 +436,7 @@ void CGHLSLRuntime::generateGlobalCtorDtorCalls() {
for (auto &F : M.functions()) {
if (!F.hasFnAttribute("hlsl.shader"))
continue;
IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
IRBuilder<> B(F.getEntryBlock().begin());
for (auto *Fn : CtorFns)
B.CreateCall(FunctionCallee(Fn));

@@ -2970,13 +2970,12 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
value = doFallback(CGF, value);
} else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
// Place the retain immediately following the call.
CGF.Builder.SetInsertPoint(call->getParent(),
++llvm::BasicBlock::iterator(call));
CGF.Builder.SetInsertPoint(++llvm::BasicBlock::iterator(call));
value = doAfterCall(CGF, value);
} else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
// Place the retain at the beginning of the normal destination block.
llvm::BasicBlock *BB = invoke->getNormalDest();
CGF.Builder.SetInsertPoint(BB, BB->begin());
CGF.Builder.SetInsertPoint(BB->begin());
value = doAfterCall(CGF, value);
// Bitcasts can arise because of related-result returns. Rewrite

@@ -2984,7 +2983,7 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
} else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
// Change the insert point to avoid emitting the fall-back call after the
// bitcast.
CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator());
CGF.Builder.SetInsertPoint(bitcast->getIterator());
llvm::Value *operand = bitcast->getOperand(0);
operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
bitcast->setOperand(0, operand);

@@ -4417,7 +4417,7 @@ void FragileHazards::emitHazardsInNewBlocks() {
// call. If the call throws, then this is sufficient to
// guarantee correctness as long as it doesn't also write to any
// locals.
Builder.SetInsertPoint(&BB, BI);
Builder.SetInsertPoint(BI);
emitReadHazard(Builder);
}
}

@@ -1447,7 +1447,7 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (!Elem.second.ServiceInsertPt)
setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
CGF.Builder.SetInsertPoint(&*Elem.second.ServiceInsertPt);
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
llvm::CallInst *Call = CGF.Builder.CreateCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),

@@ -3076,7 +3076,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (IsGCCAsmGoto && !CBRRegResults.empty()) {
for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
Builder.SetInsertPoint(Succ, --(Succ->end()));
Builder.SetInsertPoint(--(Succ->end()));
EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
ResultTypeRequiresCast, ResultRegIsFlagReg);

@@ -123,7 +123,7 @@ llvm::Value *CodeGen::getCXXDestructorImplicitParam(
CGF.CurCodeDecl = D;
CGF.CurFuncDecl = D;
CGF.CurFn = InsertBlock->getParent();
CGF.Builder.SetInsertPoint(InsertBlock, InsertPoint);
CGF.Builder.SetInsertPoint(InsertPoint);
return CGM.getCXXABI().getCXXDestructorImplicitParam(
CGF, D, Type, ForVirtualBase, Delegating);
}

@@ -2764,7 +2764,7 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
if (!CGM.getCodeGenOpts().SanitizeStats)
return;
llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
llvm::IRBuilder<> IRB(Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
CGM.getSanStats().create(IRB, SSK);
}

@@ -2883,7 +2883,7 @@ void CodeGenFunction::EmitAArch64MultiVersionResolver(
}
if (!AArch64CpuInitialized) {
Builder.SetInsertPoint(CurBlock, CurBlock->begin());
Builder.SetInsertPoint(CurBlock->begin());
EmitAArch64CpuInit();
AArch64CpuInitialized = true;
Builder.SetInsertPoint(CurBlock);
@@ -173,37 +173,13 @@ public:
BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
LLVMContext &getContext() const { return Context; }
/// This specifies that created instructions should be appended to the
/// end of the specified block.
void SetInsertPoint(BasicBlock *TheBB) {
BB = TheBB;
InsertPt = BB->end();
}
/// This specifies that created instructions should be inserted before
/// the specified instruction.
void SetInsertPoint(Instruction *I) {
BB = I->getParent();
InsertPt = I->getIterator();
assert(InsertPt != BB->end() && "Can't read debug loc from end()");
SetCurrentDebugLocation(I->getStableDebugLoc());
}
/// This specifies that created instructions should be inserted at the
/// specified point.
void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
BB = TheBB;
/// specified insert position.
void SetInsertPoint(InsertPosition IP) {
BB = IP.getBasicBlock();
InsertPt = IP;
if (IP != TheBB->end())
SetCurrentDebugLocation(IP->getStableDebugLoc());
}
/// This specifies that created instructions should be inserted at
/// the specified point, but also requires that \p IP is dereferencable.
void SetInsertPoint(BasicBlock::iterator IP) {
BB = IP->getParent();
InsertPt = IP;
SetCurrentDebugLocation(IP->getStableDebugLoc());
if (InsertPt != BB->end())
SetCurrentDebugLocation(InsertPt->getStableDebugLoc());
}
/// This specifies that created instructions should inserted at the beginning

@@ -286,7 +262,7 @@ public:
/// Sets the current insert point to a previously-saved location.
void restoreIP(InsertPoint IP) {
if (IP.isSet())
SetInsertPoint(IP.getBlock(), IP.getPoint());
SetInsertPoint(IP.getPoint());
else
ClearInsertionPoint();
}

@@ -2677,44 +2653,20 @@ public:
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
MDNode *FPMathTag = nullptr,
explicit IRBuilder(InsertPosition IP, MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
FPMathTag, OpBundles),
Folder(Folder) {
SetInsertPoint(TheBB);
}
explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
FPMathTag, OpBundles) {
SetInsertPoint(TheBB);
}
explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, FPMathTag,
OpBundles) {
: IRBuilderBase(IP.getBasicBlock()->getContext(), this->Folder,
this->Inserter, FPMathTag, OpBundles) {
SetInsertPoint(IP);
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
FPMathTag, OpBundles),
explicit IRBuilder(InsertPosition IP, FolderTy Folder,
MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(IP.getBasicBlock()->getContext(), this->Folder,
this->Inserter, FPMathTag, OpBundles),
Folder(Folder) {
SetInsertPoint(TheBB, IP);
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
FPMathTag, OpBundles) {
SetInsertPoint(TheBB, IP);
SetInsertPoint(IP);
}
/// Avoid copying the full IRBuilder. Prefer using InsertPointGuard

@@ -44,6 +44,13 @@ template <> struct ilist_alloc_traits<Instruction> {
iterator_range<simple_ilist<DbgRecord>::iterator>
getDbgRecordRange(DbgMarker *);
/// Class used to generate an insert position (ultimately always a
/// BasicBlock::iterator, which it will implicitly convert to) from either:
/// - An Instruction, inserting immediately prior. This will soon be marked as
/// deprecated.
/// - A BasicBlock, inserting at the end.
/// - An iterator, inserting at its position.
/// - Any nullptr value, giving a blank iterator (not valid for insertion).
class InsertPosition {
using InstListType = SymbolTableList<Instruction, ilist_iterator_bits<true>,
ilist_parent<BasicBlock>>;

@@ -51,8 +58,6 @@ class InsertPosition {
public:
InsertPosition(std::nullptr_t) : InsertAt() {}
// LLVM_DEPRECATED("Use BasicBlock::iterators for insertion instead",
// "BasicBlock::iterator")
InsertPosition(Instruction *InsertBefore);
InsertPosition(BasicBlock *InsertAtEnd);
InsertPosition(InstListType::iterator InsertAt) : InsertAt(InsertAt) {}
@@ -376,9 +376,7 @@ public:
Builder.SetInsertPoint(IP);
}
void setInsertPoint(BasicBlock::iterator IP) {
Builder.SetInsertPoint(IP->getParent(), IP);
}
void setInsertPoint(BasicBlock::iterator IP) { Builder.SetInsertPoint(IP); }
/// Clear the current insertion point. This is useful if the instruction
/// that had been serving as the insertion point may have been deleted.

@@ -1217,7 +1217,7 @@ SizeOffsetValue ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
// Compute offset/size for each PHI incoming pointer.
for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
BasicBlock *IncomingBlock = PHI.getIncomingBlock(i);
Builder.SetInsertPoint(IncomingBlock, IncomingBlock->getFirstInsertionPt());
Builder.SetInsertPoint(IncomingBlock->getFirstInsertionPt());
SizeOffsetValue EdgeData = compute_(PHI.getIncomingValue(i));
if (!EdgeData.bothKnown()) {

@@ -1242,7 +1242,7 @@ Value *AtomicExpandImpl::insertRMWLLSCLoop(
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
Builder.SetInsertPoint(ExitBB->begin());
return Loaded;
}

@@ -1478,7 +1478,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// succeeded or not. We expose this to later passes by converting any
// subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
// PHI.
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
Builder.SetInsertPoint(ExitBB->begin());
PHINode *LoadedExit =
Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
LoadedExit->addIncoming(LoadedTryStore, SuccessBB);

@@ -1491,7 +1491,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// a type wider than the one in the cmpxchg instruction.
Value *LoadedFull = LoadedExit;
Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
Builder.SetInsertPoint(std::next(Success->getIterator()));
Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);
// Look for any users of the cmpxchg that are just comparing the loaded value

@@ -1616,7 +1616,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
Builder.CreateCondBr(Success, ExitBB, LoopBB);
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
Builder.SetInsertPoint(ExitBB->begin());
return NewLoaded;
}

@@ -2355,7 +2355,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
// Create a PHI in the end block to select either the output of the intrinsic
// or the bit width of the operand.
Builder.SetInsertPoint(EndBlock, EndBlock->begin());
Builder.SetInsertPoint(EndBlock->begin());
PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));

@@ -6306,7 +6306,7 @@ bool CodeGenPrepare::splitLargeGEPOffsets() {
NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
}
IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
IRBuilder<> NewBaseBuilder(NewBaseInsertPt);
// Create a new base.
Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
NewBaseGEP = OldBase;

@@ -212,7 +212,7 @@ static void expandFPToI(Instruction *FPToI) {
Builder.CreateBr(End);
// cleanup:
Builder.SetInsertPoint(End, End->begin());
Builder.SetInsertPoint(End->begin());
PHINode *Retval0 = Builder.CreatePHI(FPToI->getType(), 4);
Retval0->addIncoming(Cond8, IfThen5);

@@ -560,7 +560,7 @@ static void expandIToFP(Instruction *IToFP) {
Builder.CreateBr(End);
// return:
Builder.SetInsertPoint(End, End->begin());
Builder.SetInsertPoint(End->begin());
PHINode *Retval0 = Builder.CreatePHI(IToFP->getType(), 2);
Retval0->addIncoming(A4, IfEnd26);
Retval0->addIncoming(ConstantFP::getZero(IToFP->getType(), false), Entry);

@@ -574,7 +574,7 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
// need to be calculated and can simply return 1.
if (IsUsedForZeroCmp) {
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
Builder.SetInsertPoint(ResBlock.BB, InsertPt);
Builder.SetInsertPoint(InsertPt);
Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
PhiRes->addIncoming(Res, ResBlock.BB);
BranchInst *NewBr = BranchInst::Create(EndBlock);

@@ -584,7 +584,7 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
return;
}
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
Builder.SetInsertPoint(ResBlock.BB, InsertPt);
Builder.SetInsertPoint(InsertPt);
Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
ResBlock.PhiSrc2);

@@ -611,7 +611,7 @@ void MemCmpExpansion::setupResultBlockPHINodes() {
}
void MemCmpExpansion::setupEndBlockPHINodes() {
Builder.SetInsertPoint(EndBlock, EndBlock->begin());
Builder.SetInsertPoint(EndBlock->begin());
PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}

@@ -667,7 +667,7 @@ void CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
auto *M = VPI.getModule();
Function *VScaleFunc =
Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
IRBuilder<> Builder(VPI.getIterator());
Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
@@ -580,7 +580,7 @@ PHINode* HardwareLoop::InsertPHICounter(Value *NumElts, Value *EltsRem) {
BasicBlock *Preheader = L->getLoopPreheader();
BasicBlock *Header = L->getHeader();
BasicBlock *Latch = ExitBranch->getParent();
IRBuilder<> Builder(Header, Header->getFirstNonPHIIt());
IRBuilder<> Builder(Header->getFirstNonPHIIt());
PHINode *Index = Builder.CreatePHI(NumElts->getType(), 2);
Index->addIncoming(NumElts, Preheader);
Index->addIncoming(EltsRem, Latch);

@@ -40,7 +40,7 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
FunctionCallee FCache =
M->getOrInsertFunction(NewFn, FunctionType::get(RetTy, ParamTys, false));
IRBuilder<> Builder(CI->getParent(), CI->getIterator());
IRBuilder<> Builder(CI->getIterator());
SmallVector<Value *, 8> Args(ArgBegin, ArgEnd);
CallInst *NewCI = Builder.CreateCall(FCache, Args);
NewCI->setName(CI->getName());

@@ -144,7 +144,7 @@ static bool lowerObjCCall(Function &F, const char *NewFn,
auto *CI = cast<CallInst>(CB);
assert(CI->getCalledFunction() && "Cannot lower an indirect call!");
IRBuilder<> Builder(CI->getParent(), CI->getIterator());
IRBuilder<> Builder(CI->getIterator());
SmallVector<Value *, 8> Args(CI->args());
SmallVector<llvm::OperandBundleDef, 1> BundleList;
CI->getOperandBundlesAsDefs(BundleList);

@@ -784,7 +784,7 @@ bool SafeStack::run() {
if (!StackRestorePoints.empty())
++NumUnsafeStackRestorePointsFunctions;
IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
IRBuilder<> IRB(F.begin()->getFirstInsertionPt());
// Calls must always have a debug location, or else inlining breaks. So
// we explicitly set a artificial debug location here.
if (DISubprogram *SP = F.getSubprogram())

@@ -351,7 +351,7 @@ bool ShadowStackGCLoweringImpl::runOnFunction(Function &F,
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
IRBuilder<> AtEntry(IP->getParent(), IP);
IRBuilder<> AtEntry(IP);
Instruction *StackEntry =
AtEntry.CreateAlloca(ConcreteStackEntryTy, nullptr, "gc_frame");

@@ -384,7 +384,7 @@ bool ShadowStackGCLoweringImpl::runOnFunction(Function &F,
// shadow stack.
while (isa<StoreInst>(IP))
++IP;
AtEntry.SetInsertPoint(IP->getParent(), IP);
AtEntry.SetInsertPoint(IP);
// Push the entry onto the shadow stack.
Instruction *EntryNextPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,

@@ -182,7 +182,7 @@ void SjLjEHPrepareImpl::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Type *LPadType = LPI->getType();
Value *LPadVal = PoisonValue::get(LPadType);
auto *SelI = cast<Instruction>(SelVal);
IRBuilder<> Builder(SelI->getParent(), std::next(SelI->getIterator()));
IRBuilder<> Builder(std::next(SelI->getIterator()));
LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");

@@ -206,8 +206,7 @@ SjLjEHPrepareImpl::setupFunctionContext(Function &F,
// Fill in the function context structure.
for (LandingPadInst *LPI : LPads) {
IRBuilder<> Builder(LPI->getParent(),
LPI->getParent()->getFirstInsertionPt());
IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
// Reference the __data field.
Value *FCData =

@@ -303,7 +303,7 @@ void WasmEHPrepareImpl::prepareEHPad(BasicBlock *BB, bool NeedPersonality,
unsigned Index) {
assert(BB->isEHPad() && "BB is not an EHPad!");
IRBuilder<> IRB(BB->getContext());
IRB.SetInsertPoint(BB, BB->getFirstInsertionPt());
IRB.SetInsertPoint(BB->getFirstInsertionPt());
auto *FPI = cast<FuncletPadInst>(BB->getFirstNonPHI());
Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr;

@@ -1162,7 +1162,7 @@ void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
FI.FiniCB(Builder.saveIP());
// The continuation block is where code generation continues.
Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
Builder.SetInsertPoint(NonCancellationBlock->begin());
}
// Callback used to create OpenMP runtime calls to support

@@ -1196,7 +1196,7 @@ static void targetParallelCallback(
// Add alloca for kernel args
OpenMPIRBuilder ::InsertPointTy CurrentIP = Builder.saveIP();
Builder.SetInsertPoint(OuterAllocaBB, OuterAllocaBB->getFirstInsertionPt());
Builder.SetInsertPoint(OuterAllocaBB->getFirstInsertionPt());
AllocaInst *ArgsAlloca =
Builder.CreateAlloca(ArrayType::get(PtrTy, NumCapturedVars));
Value *Args = ArgsAlloca;

@@ -1571,8 +1571,7 @@ IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
// Store to stack at end of the block that currently branches to the entry
// block of the to-be-outlined region.
Builder.SetInsertPoint(InsertBB,
InsertBB->getTerminator()->getIterator());
Builder.SetInsertPoint(InsertBB->getTerminator()->getIterator());
Builder.CreateStore(&V, Ptr);
// Load back next to allocations in the to-be-outlined region.

@@ -1940,7 +1939,7 @@ OpenMPIRBuilder::createTask(const LocationDescription &Loc,
StaleCI->eraseFromParent();
Builder.SetInsertPoint(TaskAllocaBB, TaskAllocaBB->begin());
Builder.SetInsertPoint(TaskAllocaBB->begin());
if (HasShareds) {
LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
OutlinedFn.getArg(1)->replaceUsesWithIf(

@@ -1954,7 +1953,7 @@ OpenMPIRBuilder::createTask(const LocationDescription &Loc,
};
addOutlineInfo(std::move(OI));
Builder.SetInsertPoint(TaskExitBB, TaskExitBB->begin());
Builder.SetInsertPoint(TaskExitBB->begin());
return Builder.saveIP();
}
@@ -2162,7 +2161,7 @@ OpenMPIRBuilder::createReductions(const LocationDescription &Loc,
Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
Builder.SetInsertPoint(InsertBlock->end());
for (auto En : enumerate(ReductionInfos)) {
unsigned Index = En.index();

@@ -2601,15 +2600,13 @@ OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
// the latch block.
CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
Builder.SetInsertPoint(CLI->getBody(),
CLI->getBody()->getFirstInsertionPt());
Builder.SetInsertPoint(CLI->getBody()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(DL);
return Builder.CreateAdd(OldIV, LowerBound);
});
// In the "exit" block, call the "fini" function.
Builder.SetInsertPoint(CLI->getExit(),
CLI->getExit()->getTerminator()->getIterator());
Builder.SetInsertPoint(CLI->getExit()->getTerminator()->getIterator());
Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
// Add the barrier if requested.

@@ -2750,7 +2747,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
});
// In the "exit" block, call the "fini" function.
Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
Builder.SetInsertPoint(DispatchExit->getFirstInsertionPt());
Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
// Add the barrier if requested.

@@ -3169,7 +3166,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
PreHeader->getParent());
// This needs to be 32-bit always, so can't use the IVTy Zero above.
Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
Builder.SetInsertPoint(OuterCond->getFirstInsertionPt());
Value *Res =
Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
PLowerBound, PUpperBound, PStride});

@@ -3194,7 +3191,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
// Modify the inner condition:
// * Use the UpperBound returned from the DynamicNext call.
// * jump to the loop outer loop when done with one of the inner loops.
Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
Builder.SetInsertPoint(Cond->getFirstInsertionPt());
UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
Instruction *Comp = &*Builder.GetInsertPoint();
auto *CI = cast<CmpInst>(Comp);

@@ -6342,7 +6339,7 @@ OpenMPIRBuilder::createTeams(const LocationDescription &Loc,
BasicBlock &OuterAllocaBB = CurrentFunction->getEntryBlock();
if (&OuterAllocaBB == Builder.GetInsertBlock()) {
BasicBlock *BodyBB = splitBB(Builder, /*CreateBranch=*/true, "teams.entry");
Builder.SetInsertPoint(BodyBB, BodyBB->begin());
Builder.SetInsertPoint(BodyBB->begin());
}
// The current basic block is split into four basic blocks. After outlining,

@@ -6466,7 +6463,7 @@ OpenMPIRBuilder::createTeams(const LocationDescription &Loc,
addOutlineInfo(std::move(OI));
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
Builder.SetInsertPoint(ExitBB->begin());
return Builder.saveIP();
}

@@ -2472,7 +2472,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
LLVMContext &C = CI->getContext();
IRBuilder<> Builder(C);
Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
Builder.SetInsertPoint(CI->getIterator());
if (!NewFn) {
bool FallthroughToDefaultUpgrade = false;

@@ -5006,7 +5006,7 @@ void llvm::UpgradeARCRuntime(Module &M) {
if (!CI || CI->getCalledFunction() != Fn)
continue;
IRBuilder<> Builder(CI->getParent(), CI->getIterator());
IRBuilder<> Builder(CI->getIterator());
FunctionType *NewFuncTy = NewFn->getFunctionType();
SmallVector<Value *, 2> Args;

@@ -3135,8 +3135,10 @@ LLVMBuilderRef LLVMCreateBuilder(void) {
static void LLVMPositionBuilderImpl(IRBuilder<> *Builder, BasicBlock *Block,
Instruction *Instr, bool BeforeDbgRecords) {
BasicBlock::iterator I = Instr ? Instr->getIterator() : Block->end();
assert(I.getNodeParent() == Block &&
"Non-null Instr must be contained in Block!");
I.setHeadBit(BeforeDbgRecords);
Builder->SetInsertPoint(Block, I);
Builder->SetInsertPoint(I);
}
void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block,

@@ -158,7 +158,7 @@ bool SVEIntrinsicOpts::coalescePTrueIntrinsicCalls(
LLVMContext &Ctx = BB.getContext();
IRBuilder<> Builder(Ctx);
Builder.SetInsertPoint(&BB, ++MostEncompassingPTrue->getIterator());
Builder.SetInsertPoint(++MostEncompassingPTrue->getIterator());
auto *MostEncompassingPTrueVTy =
cast<VectorType>(MostEncompassingPTrue->getType());

@@ -175,7 +175,7 @@ bool SVEIntrinsicOpts::coalescePTrueIntrinsicCalls(
if (MostEncompassingPTrueVTy != PTrueVTy) {
ConvertFromCreated = true;
Builder.SetInsertPoint(&BB, ++ConvertToSVBool->getIterator());
Builder.SetInsertPoint(++ConvertToSVBool->getIterator());
auto *ConvertFromSVBool =
Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
{PTrueVTy}, {ConvertToSVBool});

@@ -994,7 +994,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
if (IsPixelShader) {
// Need a final PHI to reconverge to above the helper lane branch mask.
B.SetInsertPoint(PixelExitBB, PixelExitBB->getFirstNonPHIIt());
B.SetInsertPoint(PixelExitBB->getFirstNonPHIIt());
PHINode *const PHI = B.CreatePHI(Ty, 2);
PHI->addIncoming(PoisonValue::get(Ty), PixelEntryBB);
@@ -1118,7 +1118,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
if (ReqdAccuracy < 1.0f)
return false;
IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
IRBuilder<> Builder(std::next(FDiv.getIterator()));
Builder.setFastMathFlags(DivFMF);
Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

@@ -1328,7 +1328,7 @@ AMDGPULibCalls::insertSinCos(Value *Arg, FastMathFlags FMF, IRBuilder<> &B,
// sincos call there. Otherwise, right after the allocas works well enough
// if it's an argument or constant.
B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
B.SetInsertPoint(++ArgInst->getIterator());
// SetInsertPoint unwelcomely always tries to set the debug loc.
B.SetCurrentDebugLocation(DL);

@@ -106,7 +106,7 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
LLVMContext &Ctx = F.getParent()->getContext();
const DataLayout &DL = F.getParent()->getDataLayout();
BasicBlock &EntryBlock = *F.begin();
IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));
IRBuilder<> Builder(getInsertPt(EntryBlock));
const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

@@ -283,7 +283,7 @@ class AMDGPULowerModuleLDS {
// codegen would suffice for that, but one would still need to ensure that
// the variables are allocated in the anticpated order.
BasicBlock *Entry = &Func->getEntryBlock();
IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
IRBuilder<> Builder(Entry->getFirstNonPHIIt());
Function *Decl =
Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

@@ -336,7 +336,7 @@ bool SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
// Split edge to make Def dominate Use
FirstInsertionPt = SplitEdge(DefBB, BB, DT, LI)->getFirstInsertionPt();
}
IRBuilder<> IRB(FirstInsertionPt->getParent(), FirstInsertionPt);
IRBuilder<> IRB(FirstInsertionPt);
// TODO: StructurizeCFG 'Flow' blocks have debug locations from the
// condition, for now just avoid copying these DebugLocs so that stepping
// out of the then/else block in a debugger doesn't step to the condition.

@@ -637,8 +637,7 @@ void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
BasicBlock::iterator(InsertAfter));
IRBuilder<NoFolder> Builder(InsertAfter->getIterator());
Instruction *Call = Builder.CreateCall(SMLAD, Args);
NumSMLAD++;
return Call;

@@ -758,8 +757,7 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
// Insert the load at the point of the original dominating load.
LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
IRBuilder<NoFolder> IRB(DomLoad->getParent(),
++BasicBlock::iterator(DomLoad));
IRBuilder<NoFolder> IRB(++BasicBlock::iterator(DomLoad));
// Create the wide load, while making sure to maintain the original alignment
// as this prevents ldrd from being generated when it could be illegal due to

@@ -382,7 +382,7 @@ static bool tryInterleave(Instruction *Start,
for (Instruction *I : Truncs) {
LLVM_DEBUG(dbgs() << "Replacing trunc " << *I << "\n");
Builder.SetInsertPoint(I->getParent(), ++I->getIterator());
Builder.SetInsertPoint(++I->getIterator());
Value *Shuf = Builder.CreateShuffleVector(I, TruncMask);
I->replaceAllUsesWith(Shuf);
cast<Instruction>(Shuf)->setOperand(0, I);

@@ -381,7 +381,7 @@ void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
cast<FixedVectorType>(ActiveLaneMask->getType())->getNumElements();
// Insert a phi to count the number of elements processed by the loop.
Builder.SetInsertPoint(L->getHeader(), L->getHeader()->getFirstNonPHIIt());
Builder.SetInsertPoint(L->getHeader()->getFirstNonPHIIt());
PHINode *Processed = Builder.CreatePHI(Ty, 2);
Processed->addIncoming(Start, L->getLoopPreheader());

@@ -1115,7 +1115,7 @@ bool PolynomialMultiplyRecognize::promoteTypes(BasicBlock *LoopB,
assert(Ty0 == DestTy);
// In order to create the trunc, P must have the promoted type.
P->mutateType(Ty0);
Value *T = IRBuilder<>(ExitB, End).CreateTrunc(P, PTy);
Value *T = IRBuilder<>(End).CreateTrunc(P, PTy);
// In order for the RAUW to work, the types of P and T must match.
P->mutateType(PTy);
P->replaceAllUsesWith(T);

@@ -1462,7 +1462,7 @@ bool PolynomialMultiplyRecognize::convertShiftsToLeft(BasicBlock *LoopB,
// them right after the loop exit.
// Take advantage of the loop-closed SSA form, which has all the post-
// loop values in phi nodes.
IRB.SetInsertPoint(ExitB, ExitB->getFirstInsertionPt());
IRB.SetInsertPoint(ExitB->getFirstInsertionPt());
for (auto P = ExitB->begin(), Q = ExitB->end(); P != Q; ++P) {
if (!isa<PHINode>(P))
break;

@@ -1460,8 +1460,7 @@ auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
InsertAt = &*std::next(InsertAt->getIterator());
}
IRBuilder Builder(InsertAt->getParent(), InsertAt->getIterator(),
InstSimplifyFolder(HVC.DL));
IRBuilder Builder(InsertAt->getIterator(), InstSimplifyFolder(HVC.DL));
Value *AlignAddr = nullptr; // Actual aligned address.
Value *AlignVal = nullptr; // Right-shift amount (for valign).

@@ -1741,8 +1740,7 @@ auto HvxIdioms::processFxpMul(Instruction &In, const FxpOp &Op) const
// TODO: Add multiplication of vectors by scalar registers (up to 4 bytes).
Value *X = Op.X.Val, *Y = Op.Y.Val;
IRBuilder Builder(In.getParent(), In.getIterator(),
InstSimplifyFolder(HVC.DL));
IRBuilder Builder(In.getIterator(), InstSimplifyFolder(HVC.DL));
auto roundUpWidth = [](unsigned Width) -> unsigned {
if (Width <= 32 && !isPowerOf2_32(Width)) {

@@ -552,7 +552,7 @@ void HexagonVectorLoopCarriedReuse::reuseValue() {
}
BasicBlock *BB = BEInst->getParent();
IRBuilder<> IRB(BB);
IRB.SetInsertPoint(BB, BB->getFirstNonPHIIt());
IRB.SetInsertPoint(BB->getFirstNonPHIIt());
Value *BEVal = BEInst;
PHINode *NewPhi;
for (int i = Iterations-1; i >=0 ; --i) {
@@ -111,7 +111,7 @@ class PPCBoolRetToInt : public FunctionPass {
if (auto *I = dyn_cast<Instruction>(V))
IRB.SetInsertPoint(I->getNextNode());
else
IRB.SetInsertPoint(&Func->getEntryBlock(), Func->getEntryBlock().begin());
IRB.SetInsertPoint(Func->getEntryBlock().begin());
return IRB.CreateZExt(V, IntTy);
}

@@ -1402,7 +1402,7 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
AggrStores.insert(&I);
}
B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
B.SetInsertPoint(Func.getEntryBlock().begin());
for (auto &GV : Func.getParent()->globals())
processGlobalValue(GV, B);

@@ -1305,7 +1305,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// Add a phi to the tail, which will be the output of setjmp, which
// indicates if this is the first call or a longjmp back. The phi directly
// uses the right value based on where we arrive from
IRB.SetInsertPoint(Tail, Tail->getFirstNonPHIIt());
IRB.SetInsertPoint(Tail->getFirstNonPHIIt());
PHINode *SetjmpRet = IRB.CreatePHI(IRB.getInt32Ty(), 2, "setjmp.ret");
// setjmp initial call returns 0

@@ -492,7 +492,7 @@ X86LowerAMXIntrinsics::lowerTileDP(Instruction *TileDP) {
KDWord, C, A, B);
// we cannot assume there always be bitcast after tiledpbssd. So we need to
// insert one bitcast as required
Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
Builder.SetInsertPoint(End->getFirstNonPHIIt());
Value *ResAMX =
Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
// Delete TileDP intrinsic and do some clean-up.

@@ -536,7 +536,7 @@ bool X86LowerAMXIntrinsics::lowerTileLoadStore(Instruction *TileLoadStore) {
if (IsTileLoad) {
// we cannot assume there always be bitcast after tileload. So we need to
// insert one bitcast as required
Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
Builder.SetInsertPoint(End->getFirstNonPHIIt());
Value *ResAMX =
Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
// Delete tileloadd6 intrinsic and do some clean-up

@@ -476,9 +476,8 @@ static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) {
Value *Row = II->getOperand(0);
Value *Col = II->getOperand(1);
BasicBlock *BB = TileDef->getParent();
BasicBlock::iterator Iter = TileDef->getIterator();
IRBuilder<> Builder(BB, ++Iter);
IRBuilder<> Builder(++Iter);
Value *Stride = Builder.getInt64(64);
std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef};

@@ -274,7 +274,7 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
// Struct type of RegNode. Used for GEPing.
Type *RegNodeTy;
IRBuilder<> Builder(&F->getEntryBlock(), F->getEntryBlock().begin());
IRBuilder<> Builder(F->getEntryBlock().begin());
Type *Int8PtrType = Builder.getPtrTy();
Type *Int32Ty = Builder.getInt32Ty();
Type *VoidTy = Builder.getVoidTy();

@@ -139,7 +139,7 @@ static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
if (Pred != CmpInst::ICMP_EQ)
return false;
IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
IRBuilder<> Builder(PhiBB->getFirstInsertionPt());
if (ShVal0 == ShVal1)
++NumGuardedRotates;

@@ -1859,7 +1859,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
}
auto Index = FrameData.getFieldIndex(Def);
Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
Builder.SetInsertPoint(InsertPt);
auto *G = Builder.CreateConstInBoundsGEP2_32(
FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
if (ByValTy) {

@@ -1879,8 +1879,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
// reference provided with the frame GEP.
if (CurrentBlock != U->getParent()) {
CurrentBlock = U->getParent();
Builder.SetInsertPoint(CurrentBlock,
CurrentBlock->getFirstInsertionPt());
Builder.SetInsertPoint(CurrentBlock->getFirstInsertionPt());
auto *GEP = GetFramePointer(E.first);
GEP->setName(E.first->getName() + Twine(".reload.addr"));

@@ -1971,7 +1970,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
Shape.ABI == coro::ABI::Async) {
// If we found any allocas, replace all of their remaining uses with Geps.
Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
Builder.SetInsertPoint(SpillBlock->begin());
for (const auto &P : FrameData.Allocas) {
AllocaInst *Alloca = P.Alloca;
auto *G = GetFramePointer(Alloca);

@@ -1990,8 +1989,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
// dbg.declares and dbg.values with the reload from the frame.
// Note: We cannot replace the alloca with GEP instructions indiscriminately,
// as some of the uses may not be dominated by CoroBegin.
Builder.SetInsertPoint(Shape.AllocaSpillBlock,
Shape.AllocaSpillBlock->begin());
Builder.SetInsertPoint(Shape.AllocaSpillBlock->begin());
SmallVector<Instruction *, 4> UsersToUpdate;
for (const auto &A : FrameData.Allocas) {
AllocaInst *Alloca = A.Alloca;

@@ -2876,7 +2874,7 @@ salvageDebugInfoImpl(SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
while (isa<IntrinsicInst>(InsertPt))
++InsertPt;
Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
Builder.SetInsertPoint(InsertPt);
while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {

@@ -7469,7 +7469,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
unsigned ArgNo, BasicBlock::iterator IP) {
assert(PrivType && "Expected privatizable type!");
IRBuilder<NoFolder> IRB(IP->getParent(), IP);
IRBuilder<NoFolder> IRB(IP);
const DataLayout &DL = F.getParent()->getDataLayout();
// Traverse the type, build GEPs and stores.
@@ -1722,8 +1722,7 @@ private:
auto &IRBuilder = OMPInfoCache.OMPBuilder;
Function *F = RuntimeCall.getCaller();
BasicBlock &Entry = F->getEntryBlock();
IRBuilder.Builder.SetInsertPoint(&Entry,
Entry.getFirstNonPHIOrDbgOrAlloca());
IRBuilder.Builder.SetInsertPoint(Entry.getFirstNonPHIOrDbgOrAlloca());
Value *Handle = IRBuilder.Builder.CreateAlloca(
IRBuilder.AsyncInfo, /*ArraySize=*/nullptr, "handle");
Handle =

@@ -4153,7 +4153,7 @@ Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
// users are freely-invertible, so that 'not' *will* get folded away.
BuilderTy::InsertPointGuard Guard(Builder);
// Set insertion point to right after the Y.
Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
Builder.SetInsertPoint(++(Y->getIterator()));
Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
// Replace all uses of Y (excluding the one in NotY!) with NotY.
Worklist.pushUsersToWorkList(*Y);

@@ -514,7 +514,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
bool Before = true) {
if (auto *PHI = dyn_cast<PHINode>(V)) {
BasicBlock *Parent = PHI->getParent();
Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
Builder.SetInsertPoint(Parent->getFirstInsertionPt());
return;
}
if (auto *I = dyn_cast<Instruction>(V)) {

@@ -526,7 +526,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
if (auto *A = dyn_cast<Argument>(V)) {
// Set the insertion point in the entry block.
BasicBlock &Entry = A->getParent()->getEntryBlock();
Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
Builder.SetInsertPoint(Entry.getFirstInsertionPt());
return;
}
// Otherwise, this is a constant and we don't need to set a new

@@ -1371,7 +1371,7 @@ static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
// sinking.
auto InsertPt = BB->getFirstInsertionPt();
if (InsertPt != BB->end()) {
Self.Builder.SetInsertPoint(&*BB, InsertPt);
Self.Builder.SetInsertPoint(InsertPt);
return Self.Builder.CreateNot(Cond);
}

@@ -1417,7 +1417,7 @@ static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder) {
if (Iv2Start != Identity)
return nullptr;
Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
Builder.SetInsertPoint(BB->getFirstInsertionPt());
if (!BO) {
auto *GEP = cast<GEPOperator>(IvNext);
return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",

@@ -2678,7 +2678,7 @@ static Instruction *foldSelectToPhiImpl(SelectInst &Sel, BasicBlock *BB,
return nullptr;
}
Builder.SetInsertPoint(BB, BB->begin());
Builder.SetInsertPoint(BB->begin());
auto *PN = Builder.CreatePHI(Sel.getType(), Inputs.size());
for (auto *Pred : predecessors(BB))
PN->addIncoming(Inputs[Pred], Pred);

@@ -1123,7 +1123,7 @@ Instruction *InstCombinerImpl::foldAggregateConstructionIntoAggregateReuse(
// Note that the same block can be a predecessor more than once,
// and we need to preserve that invariant for the PHI node.
BuilderTy::InsertPointGuard Guard(Builder);
Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());
Builder.SetInsertPoint(UseBB->getFirstNonPHIIt());
auto *PHI =
Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");
for (BasicBlock *Pred : Preds)

@@ -1956,8 +1956,7 @@ void AddressSanitizer::instrumentUnusualSizeOrAlignment(
void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
IRBuilder<> IRB(&GlobalInit.front(),
GlobalInit.front().getFirstInsertionPt());
IRBuilder<> IRB(GlobalInit.front().getFirstInsertionPt());
// Add a call to poison all external globals before the given function starts.
Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);

@@ -2869,7 +2868,7 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
if (F.getName().contains(" load]")) {
FunctionCallee AsanInitFunction =
declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
IRBuilder<> IRB(&F.front(), F.front().begin());
IRBuilder<> IRB(F.front().begin());
IRB.CreateCall(AsanInitFunction, {});
return true;
}

@@ -155,7 +155,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
SmallVector<std::pair<Instruction *, Value *>, 4> TrapInfo;
for (Instruction &I : instructions(F)) {
Value *Or = nullptr;
BuilderTy IRB(I.getParent(), BasicBlock::iterator(&I), TargetFolder(DL));
BuilderTy IRB(I.getIterator(), TargetFolder(DL));
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (!LI->isVolatile())
Or = getBoundsCheckCond(LI->getPointerOperand(), LI, DL, TLI,

@@ -215,7 +215,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
// Add the checks.
for (const auto &Entry : TrapInfo) {
Instruction *Inst = Entry.first;
BuilderTy IRB(Inst->getParent(), BasicBlock::iterator(Inst), TargetFolder(DL));
BuilderTy IRB(Inst->getIterator(), TargetFolder(DL));
insertBoundsCheck(Entry.second, IRB, GetTrapBB);
}

@@ -988,7 +988,7 @@ Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
if (DFS.isZeroShadow(PrimitiveShadow))
return DFS.getZeroShadow(ShadowTy);
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
SmallVector<unsigned, 4> Indices;
Value *Shadow = UndefValue::get(ShadowTy);
Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
@@ -1039,7 +1039,7 @@ Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
if (CS && DT.dominates(CS, Pos))
return CS;
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
// Caches the converted primitive shadow value.
CS = PrimitiveShadow;

@@ -1772,7 +1772,7 @@ bool DataFlowSanitizer::runImpl(
Pos = DFSF.F->getEntryBlock().begin();
while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
Pos = std::next(Pos->getIterator());
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
Value *Ne =
IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);

@@ -1919,7 +1919,7 @@ std::pair<Value *, Value *>
DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
BasicBlock::iterator Pos) {
// Returns ((Addr & shadow_mask) + origin_base - shadow_base) & ~4UL
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *ShadowOffset = getShadowOffset(Addr, IRB);
Value *ShadowLong = ShadowOffset;
uint64_t ShadowBase = MapParams->ShadowBase;

@@ -1952,13 +1952,13 @@ DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
BasicBlock::iterator Pos,
Value *ShadowOffset) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
return IRB.CreateIntToPtr(ShadowOffset, PrimitiveShadowPtrTy);
}
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
BasicBlock::iterator Pos) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *ShadowOffset = getShadowOffset(Addr, IRB);
return getShadowAddress(Addr, Pos, ShadowOffset);
}

@@ -2010,7 +2010,7 @@ Value *DFSanFunction::combineShadows(Value *V1, Value *V2,
Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
Value *PV2 = collapseToPrimitiveShadow(V2, Pos);
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
CCS.Block = Pos->getParent();
CCS.Shadow = IRB.CreateOr(PV1, PV2);

@@ -2074,7 +2074,7 @@ Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
}
Value *OpShadow = Shadows[I];
Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
}

@@ -2143,7 +2143,7 @@ bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
Value *DataFlowSanitizer::loadNextOrigin(BasicBlock::iterator Pos,
Align OriginAlign,
Value **OriginAddr) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
*OriginAddr =
IRB.CreateGEP(OriginTy, *OriginAddr, ConstantInt::get(IntptrTy, 1));
return IRB.CreateAlignedLoad(OriginTy, *OriginAddr, OriginAlign);

@@ -2175,7 +2175,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowFast(
Type *WideShadowTy =
ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx);
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *CombinedWideShadow =
IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);

@@ -2244,7 +2244,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto SI = AllocaShadowMap.find(AI);
if (SI != AllocaShadowMap.end()) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
const auto OI = AllocaOriginMap.find(AI);
assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());

@@ -2279,7 +2279,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
// tracking.
if (ShouldTrackOrigins &&
useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
CallInst *Call =
IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
{Addr, ConstantInt::get(DFS.IntptrTy, Size)});

@@ -2298,7 +2298,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
const Align OriginAlign = getOriginAlign(InstAlignment);
Value *Origin = nullptr;
if (ShouldTrackOrigins) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
}

@@ -2311,7 +2311,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
return {LI, Origin};
}
case 2: {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
ConstantInt::get(DFS.IntptrTy, 1));
Value *Load =

@@ -2327,7 +2327,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
return loadShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
OriginAlign, Origin, Pos);
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
CallInst *FallbackCall = IRB.CreateCall(
DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
FallbackCall->addRetAttr(Attribute::ZExt);

@@ -2342,7 +2342,7 @@ DFSanFunction::loadShadowOrigin(Value *Addr, uint64_t Size, Align InstAlignment,
loadShadowOriginSansLoadTracking(Addr, Size, InstAlignment, Pos);
if (DFS.shouldTrackOrigins()) {
if (ClTrackOrigins == 2) {
IRBuilder<> IRB(Pos->getParent(), Pos);
IRBuilder<> IRB(Pos);
auto *ConstantShadow = dyn_cast<Constant>(PrimitiveShadow);
if (!ConstantShadow || !ConstantShadow->isZeroValue())
Origin = updateOriginIfTainted(PrimitiveShadow, Origin, IRB);
@ -2445,14 +2445,14 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
|
||||
}
|
||||
|
||||
if (ClEventCallbacks) {
|
||||
IRBuilder<> IRB(Pos->getParent(), Pos);
|
||||
IRBuilder<> IRB(Pos);
|
||||
Value *Addr = LI.getPointerOperand();
|
||||
CallInst *CI =
|
||||
IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr});
|
||||
CI->addParamAttr(0, Attribute::ZExt);
|
||||
}
|
||||
|
||||
IRBuilder<> IRB(AfterLi->getParent(), AfterLi);
|
||||
IRBuilder<> IRB(AfterLi);
|
||||
DFSF.addReachesFunctionCallbacksIfEnabled(IRB, LI, &LI);
|
||||
}
|
||||
|
||||
@ -2531,7 +2531,7 @@ void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
|
||||
// untainted sinks.
|
||||
const Align OriginAlignment = getOriginAlign(InstAlignment);
|
||||
Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
|
||||
IRBuilder<> IRB(Pos->getParent(), Pos);
|
||||
IRBuilder<> IRB(Pos);
|
||||
if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
|
||||
if (!ConstantShadow->isZeroValue())
|
||||
paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
|
||||
@ -2558,7 +2558,7 @@ void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
|
||||
void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
|
||||
Align ShadowAlign,
|
||||
BasicBlock::iterator Pos) {
|
||||
IRBuilder<> IRB(Pos->getParent(), Pos);
|
||||
IRBuilder<> IRB(Pos);
|
||||
IntegerType *ShadowTy =
|
||||
IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
|
||||
Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
|
||||
@ -2578,7 +2578,7 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
|
||||
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
|
||||
const auto SI = AllocaShadowMap.find(AI);
|
||||
if (SI != AllocaShadowMap.end()) {
|
||||
IRBuilder<> IRB(Pos->getParent(), Pos);
|
||||
IRBuilder<> IRB(Pos);
|
||||
IRB.CreateStore(PrimitiveShadow, SI->second);
|
||||
|
||||
// Do not write origins for 0 shadows because we do not trace origins for
|
||||
@ -2598,7 +2598,7 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
|
||||
return;
|
||||
}
|
||||
|
||||
IRBuilder<> IRB(Pos->getParent(), Pos);
|
||||
IRBuilder<> IRB(Pos);
|
||||
Value *ShadowAddr, *OriginAddr;
|
||||
std::tie(ShadowAddr, OriginAddr) =
|
||||
DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
|
||||
|
@ -918,7 +918,7 @@ bool GCOVProfiler::emitProfileNotes(
|
||||
|
||||
for (size_t I : llvm::seq<size_t>(0, Measured)) {
|
||||
const Edge &E = *MST.allEdges()[I];
|
||||
IRBuilder<> Builder(E.Place, E.Place->getFirstInsertionPt());
|
||||
IRBuilder<> Builder(E.Place->getFirstInsertionPt());
|
||||
Value *V = Builder.CreateConstInBoundsGEP2_64(
|
||||
Counters->getValueType(), Counters, 0, I);
|
||||
// Disable sanitizers to decrease size bloat. We don't expect
|
||||
|
@ -1622,7 +1622,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
|
||||
A.removeAttr(llvm::Attribute::WriteOnly);
|
||||
|
||||
BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
|
||||
IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
|
||||
IRBuilder<> EntryIRB(InsertPt);
|
||||
emitPrologue(EntryIRB,
|
||||
/*WithFrameRecord*/ ClRecordStackHistory != none &&
|
||||
Mapping.WithFrameRecord &&
|
||||
|
@ -558,7 +558,7 @@ bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
|
||||
if (F.getName().contains(" load]")) {
|
||||
FunctionCallee MemProfInitFunction =
|
||||
declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
|
||||
IRBuilder<> IRB(&F.front(), F.front().begin());
|
||||
IRBuilder<> IRB(F.front().begin());
|
||||
IRB.CreateCall(MemProfInitFunction, {});
|
||||
return true;
|
||||
}
|
||||
|
@ -885,7 +885,7 @@ static void instrumentOneFunc(
|
||||
FuncInfo.FunctionHash);
|
||||
if (PGOFunctionEntryCoverage) {
|
||||
auto &EntryBB = F.getEntryBlock();
|
||||
IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt());
|
||||
IRBuilder<> Builder(EntryBB.getFirstInsertionPt());
|
||||
// llvm.instrprof.cover(i8* <name>, i64 <hash>, i32 <num-counters>,
|
||||
// i32 <index>)
|
||||
Builder.CreateCall(
|
||||
@ -940,7 +940,7 @@ static void instrumentOneFunc(
|
||||
if (PGOTemporalInstrumentation) {
|
||||
NumCounters += PGOBlockCoverage ? 8 : 1;
|
||||
auto &EntryBB = F.getEntryBlock();
|
||||
IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt());
|
||||
IRBuilder<> Builder(EntryBB.getFirstInsertionPt());
|
||||
// llvm.instrprof.timestamp(i8* <name>, i64 <hash>, i32 <num-counters>,
|
||||
// i32 <index>)
|
||||
Builder.CreateCall(
|
||||
@ -950,7 +950,7 @@ static void instrumentOneFunc(
|
||||
}
|
||||
|
||||
for (auto *InstrBB : InstrumentBBs) {
|
||||
IRBuilder<> Builder(InstrBB, InstrBB->getFirstInsertionPt());
|
||||
IRBuilder<> Builder(InstrBB->getFirstInsertionPt());
|
||||
assert(Builder.GetInsertPoint() != InstrBB->end() &&
|
||||
"Cannot get the Instrumentation point");
|
||||
// llvm.instrprof.increment(i8* <name>, i64 <hash>, i32 <num-counters>,
|
||||
|
@ -80,7 +80,7 @@ CallInst *BundledRetainClaimRVs::insertRVCall(BasicBlock::iterator InsertPt,
|
||||
CallInst *BundledRetainClaimRVs::insertRVCallWithColors(
|
||||
BasicBlock::iterator InsertPt, CallBase *AnnotatedCall,
|
||||
const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
|
||||
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
|
||||
IRBuilder<> Builder(InsertPt);
|
||||
Function *Func = *objcarc::getAttachedARCFunction(AnnotatedCall);
|
||||
assert(Func && "operand isn't a Function");
|
||||
Type *ParamTy = Func->getArg(0)->getType();
|
||||
|
@ -1575,7 +1575,7 @@ void ConstraintInfo::addFact(CmpInst::Predicate Pred, Value *A, Value *B,
|
||||
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B,
|
||||
SmallVectorImpl<Instruction *> &ToRemove) {
|
||||
bool Changed = false;
|
||||
IRBuilder<> Builder(II->getParent(), II->getIterator());
|
||||
IRBuilder<> Builder(II->getIterator());
|
||||
Value *Sub = nullptr;
|
||||
for (User *U : make_early_inc_range(II->users())) {
|
||||
if (match(U, m_ExtractValue<0>(m_Value()))) {
|
||||
|
@ -792,7 +792,7 @@ Value *GuardWideningImpl::hoistChecks(SmallVectorImpl<Value *> &ChecksToHoist,
|
||||
Value *OldCondition,
|
||||
BasicBlock::iterator InsertPt) {
|
||||
assert(!ChecksToHoist.empty());
|
||||
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
|
||||
IRBuilder<> Builder(InsertPt);
|
||||
makeAvailableAt(ChecksToHoist, InsertPt);
|
||||
makeAvailableAt(OldCondition, InsertPt);
|
||||
Value *Result = Builder.CreateAnd(ChecksToHoist);
|
||||
|
@ -430,7 +430,7 @@ static bool splitLoopBound(Loop &L, DominatorTree &DT, LoopInfo &LI,
|
||||
ExitingCond.BI->setSuccessor(1, PostLoopPreHeader);
|
||||
|
||||
// Update phi node in exit block of post-loop.
|
||||
Builder.SetInsertPoint(PostLoopPreHeader, PostLoopPreHeader->begin());
|
||||
Builder.SetInsertPoint(PostLoopPreHeader->begin());
|
||||
for (PHINode &PN : PostLoop->getExitBlock()->phis()) {
|
||||
for (auto i : seq<int>(0, PN.getNumOperands())) {
|
||||
// Check incoming block is pre-loop's exiting block.
|
||||
|
@ -2491,7 +2491,7 @@ bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
|
||||
// Step 4: Rewrite the loop into a countable form, with canonical IV.
|
||||
|
||||
// The new canonical induction variable.
|
||||
Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
|
||||
Builder.SetInsertPoint(LoopHeaderBB->begin());
|
||||
auto *IV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
|
||||
|
||||
// The induction itself.
|
||||
@ -2815,11 +2815,11 @@ bool LoopIdiomRecognize::recognizeShiftUntilZero() {
|
||||
// Step 3: Rewrite the loop into a countable form, with canonical IV.
|
||||
|
||||
// The new canonical induction variable.
|
||||
Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
|
||||
Builder.SetInsertPoint(LoopHeaderBB->begin());
|
||||
auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
|
||||
|
||||
// The induction itself.
|
||||
Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->getFirstNonPHIIt());
|
||||
Builder.SetInsertPoint(LoopHeaderBB->getFirstNonPHIIt());
|
||||
auto *CIVNext =
|
||||
Builder.CreateAdd(CIV, ConstantInt::get(Ty, 1), CIV->getName() + ".next",
|
||||
/*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
|
||||
|
@ -782,7 +782,7 @@ bool LoopPredication::widenWidenableBranchGuardConditions(
|
||||
BI->setCondition(AllChecks);
|
||||
if (InsertAssumesOfPredicatedGuardsConditions) {
|
||||
BasicBlock *IfTrueBB = BI->getSuccessor(0);
|
||||
Builder.SetInsertPoint(IfTrueBB, IfTrueBB->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(IfTrueBB->getFirstInsertionPt());
|
||||
// If this block has other predecessors, we might not be able to use Cond.
|
||||
// In this case, create a Phi where every other input is `true` and input
|
||||
// from guard block is Cond.
|
||||
|
@ -1656,7 +1656,7 @@ public:
|
||||
// condition holds, they alias, otherwise they are guaranteed to not
|
||||
// overlap.
|
||||
Check1->getTerminator()->eraseFromParent();
|
||||
Builder.SetInsertPoint(Check1, Check1->begin());
|
||||
Builder.SetInsertPoint(Check1->begin());
|
||||
Value *LoadEnd = Builder.CreateAdd(
|
||||
LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
|
||||
"load.end", true, true);
|
||||
@ -1664,7 +1664,7 @@ public:
|
||||
Fusion);
|
||||
|
||||
// Copy load operand to new alloca.
|
||||
Builder.SetInsertPoint(Copy, Copy->begin());
|
||||
Builder.SetInsertPoint(Copy->begin());
|
||||
auto *VT = cast<FixedVectorType>(Load->getType());
|
||||
// Use an array type for the alloca, to avoid potentially huge alignment
|
||||
// requirements for large vector types.
|
||||
@ -1674,7 +1674,7 @@ public:
|
||||
|
||||
Builder.CreateMemCpy(Alloca, Alloca->getAlign(), Load->getPointerOperand(),
|
||||
Load->getAlign(), LoadLoc.Size.getValue());
|
||||
Builder.SetInsertPoint(Fusion, Fusion->begin());
|
||||
Builder.SetInsertPoint(Fusion->begin());
|
||||
PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
|
||||
PHI->addIncoming(Load->getPointerOperand(), Check0);
|
||||
PHI->addIncoming(Load->getPointerOperand(), Check1);
|
||||
|
@ -70,7 +70,7 @@ static bool optimizeSQRT(CallInst *Call, Function *CalledFunc,
|
||||
// Create phi that will merge results of either sqrt and replace all uses.
|
||||
BasicBlock *JoinBB = LibCallTerm->getSuccessor(0);
|
||||
JoinBB->setName(CurrBB.getName() + ".split");
|
||||
Builder.SetInsertPoint(JoinBB, JoinBB->begin());
|
||||
Builder.SetInsertPoint(JoinBB->begin());
|
||||
PHINode *Phi = Builder.CreatePHI(Ty, 2);
|
||||
Call->replaceAllUsesWith(Phi);
|
||||
|
||||
|
@ -1870,7 +1870,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
|
||||
UnwindBlock->getUniquePredecessor() &&
|
||||
"can't safely insert in this block!");
|
||||
|
||||
Builder.SetInsertPoint(UnwindBlock, UnwindBlock->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(UnwindBlock->getFirstInsertionPt());
|
||||
Builder.SetCurrentDebugLocation(II->getDebugLoc());
|
||||
|
||||
// Attach exceptional gc relocates to the landingpad.
|
||||
@ -1885,7 +1885,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
|
||||
NormalDest->getUniquePredecessor() &&
|
||||
"can't safely insert in this block!");
|
||||
|
||||
Builder.SetInsertPoint(NormalDest, NormalDest->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(NormalDest->getFirstInsertionPt());
|
||||
|
||||
// gc relocates will be generated later as if it were regular call
|
||||
// statepoint
|
||||
|
@ -2942,7 +2942,7 @@ private:
|
||||
// after the load, so that variable values referring to the load are
|
||||
// dominated by it.
|
||||
LIIt.setHeadBit(true);
|
||||
IRB.SetInsertPoint(LI.getParent(), LIIt);
|
||||
IRB.SetInsertPoint(LIIt);
|
||||
// Create a placeholder value with the same type as LI to use as the
|
||||
// basis for the new value. This allows us to replace the uses of LI with
|
||||
// the computed value, and then replace the placeholder with LI, leaving
|
||||
@ -3604,8 +3604,7 @@ private:
|
||||
// dominate the PHI.
|
||||
IRBuilderBase::InsertPointGuard Guard(IRB);
|
||||
if (isa<PHINode>(OldPtr))
|
||||
IRB.SetInsertPoint(OldPtr->getParent(),
|
||||
OldPtr->getParent()->getFirstInsertionPt());
|
||||
IRB.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt());
|
||||
else
|
||||
IRB.SetInsertPoint(OldPtr);
|
||||
IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc());
|
||||
|
@ -239,7 +239,7 @@ static void scalarizeMaskedLoad(const DataLayout &DL, CallInst *CI,
|
||||
IfBlock = NewIfBlock;
|
||||
|
||||
// Create the phi to join the new and previous value.
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
|
||||
Phi->addIncoming(NewVResult, CondBlock);
|
||||
Phi->addIncoming(VResult, PrevIfBlock);
|
||||
@ -366,7 +366,7 @@ static void scalarizeMaskedStore(const DataLayout &DL, CallInst *CI,
|
||||
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
|
||||
NewIfBlock->setName("else");
|
||||
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
}
|
||||
CI->eraseFromParent();
|
||||
|
||||
@ -493,7 +493,7 @@ static void scalarizeMaskedGather(const DataLayout &DL, CallInst *CI,
|
||||
IfBlock = NewIfBlock;
|
||||
|
||||
// Create the phi to join the new and previous value.
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
|
||||
Phi->addIncoming(NewVResult, CondBlock);
|
||||
Phi->addIncoming(VResult, PrevIfBlock);
|
||||
@ -615,7 +615,7 @@ static void scalarizeMaskedScatter(const DataLayout &DL, CallInst *CI,
|
||||
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
|
||||
NewIfBlock->setName("else");
|
||||
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
}
|
||||
CI->eraseFromParent();
|
||||
|
||||
@ -733,7 +733,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
|
||||
IfBlock = NewIfBlock;
|
||||
|
||||
// Create the phi to join the new and previous value.
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
PHINode *ResultPhi = Builder.CreatePHI(VecType, 2, "res.phi.else");
|
||||
ResultPhi->addIncoming(NewVResult, CondBlock);
|
||||
ResultPhi->addIncoming(VResult, PrevIfBlock);
|
||||
@ -847,7 +847,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
|
||||
BasicBlock *PrevIfBlock = IfBlock;
|
||||
IfBlock = NewIfBlock;
|
||||
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
|
||||
// Add a PHI for the pointer if this isn't the last iteration.
|
||||
if ((Idx + 1) != VectorWidth) {
|
||||
@ -918,7 +918,7 @@ static void scalarizeMaskedVectorHistogram(const DataLayout &DL, CallInst *CI,
|
||||
// Create "else" block, fill it in the next iteration
|
||||
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
|
||||
NewIfBlock->setName("else");
|
||||
Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
|
||||
Builder.SetInsertPoint(NewIfBlock->begin());
|
||||
}
|
||||
|
||||
CI->eraseFromParent();
|
||||
|
@ -127,10 +127,10 @@ public:
|
||||
Scatterer() = default;
|
||||
|
||||
// Scatter V into Size components. If new instructions are needed,
|
||||
// insert them before BBI in BB. If Cache is nonnull, use it to cache
|
||||
// insert them before BBI. If Cache is nonnull, use it to cache
|
||||
// the results.
|
||||
Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
|
||||
const VectorSplit &VS, ValueVector *cachePtr = nullptr);
|
||||
Scatterer(BasicBlock::iterator bbi, Value *v, const VectorSplit &VS,
|
||||
ValueVector *cachePtr = nullptr);
|
||||
|
||||
// Return component I, creating a new Value for it if necessary.
|
||||
Value *operator[](unsigned I);
|
||||
@ -139,7 +139,6 @@ public:
|
||||
unsigned size() const { return VS.NumFragments; }
|
||||
|
||||
private:
|
||||
BasicBlock *BB;
|
||||
BasicBlock::iterator BBI;
|
||||
Value *V;
|
||||
VectorSplit VS;
|
||||
@ -342,9 +341,9 @@ private:
|
||||
|
||||
} // end anonymous namespace
|
||||
|
||||
Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
|
||||
const VectorSplit &VS, ValueVector *cachePtr)
|
||||
: BB(bb), BBI(bbi), V(v), VS(VS), CachePtr(cachePtr) {
|
||||
Scatterer::Scatterer(BasicBlock::iterator bbi, Value *v, const VectorSplit &VS,
|
||||
ValueVector *cachePtr)
|
||||
: BBI(bbi), V(v), VS(VS), CachePtr(cachePtr) {
|
||||
IsPointer = V->getType()->isPointerTy();
|
||||
if (!CachePtr) {
|
||||
Tmp.resize(VS.NumFragments, nullptr);
|
||||
@ -363,7 +362,7 @@ Value *Scatterer::operator[](unsigned Frag) {
|
||||
// Try to reuse a previous value.
|
||||
if (CV[Frag])
|
||||
return CV[Frag];
|
||||
IRBuilder<> Builder(BB, BBI);
|
||||
IRBuilder<> Builder(BBI);
|
||||
if (IsPointer) {
|
||||
if (Frag == 0)
|
||||
CV[Frag] = V;
|
||||
@ -443,7 +442,7 @@ Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
|
||||
// so that it can be used everywhere.
|
||||
Function *F = VArg->getParent();
|
||||
BasicBlock *BB = &F->getEntryBlock();
|
||||
return Scatterer(BB, BB->begin(), V, VS, &Scattered[{V, VS.SplitTy}]);
|
||||
return Scatterer(BB->begin(), V, VS, &Scattered[{V, VS.SplitTy}]);
|
||||
}
|
||||
if (Instruction *VOp = dyn_cast<Instruction>(V)) {
|
||||
// When scalarizing PHI nodes we might try to examine/rewrite InsertElement
|
||||
@ -453,18 +452,17 @@ Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
|
||||
// originating from instructions in unreachable blocks as undef we do not
|
||||
// need to analyse them further.
|
||||
if (!DT->isReachableFromEntry(VOp->getParent()))
|
||||
return Scatterer(Point->getParent(), Point->getIterator(),
|
||||
PoisonValue::get(V->getType()), VS);
|
||||
return Scatterer(Point->getIterator(), PoisonValue::get(V->getType()),
|
||||
VS);
|
||||
// Put the scattered form of an instruction directly after the
|
||||
// instruction, skipping over PHI nodes and debug intrinsics.
|
||||
BasicBlock *BB = VOp->getParent();
|
||||
return Scatterer(
|
||||
BB, skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V, VS,
|
||||
skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V, VS,
|
||||
&Scattered[{V, VS.SplitTy}]);
|
||||
}
|
||||
// In the fallback case, just put the scattered before Point and
|
||||
// keep the result local to Point.
|
||||
return Scatterer(Point->getParent(), Point->getIterator(), V, VS);
|
||||
return Scatterer(Point->getIterator(), V, VS);
|
||||
}
|
||||
|
||||
// Replace Op with the gathered form of the components in CV. Defer the
|
||||
@ -1181,7 +1179,7 @@ bool ScalarizerVisitor::finish() {
|
||||
BasicBlock *BB = Op->getParent();
|
||||
IRBuilder<> Builder(Op);
|
||||
if (isa<PHINode>(Op))
|
||||
Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(BB->getFirstInsertionPt());
|
||||
|
||||
VectorSplit VS = *getVectorSplit(Ty);
|
||||
assert(VS.NumFragments == CV.size());
|
||||
|
@ -134,7 +134,7 @@ static Value *getStrlenWithNull(IRBuilder<> &Builder, Value *Str) {
|
||||
Builder.CreateCondBr(Cmp, WhileDone, While);
|
||||
|
||||
// Add one to the computed length.
|
||||
Builder.SetInsertPoint(WhileDone, WhileDone->begin());
|
||||
Builder.SetInsertPoint(WhileDone->begin());
|
||||
auto Begin = Builder.CreatePtrToInt(Str, Int64Ty);
|
||||
auto End = Builder.CreatePtrToInt(PtrPhi, Int64Ty);
|
||||
auto Len = Builder.CreateSub(End, Begin);
|
||||
@ -142,7 +142,7 @@ static Value *getStrlenWithNull(IRBuilder<> &Builder, Value *Str) {
|
||||
|
||||
// Final join.
|
||||
BranchInst::Create(Join, WhileDone);
|
||||
Builder.SetInsertPoint(Join, Join->begin());
|
||||
Builder.SetInsertPoint(Join->begin());
|
||||
auto LenPhi = Builder.CreatePHI(Len->getType(), 2);
|
||||
LenPhi->addIncoming(Len, WhileDone);
|
||||
LenPhi->addIncoming(Zero, Prev);
|
||||
|
@ -260,7 +260,7 @@ QuotRemWithBB FastDivInsertionTask::createSlowBB(BasicBlock *SuccessorBB) {
|
||||
QuotRemWithBB DivRemPair;
|
||||
DivRemPair.BB = BasicBlock::Create(MainBB->getParent()->getContext(), "",
|
||||
MainBB->getParent(), SuccessorBB);
|
||||
IRBuilder<> Builder(DivRemPair.BB, DivRemPair.BB->begin());
|
||||
IRBuilder<> Builder(DivRemPair.BB->begin());
|
||||
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
|
||||
|
||||
Value *Dividend = SlowDivOrRem->getOperand(0);
|
||||
@ -284,7 +284,7 @@ QuotRemWithBB FastDivInsertionTask::createFastBB(BasicBlock *SuccessorBB) {
|
||||
QuotRemWithBB DivRemPair;
|
||||
DivRemPair.BB = BasicBlock::Create(MainBB->getParent()->getContext(), "",
|
||||
MainBB->getParent(), SuccessorBB);
|
||||
IRBuilder<> Builder(DivRemPair.BB, DivRemPair.BB->begin());
|
||||
IRBuilder<> Builder(DivRemPair.BB->begin());
|
||||
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
|
||||
|
||||
Value *Dividend = SlowDivOrRem->getOperand(0);
|
||||
@ -310,7 +310,7 @@ QuotRemWithBB FastDivInsertionTask::createFastBB(BasicBlock *SuccessorBB) {
|
||||
QuotRemPair FastDivInsertionTask::createDivRemPhiNodes(QuotRemWithBB &LHS,
|
||||
QuotRemWithBB &RHS,
|
||||
BasicBlock *PhiBB) {
|
||||
IRBuilder<> Builder(PhiBB, PhiBB->begin());
|
||||
IRBuilder<> Builder(PhiBB->begin());
|
||||
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
|
||||
PHINode *QuoPhi = Builder.CreatePHI(getSlowType(), 2);
|
||||
QuoPhi->addIncoming(LHS.Quotient, LHS.BB);
|
||||
@ -327,7 +327,7 @@ QuotRemPair FastDivInsertionTask::createDivRemPhiNodes(QuotRemWithBB &LHS,
|
||||
/// doesn't need a runtime check.
|
||||
Value *FastDivInsertionTask::insertOperandRuntimeCheck(Value *Op1, Value *Op2) {
|
||||
assert((Op1 || Op2) && "Nothing to check");
|
||||
IRBuilder<> Builder(MainBB, MainBB->end());
|
||||
IRBuilder<> Builder(MainBB->end());
|
||||
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
|
||||
|
||||
Value *OrV;
|
||||
@ -397,7 +397,7 @@ std::optional<QuotRemPair> FastDivInsertionTask::insertFastDivAndRem() {
|
||||
isa<ConstantInt>(BCI->getOperand(0)))
|
||||
return std::nullopt;
|
||||
|
||||
IRBuilder<> Builder(MainBB, MainBB->end());
|
||||
IRBuilder<> Builder(MainBB->end());
|
||||
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
|
||||
|
||||
if (DividendShort && !isSignedOp()) {
|
||||
|
@ -113,7 +113,7 @@ static void createRetPHINode(Instruction *OrigInst, Instruction *NewInst,
|
||||
if (OrigInst->getType()->isVoidTy() || OrigInst->use_empty())
|
||||
return;
|
||||
|
||||
Builder.SetInsertPoint(MergeBlock, MergeBlock->begin());
|
||||
Builder.SetInsertPoint(MergeBlock->begin());
|
||||
PHINode *Phi = Builder.CreatePHI(OrigInst->getType(), 0);
|
||||
SmallVector<User *, 16> UsersToUpdate(OrigInst->users());
|
||||
for (User *U : UsersToUpdate)
|
||||
|
@ -487,7 +487,6 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
|
||||
FirstEntryBlock->splice(FirstEntryBlock->end(), SecondEntryBlock);
|
||||
BranchInst *PBI = cast<BranchInst>(FirstEntryBlock->getTerminator());
|
||||
assert(PBI->getCondition() == CInst2);
|
||||
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
|
||||
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
|
||||
Builder.SetInsertPoint(PBI);
|
||||
if (InvertCond2) {
|
||||
@ -495,7 +494,7 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
|
||||
}
|
||||
Value *NC = Builder.CreateBinOp(CombineOp, CInst1, PBI->getCondition());
|
||||
PBI->replaceUsesOfWith(PBI->getCondition(), NC);
|
||||
Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
|
||||
Builder.SetInsertPoint(SaveInsertPt);
|
||||
|
||||
// Remove IfTrue1
|
||||
if (IfTrue1 != FirstEntryBlock) {
|
||||
|
@ -1611,7 +1611,7 @@ static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
|
||||
Module *M, BasicBlock *InsertBlock,
|
||||
InlineFunctionInfo &IFI,
|
||||
Function *CalledFunc) {
|
||||
IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
|
||||
IRBuilder<> Builder(InsertBlock->begin());
|
||||
|
||||
Value *Size =
|
||||
Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
|
||||
@ -2611,7 +2611,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
|
||||
// `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
|
||||
if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
|
||||
!IFI.StaticAllocas.empty()) {
|
||||
IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
|
||||
IRBuilder<> builder(FirstNewBlock->begin());
|
||||
for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
|
||||
AllocaInst *AI = IFI.StaticAllocas[ai];
|
||||
// Don't mark swifterror allocas. They can't have bitcast uses.
|
||||
@ -2666,8 +2666,8 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
|
||||
// code with llvm.stacksave/llvm.stackrestore intrinsics.
|
||||
if (InlinedFunctionInfo.ContainsDynamicAllocas) {
|
||||
// Insert the llvm.stacksave.
|
||||
CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
|
||||
.CreateStackSave("savedstack");
|
||||
CallInst *SavedPtr =
|
||||
IRBuilder<>(FirstNewBlock->begin()).CreateStackSave("savedstack");
|
||||
|
||||
// Insert a call to llvm.stackrestore before any return instructions in the
|
||||
// inlined function.
|
||||
|
@ -316,7 +316,7 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
|
||||
// ; end: ; preds = %loop-exit, %special-cases
|
||||
// ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ]
|
||||
// ; ret i32 %q_5
|
||||
Builder.SetInsertPoint(End, End->begin());
|
||||
Builder.SetInsertPoint(End->begin());
|
||||
PHINode *Q_5 = Builder.CreatePHI(DivTy, 2);
|
||||
|
||||
// Populate the Phis, since all values have now been created. Our Phis were:
|
||||
|
@ -150,7 +150,7 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT,
|
||||
// We've computed IDF, now insert new phi-nodes there.
|
||||
SmallVector<PHINode *, 4> InsertedPHIsForVar;
|
||||
for (auto *FrontierBB : IDFBlocks) {
|
||||
IRBuilder<> B(FrontierBB, FrontierBB->begin());
|
||||
IRBuilder<> B(FrontierBB->begin());
|
||||
PHINode *PN = B.CreatePHI(R.Ty, 0, R.Name);
|
||||
R.Defines[FrontierBB] = PN;
|
||||
InsertedPHIsForVar.push_back(PN);
|
||||
|
@ -1069,7 +1069,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
|
||||
|
||||
// Create the PHI.
|
||||
BasicBlock *Header = L->getHeader();
|
||||
Builder.SetInsertPoint(Header, Header->begin());
|
||||
Builder.SetInsertPoint(Header->begin());
|
||||
PHINode *PN =
|
||||
Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
|
||||
|
||||
@ -1521,7 +1521,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
|
||||
return I->second;
|
||||
|
||||
SCEVInsertPointGuard Guard(Builder, this);
|
||||
Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
|
||||
Builder.SetInsertPoint(InsertPt);
|
||||
|
||||
// Expand the expression into instructions.
|
||||
SmallVector<Instruction *> DropPoisonGeneratingInsts;
|
||||
@ -1656,7 +1656,7 @@ void SCEVExpander::replaceCongruentIVInc(
|
||||
else
|
||||
IP = OrigInc->getNextNonDebugInstruction()->getIterator();
|
||||
|
||||
IRBuilder<> Builder(IP->getParent(), IP);
|
||||
IRBuilder<> Builder(IP);
|
||||
Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
|
||||
NewInc =
|
||||
Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
|
||||
@ -1759,8 +1759,7 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
|
||||
++NumElim;
|
||||
Value *NewIV = OrigPhiRef;
|
||||
if (OrigPhiRef->getType() != Phi->getType()) {
|
||||
IRBuilder<> Builder(L->getHeader(),
|
||||
L->getHeader()->getFirstInsertionPt());
|
||||
IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
|
||||
Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
|
||||
NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
|
||||
}
|
||||
|
@ -4180,7 +4180,7 @@ static bool mergeConditionalStoreToAddress(
|
||||
QStore->getParent(), PPHI);
|
||||
|
||||
BasicBlock::iterator PostBBFirst = PostBB->getFirstInsertionPt();
|
||||
IRBuilder<> QB(PostBB, PostBBFirst);
|
||||
IRBuilder<> QB(PostBBFirst);
|
||||
QB.SetCurrentDebugLocation(PostBBFirst->getStableDebugLoc());
|
||||
|
||||
Value *PPred = PStore->getParent() == PTB ? PCond : QB.CreateNot(PCond);
|
||||
|
@ -1796,8 +1796,7 @@ bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
|
||||
assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
|
||||
"Not a LCSSA Phi?");
|
||||
WidePN->addIncoming(WideBO, LoopExitingBlock);
|
||||
Builder.SetInsertPoint(User->getParent(),
|
||||
User->getParent()->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(User->getParent()->getFirstInsertionPt());
|
||||
auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
|
||||
User->replaceAllUsesWith(TruncPN);
|
||||
DeadInsts.emplace_back(User);
|
||||
@ -1860,7 +1859,7 @@ Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU,
|
||||
UsePhi->getIterator());
|
||||
WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
|
||||
BasicBlock *WidePhiBB = WidePhi->getParent();
|
||||
IRBuilder<> Builder(WidePhiBB, WidePhiBB->getFirstInsertionPt());
|
||||
IRBuilder<> Builder(WidePhiBB->getFirstInsertionPt());
|
||||
Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType(), "",
|
||||
CanWidenByZExt, CanWidenBySExt);
|
||||
UsePhi->replaceAllUsesWith(Trunc);
|
||||
|
@ -2829,12 +2829,12 @@ static bool insertSinCosCall(IRBuilderBase &B, Function *OrigCallee, Value *Arg,
|
||||
if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
|
||||
// If the argument is an instruction, it must dominate all uses so put our
|
||||
// sincos call there.
|
||||
B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
|
||||
B.SetInsertPoint(++ArgInst->getIterator());
|
||||
} else {
|
||||
// Otherwise (e.g. for a constant) the beginning of the function is as
|
||||
// good a place as any.
|
||||
BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
|
||||
B.SetInsertPoint(&EntryBB, EntryBB.begin());
|
||||
B.SetInsertPoint(EntryBB.begin());
|
||||
}
|
||||
|
||||
SinCos = B.CreateCall(Callee, Arg, "sincospi");
|
||||
|
@ -646,7 +646,7 @@ Value *LoopIdiomVectorize::expandFindMismatch(
|
||||
// 3. We didn't find a mismatch in the vector loop, so we return MaxLen.
|
||||
// 4. We exitted the vector loop early due to a mismatch and need to return
|
||||
// the index that we found.
|
||||
Builder.SetInsertPoint(EndBlock, EndBlock->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(EndBlock->getFirstInsertionPt());
|
||||
PHINode *ResPhi = Builder.CreatePHI(ResType, 4, "mismatch_result");
|
||||
ResPhi->addIncoming(MaxLen, LoopIncBlock);
|
||||
ResPhi->addIncoming(IndexPhi, LoopStartBlock);
|
||||
|
@ -3026,8 +3026,7 @@ PHINode *InnerLoopVectorizer::createInductionResumeValue(
|
||||
|
||||
// Compute the end value for the additional bypass (if applicable).
|
||||
if (AdditionalBypass.first) {
|
||||
B.SetInsertPoint(AdditionalBypass.first,
|
||||
AdditionalBypass.first->getFirstInsertionPt());
|
||||
B.SetInsertPoint(AdditionalBypass.first->getFirstInsertionPt());
|
||||
EndValueFromAdditionalBypass =
|
||||
emitTransformedIndex(B, AdditionalBypass.second, II.getStartValue(),
|
||||
Step, II.getKind(), II.getInductionBinOp());
|
||||
@ -3441,8 +3440,7 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
|
||||
|
||||
// Fix LCSSA phis not already fixed earlier. Extracts may need to be generated
|
||||
// in the exit block, so update the builder.
|
||||
State.Builder.SetInsertPoint(State.CFG.ExitBB,
|
||||
State.CFG.ExitBB->getFirstNonPHIIt());
|
||||
State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHIIt());
|
||||
for (const auto &KV : Plan.getLiveOuts())
|
||||
KV.second->fixPhi(Plan, State);
|
||||
|
||||
@ -3485,7 +3483,7 @@ void InnerLoopVectorizer::fixFixedOrderRecurrence(VPLiveOut *LO,
|
||||
PHINode *ScalarHeaderPhi = LO->getPhi();
|
||||
auto *InitScalarFOR =
|
||||
ScalarHeaderPhi->getIncomingValueForBlock(LoopScalarPreHeader);
|
||||
Builder.SetInsertPoint(LoopScalarPreHeader, LoopScalarPreHeader->begin());
|
||||
Builder.SetInsertPoint(LoopScalarPreHeader->begin());
|
||||
auto *ScalarPreheaderPhi =
|
||||
Builder.CreatePHI(ScalarHeaderPhi->getType(), 2, "scalar.recur.init");
|
||||
for (auto *BB : predecessors(LoopScalarPreHeader)) {
|
||||
|
@ -11374,12 +11374,11 @@ void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
|
||||
LastInstIt = LastInst->getParent()->getFirstNonPHIIt();
|
||||
if (IsPHI || (E->State != TreeEntry::NeedToGather &&
|
||||
doesNotNeedToSchedule(E->Scalars))) {
|
||||
Builder.SetInsertPoint(LastInst->getParent(), LastInstIt);
|
||||
Builder.SetInsertPoint(LastInstIt);
|
||||
} else {
|
||||
// Set the insertion point after the last instruction in the bundle. Set the
|
||||
// debug location to Front.
|
||||
Builder.SetInsertPoint(
|
||||
LastInst->getParent(),
|
||||
LastInst->getNextNonDebugInstruction()->getIterator());
|
||||
}
|
||||
Builder.SetCurrentDebugLocation(Front->getDebugLoc());
|
||||
@ -12615,8 +12614,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
|
||||
if (PostponedPHIs && E->VectorizedValue)
|
||||
return E->VectorizedValue;
|
||||
auto *PH = cast<PHINode>(VL0);
|
||||
Builder.SetInsertPoint(PH->getParent(),
|
||||
PH->getParent()->getFirstNonPHIIt());
|
||||
Builder.SetInsertPoint(PH->getParent()->getFirstNonPHIIt());
|
||||
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
|
||||
if (PostponedPHIs || !E->VectorizedValue) {
|
||||
PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
|
||||
@ -12624,8 +12622,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
|
||||
Value *V = NewPhi;
|
||||
|
||||
// Adjust insertion point once all PHI's have been generated.
|
||||
Builder.SetInsertPoint(PH->getParent(),
|
||||
PH->getParent()->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(PH->getParent()->getFirstInsertionPt());
|
||||
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
|
||||
|
||||
V = FinalShuffle(V, E, VecTy);
|
||||
@ -13497,10 +13494,9 @@ Value *BoUpSLP::vectorizeTree(
|
||||
EntryToLastInstruction.clear();
|
||||
|
||||
if (ReductionRoot)
|
||||
Builder.SetInsertPoint(ReductionRoot->getParent(),
|
||||
ReductionRoot->getIterator());
|
||||
Builder.SetInsertPoint(ReductionRoot->getIterator());
|
||||
else
|
||||
Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
|
||||
Builder.SetInsertPoint(F->getEntryBlock().begin());
|
||||
|
||||
// Postpone emission of PHIs operands to avoid cyclic dependencies issues.
|
||||
(void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
|
||||
@ -13754,13 +13750,11 @@ Value *BoUpSLP::vectorizeTree(
|
||||
"instructions");
|
||||
if (auto *VecI = dyn_cast<Instruction>(Vec)) {
|
||||
if (auto *PHI = dyn_cast<PHINode>(VecI))
|
||||
Builder.SetInsertPoint(PHI->getParent(),
|
||||
PHI->getParent()->getFirstNonPHIIt());
|
||||
Builder.SetInsertPoint(PHI->getParent()->getFirstNonPHIIt());
|
||||
else
|
||||
Builder.SetInsertPoint(VecI->getParent(),
|
||||
std::next(VecI->getIterator()));
|
||||
Builder.SetInsertPoint(std::next(VecI->getIterator()));
|
||||
} else {
|
||||
Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
|
||||
Builder.SetInsertPoint(F->getEntryBlock().begin());
|
||||
}
|
||||
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
|
||||
// Required to update internally referenced instructions.
|
||||
@ -13866,8 +13860,7 @@ Value *BoUpSLP::vectorizeTree(
|
||||
Instruction *IncomingTerminator =
|
||||
PH->getIncomingBlock(I)->getTerminator();
|
||||
if (isa<CatchSwitchInst>(IncomingTerminator)) {
|
||||
Builder.SetInsertPoint(VecI->getParent(),
|
||||
std::next(VecI->getIterator()));
|
||||
Builder.SetInsertPoint(std::next(VecI->getIterator()));
|
||||
} else {
|
||||
Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator());
|
||||
}
|
||||
@ -13881,7 +13874,7 @@ Value *BoUpSLP::vectorizeTree(
|
||||
User->replaceUsesOfWith(Scalar, NewInst);
|
||||
}
|
||||
} else {
|
||||
Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
|
||||
Builder.SetInsertPoint(F->getEntryBlock().begin());
|
||||
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
|
||||
User->replaceUsesOfWith(Scalar, NewInst);
|
||||
}
|
||||
@ -14051,8 +14044,7 @@ Value *BoUpSLP::vectorizeTree(
|
||||
It != MinBWs.end() &&
|
||||
ReductionBitWidth != It->second.first) {
|
||||
IRBuilder<>::InsertPointGuard Guard(Builder);
|
||||
Builder.SetInsertPoint(ReductionRoot->getParent(),
|
||||
ReductionRoot->getIterator());
|
||||
Builder.SetInsertPoint(ReductionRoot->getIterator());
|
||||
Vec = Builder.CreateIntCast(
|
||||
Vec,
|
||||
VectorType::get(Builder.getIntNTy(ReductionBitWidth),
|
||||
|
@ -121,7 +121,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
|
||||
BasicBlock *Merge(BasicBlock::Create(C, "", F));
|
||||
B.SetInsertPoint(Entry);
|
||||
B.CreateCondBr(B.getTrue(), Left, Right);
|
||||
B.SetInsertPoint(Left, Left->begin());
|
||||
B.SetInsertPoint(Left->begin());
|
||||
Argument *PointerArg = &*F->arg_begin();
|
||||
B.SetInsertPoint(Left);
|
||||
B.CreateBr(Merge);
|
||||
@ -132,14 +132,14 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
|
||||
MemorySSA &MSSA = *Analyses->MSSA;
|
||||
MemorySSAUpdater Updater(&MSSA);
|
||||
// Add the store
|
||||
B.SetInsertPoint(Entry, Entry->begin());
|
||||
B.SetInsertPoint(Entry->begin());
|
||||
StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
|
||||
MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
|
||||
EntryStore, nullptr, Entry, MemorySSA::Beginning);
|
||||
Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
|
||||
|
||||
// Add the load
|
||||
B.SetInsertPoint(Merge, Merge->begin());
|
||||
B.SetInsertPoint(Merge->begin());
|
||||
LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
|
||||
|
||||
// MemoryPHI should not already exist.
|
||||
@ -156,7 +156,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
|
||||
|
||||
// Create a store on the left
|
||||
// Add the store
|
||||
B.SetInsertPoint(Left, Left->begin());
|
||||
B.SetInsertPoint(Left->begin());
|
||||
StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
|
||||
MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
|
||||
LeftStore, nullptr, Left, MemorySSA::Beginning);
|
||||
@ -167,7 +167,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
|
||||
EXPECT_NE(MP, nullptr);
|
||||
|
||||
// Add the second load
|
||||
B.SetInsertPoint(Merge, Merge->begin());
|
||||
B.SetInsertPoint(Merge->begin());
|
||||
LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
|
||||
|
||||
// Create the load memory access
|
||||
@ -181,7 +181,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
|
||||
EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
|
||||
EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
|
||||
// Now create a store below the existing one in the entry
|
||||
B.SetInsertPoint(Entry, --Entry->end());
|
||||
B.SetInsertPoint(--Entry->end());
|
||||
StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
|
||||
MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
|
||||
SecondEntryStore, nullptr, Entry, MemorySSA::End);
|
||||
@ -210,7 +210,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
|
||||
BasicBlock *Merge(BasicBlock::Create(C, "", F));
|
||||
B.SetInsertPoint(Entry);
|
||||
B.CreateCondBr(B.getTrue(), Left, Right);
|
||||
B.SetInsertPoint(Left, Left->begin());
|
||||
B.SetInsertPoint(Left->begin());
|
||||
Argument *PointerArg = &*F->arg_begin();
|
||||
B.SetInsertPoint(Left);
|
||||
B.CreateBr(Merge);
|
||||
@ -220,7 +220,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
|
||||
setupAnalyses();
|
||||
MemorySSA &MSSA = *Analyses->MSSA;
|
||||
MemorySSAUpdater Updater(&MSSA);
|
||||
B.SetInsertPoint(Left, Left->begin());
|
||||
B.SetInsertPoint(Left->begin());
|
||||
// Add the store
|
||||
StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
|
||||
MemoryAccess *StoreAccess =
|
||||
@ -232,7 +232,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
|
||||
EXPECT_NE(MP, nullptr);
|
||||
|
||||
// Add the load
|
||||
B.SetInsertPoint(Merge, Merge->begin());
|
||||
B.SetInsertPoint(Merge->begin());
|
||||
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
|
||||
|
||||
// Create the load memory acccess
|
||||
@ -253,7 +253,7 @@ TEST_F(MemorySSATest, SinkLoad) {
|
||||
BasicBlock *Merge(BasicBlock::Create(C, "", F));
|
||||
B.SetInsertPoint(Entry);
|
||||
B.CreateCondBr(B.getTrue(), Left, Right);
|
||||
B.SetInsertPoint(Left, Left->begin());
|
||||
B.SetInsertPoint(Left->begin());
|
||||
Argument *PointerArg = &*F->arg_begin();
|
||||
B.SetInsertPoint(Left);
|
||||
B.CreateBr(Merge);
|
||||
@ -261,10 +261,10 @@ TEST_F(MemorySSATest, SinkLoad) {
|
||||
B.CreateBr(Merge);
|
||||
|
||||
// Load in left block
|
||||
B.SetInsertPoint(Left, Left->begin());
|
||||
B.SetInsertPoint(Left->begin());
|
||||
LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
|
||||
// Store in merge block
|
||||
B.SetInsertPoint(Merge, Merge->begin());
|
||||
B.SetInsertPoint(Merge->begin());
|
||||
B.CreateStore(B.getInt8(16), PointerArg);
|
||||
|
||||
setupAnalyses();
|
||||
|
@ -2253,7 +2253,7 @@ TEST_F(OpenMPIRBuilderTest, StaticWorkshareLoopTarget) {
|
||||
BasicBlock *Preheader = CLI->getPreheader();
|
||||
Value *TripCount = CLI->getTripCount();
|
||||
|
||||
Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(BB->getFirstInsertionPt());
|
||||
|
||||
IRBuilder<>::InsertPoint AfterIP = OMPBuilder.applyWorkshareLoop(
|
||||
DL, CLI, AllocaIP, true, OMP_SCHEDULE_Static, nullptr, false, false,
|
||||
@ -2317,7 +2317,7 @@ TEST_F(OpenMPIRBuilderTest, StaticWorkShareLoop) {
|
||||
Value *IV = CLI->getIndVar();
|
||||
BasicBlock *ExitBlock = CLI->getExit();
|
||||
|
||||
Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(BB->getFirstInsertionPt());
|
||||
InsertPointTy AllocaIP = Builder.saveIP();
|
||||
|
||||
OMPBuilder.applyWorkshareLoop(DL, CLI, AllocaIP, /*NeedsBarrier=*/true,
|
||||
@ -2507,7 +2507,7 @@ TEST_P(OpenMPIRBuilderTestWithParams, DynamicWorkShareLoop) {
|
||||
Loc, LoopBodyGen, StartVal, StopVal, StepVal,
|
||||
/*IsSigned=*/false, /*InclusiveStop=*/false);
|
||||
|
||||
Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(BB->getFirstInsertionPt());
|
||||
InsertPointTy AllocaIP = Builder.saveIP();
|
||||
|
||||
// Collect all the info from CLI, as it isn't usable after the call to
|
||||
@ -2649,7 +2649,7 @@ TEST_F(OpenMPIRBuilderTest, DynamicWorkShareLoopOrdered) {
|
||||
Loc, LoopBodyGen, StartVal, StopVal, StepVal,
|
||||
/*IsSigned=*/false, /*InclusiveStop=*/false);
|
||||
|
||||
Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(BB->getFirstInsertionPt());
|
||||
InsertPointTy AllocaIP = Builder.saveIP();
|
||||
|
||||
// Collect all the info from CLI, as it isn't usable after the call to
|
||||
@ -4850,7 +4850,7 @@ static bool findGEPZeroOne(Value *Ptr, Value *&Zero, Value *&One) {
|
||||
static OpenMPIRBuilder::InsertPointTy
|
||||
sumReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
|
||||
Value *&Result) {
|
||||
IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
|
||||
IRBuilder<> Builder(IP.getPoint());
|
||||
Result = Builder.CreateFAdd(LHS, RHS, "red.add");
|
||||
return Builder.saveIP();
|
||||
}
|
||||
@ -4858,7 +4858,7 @@ sumReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
|
||||
static OpenMPIRBuilder::InsertPointTy
|
||||
sumAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
|
||||
Value *RHS) {
|
||||
IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
|
||||
IRBuilder<> Builder(IP.getPoint());
|
||||
Value *Partial = Builder.CreateLoad(Ty, RHS, "red.partial");
|
||||
Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, LHS, Partial, std::nullopt,
|
||||
AtomicOrdering::Monotonic);
|
||||
@ -4868,7 +4868,7 @@ sumAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
|
||||
static OpenMPIRBuilder::InsertPointTy
|
||||
xorReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
|
||||
Value *&Result) {
|
||||
IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
|
||||
IRBuilder<> Builder(IP.getPoint());
|
||||
Result = Builder.CreateXor(LHS, RHS, "red.xor");
|
||||
return Builder.saveIP();
|
||||
}
|
||||
@ -4876,7 +4876,7 @@ xorReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
|
||||
static OpenMPIRBuilder::InsertPointTy
|
||||
xorAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
|
||||
Value *RHS) {
|
||||
IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
|
||||
IRBuilder<> Builder(IP.getPoint());
|
||||
Value *Partial = Builder.CreateLoad(Ty, RHS, "red.partial");
|
||||
Builder.CreateAtomicRMW(AtomicRMWInst::Xor, LHS, Partial, std::nullopt,
|
||||
AtomicOrdering::Monotonic);
|
||||
|
@ -202,7 +202,7 @@ TEST_F(InstrOrderInvalidationTest, InsertInvalidation) {
|
||||
EXPECT_TRUE(BB->isInstrOrderValid());
|
||||
|
||||
// Invalidate orders.
|
||||
IRBuilder<> Builder(BB, I2->getIterator());
|
||||
IRBuilder<> Builder(I2->getIterator());
|
||||
Instruction *I1a = Builder.CreateCall(Nop);
|
||||
EXPECT_FALSE(BB->isInstrOrderValid());
|
||||
EXPECT_TRUE(I1->comesBefore(I1a));
|
||||
|
@ -698,7 +698,7 @@ TEST(IRBuilder, GetSetInsertionPointWithEmptyBasicBlock) {
|
||||
SmallVector<Value *, 3> Args = {DIV, DIV, DIV};
|
||||
Builder.CreateCall(DbgDeclare, Args);
|
||||
auto IP = BB->getFirstInsertionPt();
|
||||
Builder.SetInsertPoint(BB.get(), IP);
|
||||
Builder.SetInsertPoint(IP);
|
||||
}
|
||||
|
||||
TEST(AssignmentTrackingTest, InstrMethods) {
|
||||
|
@ -1188,7 +1188,7 @@ TEST_F(IRBuilderTest, DebugLoc) {
|
||||
EXPECT_EQ(DL1, Call1->getDebugLoc());
|
||||
|
||||
Call1->setDebugLoc(DL2);
|
||||
Builder.SetInsertPoint(Call1->getParent(), Call1->getIterator());
|
||||
Builder.SetInsertPoint(Call1->getIterator());
|
||||
EXPECT_EQ(DL2, Builder.getCurrentDebugLocation());
|
||||
auto Call2 = Builder.CreateCall(Callee, std::nullopt);
|
||||
EXPECT_EQ(DL2, Call2->getDebugLoc());
|
||||
@ -1311,10 +1311,10 @@ TEST_F(IRBuilderTest, CTAD) {
|
||||
// The block BB is empty, so don't test this one.
|
||||
// IRBuilder Builder5(BB->getTerminator());
|
||||
// static_assert(std::is_same_v<decltype(Builder5), IRBuilder<>>);
|
||||
IRBuilder Builder6(BB, BB->end(), Folder);
|
||||
IRBuilder Builder6(BB->end(), Folder);
|
||||
static_assert(
|
||||
std::is_same_v<decltype(Builder6), IRBuilder<InstSimplifyFolder>>);
|
||||
IRBuilder Builder7(BB, BB->end());
|
||||
IRBuilder Builder7(BB->end());
|
||||
static_assert(std::is_same_v<decltype(Builder7), IRBuilder<>>);
|
||||
}
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ TEST(SSAUpdaterBulk, SimpleMerge) {
|
||||
Value *SubOp2 = B.CreateSub(FirstArg, ConstantInt::get(I32Ty, 4));
|
||||
B.CreateBr(MergeBB);
|
||||
|
||||
B.SetInsertPoint(MergeBB, MergeBB->begin());
|
||||
B.SetInsertPoint(MergeBB->begin());
|
||||
auto *I1 = cast<Instruction>(B.CreateAdd(AddOp1, ConstantInt::get(I32Ty, 5)));
|
||||
auto *I2 = cast<Instruction>(B.CreateAdd(AddOp2, ConstantInt::get(I32Ty, 6)));
|
||||
auto *I3 = cast<Instruction>(B.CreateAdd(SubOp1, SubOp2));
|
||||
|
@ -193,7 +193,7 @@ static llvm::BasicBlock *convertOmpOpRegions(
|
||||
if (continuationBlockPHIs) {
|
||||
llvm::IRBuilderBase::InsertPointGuard guard(builder);
|
||||
continuationBlockPHIs->reserve(continuationBlockPHITypes.size());
|
||||
builder.SetInsertPoint(continuationBlock, continuationBlock->begin());
|
||||
builder.SetInsertPoint(continuationBlock->begin());
|
||||
for (llvm::Type *ty : continuationBlockPHITypes)
|
||||
continuationBlockPHIs->push_back(builder.CreatePHI(ty, numYields));
|
||||
}
|
||||
@ -413,8 +413,7 @@ static LogicalResult inlineConvertOmpRegions(
|
||||
return failure();
|
||||
if (continuationBlockArgs)
|
||||
llvm::append_range(*continuationBlockArgs, phis);
|
||||
builder.SetInsertPoint(continuationBlock,
|
||||
continuationBlock->getFirstInsertionPt());
|
||||
builder.SetInsertPoint(continuationBlock->getFirstInsertionPt());
|
||||
return success();
|
||||
}
|
||||
|
||||
|
@ -630,9 +630,9 @@ void BlockGenerator::generateConditionalExecution(
|
||||
|
||||
// Put the client code into the conditional block and continue in the merge
|
||||
// block afterwards.
|
||||
Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(ThenBlock->getFirstInsertionPt());
|
||||
GenThenFunc();
|
||||
Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt());
|
||||
Builder.SetInsertPoint(TailBlock->getFirstInsertionPt());
|
||||
}
|
||||
|
||||
static std::string getInstName(Value *Val) {
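
For illustration only (not part of the patch), a minimal sketch of the two IRBuilder positioning idioms this diff converts between; the helper name emitAddAtEntry is hypothetical, and only the IRBuilder calls already shown in the diff are assumed:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Hypothetical helper: insert an add at the first insertion point of BB.
static Value *emitAddAtEntry(BasicBlock &BB, Value *A, Value *B) {
  // Old form (block plus iterator), as on the left-hand side of this diff:
  //   IRBuilder<> Builder(&BB, BB.getFirstInsertionPt());
  // New form: the iterator alone positions the builder, since the parent
  // block can be recovered from it.
  IRBuilder<> Builder(BB.getFirstInsertionPt());
  return Builder.CreateAdd(A, B, "sum");
}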