[NFC][TargetTransformInfo][VectorUtils] Consolidate isVectorIntrinsic... api (#117635)

- update `VectorUtils::isVectorIntrinsicWithScalarOpAtArg` to take TTI at
  all call sites, allowing target-specific intrinsics to be specified
- add TTI to the `isVectorIntrinsicWithStructReturnOverloadAtField` API
- update the TTI API to provide `isTargetIntrinsicWith...` functions and
  name them consistently
- move `isTriviallyScalarizable` to VectorUtils
- update all uses of the API to pass the TTI parameter (a call-site sketch
  follows the change summary below)

Resolves #117030
Finn Plummer 2024-12-19 11:54:26 -08:00 committed by GitHub
parent b5d02786be
commit 45c01e8a33
15 changed files with 145 additions and 85 deletions
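
The consolidated helpers keep one call pattern at every use site: pass the pass's TargetTransformInfo when target-specific intrinsics should be considered, or nullptr to restrict the query to generic intrinsics. A minimal call-site sketch, assuming ID, ScalarOpdIdx, and TTI are in scope (it mirrors the VectorUtils.cpp dispatch further down):

// Generic intrinsics are answered by the switch in VectorUtils; target
// intrinsics are deferred to the TTI hook when a TTI is supplied.
bool ScalarOp = isVectorIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx, TTI);
// Callers without a TargetTransformInfo (e.g. the constant folder below)
// pass nullptr and keep the previous, generic-only behaviour.
bool GenericOnly =
    isVectorIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx, /*TTI=*/nullptr);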

llvm/include/llvm/Analysis/TargetTransformInfo.h

@ -905,14 +905,20 @@ public:
bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const;
/// Identifies if the vector form of the intrinsic has a scalar operand.
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
unsigned ScalarOpdIdx) const;
/// Identifies if the vector form of the intrinsic is overloaded on the type
/// of the operand at index \p OpdIdx, or on the return type if \p OpdIdx is
/// -1.
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx) const;
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) const;
/// Identifies if the vector form of the intrinsic that returns a struct is
/// overloaded at the struct element index \p RetIdx.
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx) const;
/// Estimate the overhead of scalarizing an instruction. Insert and Extract
/// are set if the demanded result elements need to be inserted and/or
@ -2020,8 +2026,11 @@ public:
virtual bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) = 0;
virtual bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
unsigned ScalarOpdIdx) = 0;
virtual bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx) = 0;
virtual bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) = 0;
virtual bool
isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx) = 0;
virtual InstructionCost
getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
bool Insert, bool Extract, TargetCostKind CostKind,
@ -2610,9 +2619,14 @@ public:
return Impl.isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
}
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx) override {
return Impl.isVectorIntrinsicWithOverloadTypeAtArg(ID, ScalarOpdIdx);
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) override {
return Impl.isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
}
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx) override {
return Impl.isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
}
InstructionCost getScalarizationOverhead(VectorType *Ty,
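
For a backend, opting in means overriding these hooks in its TTIImpl subclass, as the DirectX changes later in this commit do. A minimal sketch with a hypothetical target and intrinsic name (not part of this commit):

// Hypothetical backend override: report that operand 1 of this target
// intrinsic must stay scalar when the call is widened or split per lane.
bool MyTargetTTIImpl::isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                                         unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::mytarget_lane_op: // hypothetical intrinsic
    return ScalarOpdIdx == 1;
  default:
    return false;
  }
}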

llvm/include/llvm/Analysis/TargetTransformInfoImpl.h

@ -396,9 +396,14 @@ public:
return false;
}
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx) const {
return ScalarOpdIdx == -1;
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) const {
return OpdIdx == -1;
}
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx) const {
return RetIdx == 0;
}
InstructionCost getScalarizationOverhead(VectorType *Ty,

llvm/include/llvm/Analysis/VectorUtils.h

@ -132,11 +132,25 @@ typedef unsigned ID;
/// This method returns true if the intrinsic's argument types are all scalars
/// for the scalar form of the intrinsic and all vectors (or scalars handled by
/// isVectorIntrinsicWithScalarOpAtArg) for the vector form of the intrinsic.
///
/// Note: isTriviallyVectorizable implies isTriviallyScalarizable.
bool isTriviallyVectorizable(Intrinsic::ID ID);
/// Identify if the intrinsic is trivially scalarizable.
/// This method returns true based on the same predicates as
/// isTriviallyVectorizable.
/// Note: There are intrinsics where implementing vectorization for the
/// intrinsic is redundant, but we want to implement scalarization of the
/// vector. To avoid requiring that such an intrinsic also implement
/// vectorization, we provide this separate function.
bool isTriviallyScalarizable(Intrinsic::ID ID, const TargetTransformInfo *TTI);
/// Identifies if the vector form of the intrinsic has a scalar operand.
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
unsigned ScalarOpdIdx);
/// \p TTI is used to consider target-specific intrinsics; if no target-specific
/// intrinsics are to be considered, it is appropriate to pass in nullptr.
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx,
const TargetTransformInfo *TTI);
/// Identifies if the vector form of the intrinsic is overloaded on the type of
/// the operand at index \p OpdIdx, or on the return type if \p OpdIdx is -1.
@ -146,9 +160,11 @@ bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx,
const TargetTransformInfo *TTI);
/// Identifies if the vector form of the intrinsic that returns a struct is
/// overloaded at the struct element index \p RetIdx.
bool isVectorIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx);
/// overloaded at the struct element index \p RetIdx. \p TTI is used to
/// consider target-specific intrinsics; if no target-specific intrinsics
/// are to be considered, it is appropriate to pass in nullptr.
bool isVectorIntrinsicWithStructReturnOverloadAtField(
Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI);
/// Returns intrinsic ID for call.
/// For the input call instruction it finds mapping intrinsic and returns
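
frexp is the case the scalarizability note above is written for: it is not yet in the isTriviallyVectorizable table (see the TODO in VectorUtils.cpp below), but it can be split into per-lane calls, so the query answers yes even without a TTI:

// Sketch of the distinction: scalarizable without being in the
// trivially-vectorizable table.
bool CanScalarize =
    isTriviallyScalarizable(Intrinsic::frexp, /*TTI=*/nullptr); // true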

llvm/include/llvm/CodeGen/BasicTTIImpl.h

@ -819,9 +819,14 @@ public:
return false;
}
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx) const {
return ScalarOpdIdx == -1;
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) const {
return OpdIdx == -1;
}
bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx) const {
return RetIdx == 0;
}
/// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.

llvm/lib/Analysis/ConstantFolding.cpp

@ -3447,7 +3447,7 @@ static Constant *ConstantFoldFixedVectorCall(
// Gather a column of constants.
for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
// Some intrinsics use a scalar type for certain arguments.
if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J, /*TTI=*/nullptr)) {
Lane[J] = Operands[J];
continue;
}

llvm/lib/Analysis/TargetTransformInfo.cpp

@ -615,9 +615,14 @@ bool TargetTransformInfo::isTargetIntrinsicWithScalarOpAtArg(
return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
}
bool TargetTransformInfo::isVectorIntrinsicWithOverloadTypeAtArg(
Intrinsic::ID ID, int ScalarOpdIdx) const {
return TTIImpl->isVectorIntrinsicWithOverloadTypeAtArg(ID, ScalarOpdIdx);
bool TargetTransformInfo::isTargetIntrinsicWithOverloadTypeAtArg(
Intrinsic::ID ID, int OpdIdx) const {
return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
}
bool TargetTransformInfo::isTargetIntrinsicWithStructReturnOverloadAtField(
Intrinsic::ID ID, int RetIdx) const {
return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
}
InstructionCost TargetTransformInfo::getScalarizationOverhead(

llvm/lib/Analysis/VectorUtils.cpp

@ -113,9 +113,31 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
}
}
bool llvm::isTriviallyScalarizable(Intrinsic::ID ID,
const TargetTransformInfo *TTI) {
if (isTriviallyVectorizable(ID))
return true;
if (TTI && Intrinsic::isTargetIntrinsic(ID))
return TTI->isTargetIntrinsicTriviallyScalarizable(ID);
// TODO: Move frexp to isTriviallyVectorizable.
// https://github.com/llvm/llvm-project/issues/112408
switch (ID) {
case Intrinsic::frexp:
return true;
}
return false;
}
/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
unsigned ScalarOpdIdx) {
unsigned ScalarOpdIdx,
const TargetTransformInfo *TTI) {
if (TTI && Intrinsic::isTargetIntrinsic(ID))
return TTI->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
switch (ID) {
case Intrinsic::abs:
case Intrinsic::vp_abs:
@ -142,7 +164,7 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");
if (TTI && Intrinsic::isTargetIntrinsic(ID))
return TTI->isVectorIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
return TTI->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
if (VPCastIntrinsic::isVPCast(ID))
return OpdIdx == -1 || OpdIdx == 0;
@ -167,8 +189,12 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
}
}
bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
int RetIdx) {
bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(
Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI) {
if (TTI && Intrinsic::isTargetIntrinsic(ID))
return TTI->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
switch (ID) {
case Intrinsic::frexp:
return RetIdx == 0 || RetIdx == 1;
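
With this dispatch in place, a scalarizing transform no longer needs a private helper: it can gate per-lane splitting on the free function directly, which is exactly what the Scalarizer change later in this commit does. A condensed sketch, assuming CI is the CallInst being visited and TTI is the pass's TargetTransformInfo:

// Gate per-lane splitting of an intrinsic call on the consolidated helper.
Function *F = CI.getCalledFunction();
Intrinsic::ID ID = F ? F->getIntrinsicID() : Intrinsic::not_intrinsic;
if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID, TTI))
  return false; // not an intrinsic we know how to split lane by lane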

llvm/lib/CodeGen/ReplaceWithVeclib.cpp

@ -121,7 +121,7 @@ static bool replaceWithCallToVeclib(const TargetLibraryInfo &TLI,
auto *ArgTy = Arg.value()->getType();
bool IsOloadTy = isVectorIntrinsicWithOverloadTypeAtArg(IID, Arg.index(),
/*TTI=*/nullptr);
if (isVectorIntrinsicWithScalarOpAtArg(IID, Arg.index())) {
if (isVectorIntrinsicWithScalarOpAtArg(IID, Arg.index(), /*TTI=*/nullptr)) {
ScalarArgTypes.push_back(ArgTy);
if (IsOloadTy)
OloadTys.push_back(ArgTy);

llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp

@ -25,13 +25,13 @@ bool DirectXTTIImpl::isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
}
}
bool DirectXTTIImpl::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx) {
bool DirectXTTIImpl::isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) {
switch (ID) {
case Intrinsic::dx_asdouble:
return ScalarOpdIdx == 0;
return OpdIdx == 0;
default:
return ScalarOpdIdx == -1;
return OpdIdx == -1;
}
}
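
This override is what the consolidated helper reaches when it sees a DirectX intrinsic. A usage sketch, assuming DXTTI is the TargetTransformInfo computed for a DirectX function: operand 0 of dx.asdouble is reported as carrying the overloaded type.

// The free function sees a target intrinsic and defers to DirectXTTIImpl,
// so this returns true for operand 0 of dx.asdouble.
bool Overloaded = isVectorIntrinsicWithOverloadTypeAtArg(
    Intrinsic::dx_asdouble, /*OpdIdx=*/0, &DXTTI);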

llvm/lib/Target/DirectX/DirectXTargetTransformInfo.h

@ -37,8 +37,7 @@ public:
bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const;
bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
unsigned ScalarOpdIdx);
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int ScalarOpdIdx);
bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx);
};
} // namespace llvm

llvm/lib/Transforms/Scalar/Scalarizer.cpp

@ -279,8 +279,6 @@ public:
bool visit(Function &F);
bool isTriviallyScalarizable(Intrinsic::ID ID);
// InstVisitor methods. They return true if the instruction was scalarized,
// false if nothing changed.
bool visitInstruction(Instruction &I) { return false; }
@ -683,19 +681,6 @@ bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
return true;
}
bool ScalarizerVisitor::isTriviallyScalarizable(Intrinsic::ID ID) {
if (isTriviallyVectorizable(ID))
return true;
// TODO: Move frexp to isTriviallyVectorizable.
// https://github.com/llvm/llvm-project/issues/112408
switch (ID) {
case Intrinsic::frexp:
return true;
}
return Intrinsic::isTargetIntrinsic(ID) &&
TTI->isTargetIntrinsicTriviallyScalarizable(ID);
}
/// If a call to a vector typed intrinsic function, split into a scalar call per
/// element if possible for the intrinsic.
bool ScalarizerVisitor::splitCall(CallInst &CI) {
@ -715,7 +700,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
Intrinsic::ID ID = F->getIntrinsicID();
if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID))
if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID, TTI))
return false;
// unsigned NumElems = VT->getNumElements();
@ -743,7 +728,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
// will only scalarize when the struct elements have the same bitness.
if (!CurrVS || CurrVS->NumPacked != VS->NumPacked)
return false;
if (isVectorIntrinsicWithStructReturnOverloadAtField(ID, I))
if (isVectorIntrinsicWithStructReturnOverloadAtField(ID, I, TTI))
Tys.push_back(CurrVS->SplitTy);
}
}
@ -794,8 +779,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
Tys[0] = VS->RemainderTy;
for (unsigned J = 0; J != NumArgs; ++J) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, J) ||
TTI->isTargetIntrinsicWithScalarOpAtArg(ID, J)) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, J, TTI)) {
ScalarCallOps.push_back(ScalarOperands[J]);
} else {
ScalarCallOps.push_back(Scattered[J][I]);
@ -1089,7 +1073,7 @@ bool ScalarizerVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
if (!F)
return false;
Intrinsic::ID ID = F->getIntrinsicID();
if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID))
if (ID == Intrinsic::not_intrinsic || !isTriviallyScalarizable(ID, TTI))
return false;
// Note: Fall through means Operand is a `CallInst` and it is defined in
// `isTriviallyScalarizable`.

llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp

@ -926,7 +926,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
auto *SE = PSE.getSE();
Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
for (unsigned Idx = 0; Idx < CI->arg_size(); ++Idx)
if (isVectorIntrinsicWithScalarOpAtArg(IntrinID, Idx)) {
if (isVectorIntrinsicWithScalarOpAtArg(IntrinID, Idx, TTI)) {
if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(Idx)),
TheLoop)) {
reportVectorizationFailure("Found unvectorizable intrinsic",

llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp

@ -1091,7 +1091,8 @@ static bool allSameType(ArrayRef<Value *> VL) {
/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
TargetLibraryInfo *TLI) {
TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI) {
if (!UserInst)
return false;
unsigned Opcode = UserInst->getOpcode();
@ -1108,7 +1109,7 @@ static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
CallInst *CI = cast<CallInst>(UserInst);
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
return any_of(enumerate(CI->args()), [&](auto &&Arg) {
return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) &&
return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index(), TTI) &&
Arg.value().get() == Scalar;
});
}
@ -6503,7 +6504,7 @@ void BoUpSLP::buildExternalUses(
// be used.
if (UseEntry->State == TreeEntry::ScatterVectorize ||
!doesInTreeUserNeedToExtract(
Scalar, getRootEntryInstruction(*UseEntry), TLI)) {
Scalar, getRootEntryInstruction(*UseEntry), TLI, TTI)) {
LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
<< ".\n");
assert(!UseEntry->isGather() && "Bad state");
@ -7828,7 +7829,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
unsigned NumArgs = CI->arg_size();
SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr);
for (unsigned J = 0; J != NumArgs; ++J)
if (isVectorIntrinsicWithScalarOpAtArg(ID, J))
if (isVectorIntrinsicWithScalarOpAtArg(ID, J, TTI))
ScalarArgs[J] = CI->getArgOperand(J);
for (Value *V : VL) {
CallInst *CI2 = dyn_cast<CallInst>(V);
@ -7844,7 +7845,7 @@ BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
// Some intrinsics have scalar arguments and should be same in order for
// them to be vectorized.
for (unsigned J = 0; J != NumArgs; ++J) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, J, TTI)) {
Value *A1J = CI2->getArgOperand(J);
if (ScalarArgs[J] != A1J) {
LLVM_DEBUG(dbgs()
@ -8716,7 +8717,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
for (unsigned I : seq<unsigned>(CI->arg_size())) {
// For scalar operands no need to create an entry since no need to
// vectorize it.
if (isVectorIntrinsicWithScalarOpAtArg(ID, I))
if (isVectorIntrinsicWithScalarOpAtArg(ID, I, TTI))
continue;
buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
}
@ -10946,14 +10947,14 @@ TTI::CastContextHint BoUpSLP::getCastContextHint(const TreeEntry &TE) const {
/// Builds the arguments types vector for the given call instruction with the
/// given \p ID for the specified vector factor.
static SmallVector<Type *> buildIntrinsicArgTypes(const CallInst *CI,
const Intrinsic::ID ID,
const unsigned VF,
unsigned MinBW) {
static SmallVector<Type *>
buildIntrinsicArgTypes(const CallInst *CI, const Intrinsic::ID ID,
const unsigned VF, unsigned MinBW,
const TargetTransformInfo *TTI) {
SmallVector<Type *> ArgTys;
for (auto [Idx, Arg] : enumerate(CI->args())) {
if (ID != Intrinsic::not_intrinsic) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, Idx)) {
if (isVectorIntrinsicWithScalarOpAtArg(ID, Idx, TTI)) {
ArgTys.push_back(Arg->getType());
continue;
}
@ -11655,9 +11656,9 @@ BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
auto GetVectorCost = [=](InstructionCost CommonCost) {
auto *CI = cast<CallInst>(VL0);
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
SmallVector<Type *> ArgTys =
buildIntrinsicArgTypes(CI, ID, VecTy->getNumElements(),
It != MinBWs.end() ? It->second.first : 0);
SmallVector<Type *> ArgTys = buildIntrinsicArgTypes(
CI, ID, VecTy->getNumElements(),
It != MinBWs.end() ? It->second.first : 0, TTI);
auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys);
return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost;
};
@ -15815,9 +15816,9 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
SmallVector<Type *> ArgTys =
buildIntrinsicArgTypes(CI, ID, VecTy->getNumElements(),
It != MinBWs.end() ? It->second.first : 0);
SmallVector<Type *> ArgTys = buildIntrinsicArgTypes(
CI, ID, VecTy->getNumElements(),
It != MinBWs.end() ? It->second.first : 0, TTI);
auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys);
bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
VecCallCosts.first <= VecCallCosts.second;
@ -15833,7 +15834,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
ValueList OpVL;
// Some intrinsics have scalar arguments. This argument should not be
// vectorized.
if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) {
if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I, TTI)) {
ScalarArg = CEI->getArgOperand(I);
// If we decided to reduce the bitwidth of the abs intrinsic, its second
// argument must be set to false (do not return poison if the value is
// signed min).
@ -16372,7 +16373,7 @@ BoUpSLP::vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
E->State == TreeEntry::StridedVectorize) &&
doesInTreeUserNeedToExtract(
Scalar, getRootEntryInstruction(*UseEntry),
TLI);
TLI, TTI);
})) &&
"Scalar with nullptr User must be registered in "
"ExternallyUsedValues map or remain as scalar in vectorized "
@ -17872,7 +17873,8 @@ bool BoUpSLP::collectValuesToDemote(
// Choose the best bitwidth based on cost estimations.
auto Checker = [&](unsigned BitWidth, unsigned) {
unsigned MinBW = PowerOf2Ceil(BitWidth);
SmallVector<Type *> ArgTys = buildIntrinsicArgTypes(IC, ID, VF, MinBW);
SmallVector<Type *> ArgTys =
buildIntrinsicArgTypes(IC, ID, VF, MinBW, TTI);
auto VecCallCosts = getVectorCallCosts(
IC, getWidenedType(IntegerType::get(IC->getContext(), MinBW), VF),
TTI, TLI, ArgTys);

llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

@ -1027,7 +1027,8 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
// Some intrinsics have a scalar argument - don't replace it with a
// vector.
Value *Arg;
if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index()))
if (isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index(),
State.TTI))
Arg = State.get(I.value(), VPLane(0));
else
Arg = State.get(I.value(), onlyFirstLaneUsed(I.value()));

llvm/lib/Transforms/Vectorize/VectorCombine.cpp

@ -1977,7 +1977,7 @@ bool VectorCombine::foldShuffleOfIntrinsics(Instruction &I) {
return false;
for (unsigned I = 0, E = II0->arg_size(); I != E; ++I)
if (isVectorIntrinsicWithScalarOpAtArg(IID, I) &&
if (isVectorIntrinsicWithScalarOpAtArg(IID, I, &TTI) &&
II0->getArgOperand(I) != II1->getArgOperand(I))
return false;
@ -1990,7 +1990,7 @@ bool VectorCombine::foldShuffleOfIntrinsics(Instruction &I) {
SmallVector<Type *> NewArgsTy;
InstructionCost NewCost = 0;
for (unsigned I = 0, E = II0->arg_size(); I != E; ++I)
if (isVectorIntrinsicWithScalarOpAtArg(IID, I)) {
if (isVectorIntrinsicWithScalarOpAtArg(IID, I, &TTI)) {
NewArgsTy.push_back(II0->getArgOperand(I)->getType());
} else {
auto *VecTy = cast<FixedVectorType>(II0->getArgOperand(I)->getType());
@ -2011,7 +2011,7 @@ bool VectorCombine::foldShuffleOfIntrinsics(Instruction &I) {
SmallVector<Value *> NewArgs;
for (unsigned I = 0, E = II0->arg_size(); I != E; ++I)
if (isVectorIntrinsicWithScalarOpAtArg(IID, I)) {
if (isVectorIntrinsicWithScalarOpAtArg(IID, I, &TTI)) {
NewArgs.push_back(II0->getArgOperand(I));
} else {
Value *Shuf = Builder.CreateShuffleVector(II0->getArgOperand(I),
@ -2102,7 +2102,8 @@ static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
const SmallPtrSet<Use *, 4> &IdentityLeafs,
const SmallPtrSet<Use *, 4> &SplatLeafs,
const SmallPtrSet<Use *, 4> &ConcatLeafs,
IRBuilder<> &Builder) {
IRBuilder<> &Builder,
const TargetTransformInfo *TTI) {
auto [FrontU, FrontLane] = Item.front();
if (IdentityLeafs.contains(FrontU)) {
@ -2137,13 +2138,14 @@ static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
unsigned NumOps = I->getNumOperands() - (II ? 1 : 0);
SmallVector<Value *> Ops(NumOps);
for (unsigned Idx = 0; Idx < NumOps; Idx++) {
if (II && isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx)) {
if (II &&
isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx, TTI)) {
Ops[Idx] = II->getOperand(Idx);
continue;
}
Ops[Idx] =
generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx), Ty,
IdentityLeafs, SplatLeafs, ConcatLeafs, Builder);
Ops[Idx] = generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx),
Ty, IdentityLeafs, SplatLeafs, ConcatLeafs,
Builder, TTI);
}
SmallVector<Value *, 8> ValueList;
@ -2315,7 +2317,8 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
II && isTriviallyVectorizable(II->getIntrinsicID()) &&
!II->hasOperandBundles()) {
for (unsigned Op = 0, E = II->getNumOperands() - 1; Op < E; Op++) {
if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op)) {
if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op,
&TTI)) {
if (!all_of(drop_begin(Item), [Item, Op](InstLane &IL) {
Value *FrontV = Item.front().first->get();
Use *U = IL.first;
@ -2346,7 +2349,7 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
// removed. Scan through again and generate the new tree of instructions.
Builder.SetInsertPoint(&I);
Value *V = generateNewInstTree(Start, Ty, IdentityLeafs, SplatLeafs,
ConcatLeafs, Builder);
ConcatLeafs, Builder, &TTI);
replaceValue(I, *V);
return true;
}