[LV] Rename ToVectorTy to toVectorTy (NFC) (#120404)
This is for consistency with other helpers (and also follows the LLVM naming conventions).
parent 93d4b1f7a7
commit 9ab5474e56
@@ -16,14 +16,14 @@ namespace llvm {
 /// A helper function for converting Scalar types to vector types. If
 /// the incoming type is void, we return void. If the EC represents a
 /// scalar, we return the scalar type.
-inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
+inline Type *toVectorTy(Type *Scalar, ElementCount EC) {
   if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
     return Scalar;
   return VectorType::get(Scalar, EC);
 }
 
-inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
-  return ToVectorTy(Scalar, ElementCount::getFixed(VF));
+inline Type *toVectorTy(Type *Scalar, unsigned VF) {
+  return toVectorTy(Scalar, ElementCount::getFixed(VF));
 }
 
 /// A helper for converting structs of scalar types to structs of vector types.
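For reference, a minimal usage sketch of the renamed helper (not part of the patch; the include that declares toVectorTy is assumed, and the function and variable names are illustrative only):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/VectorTypeUtils.h" // assumed location of toVectorTy; adjust if it lives elsewhere
#include "llvm/Support/TypeSize.h"   // ElementCount

using namespace llvm;

// Illustrative only: shows the behaviors described in the doc comment above.
void toVectorTySketch() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);

  // A fixed element count widens the scalar: <4 x i32>.
  Type *FixedVec = toVectorTy(I32, ElementCount::getFixed(4));

  // A scalable element count works the same way: <vscale x 2 x i32>.
  Type *ScalableVec = toVectorTy(I32, ElementCount::getScalable(2));

  // A scalar element count (EC == 1) returns the scalar type unchanged,
  // as does a void or metadata type.
  Type *Unchanged = toVectorTy(I32, ElementCount::getFixed(1));

  (void)FixedVec;
  (void)ScalableVec;
  (void)Unchanged;
}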
@@ -41,7 +41,7 @@ Type *toScalarizedStructTy(StructType *StructTy);
 bool isVectorizedStructTy(StructType *StructTy);
 
 /// A helper for converting to vectorized types. For scalar types, this is
-/// equivalent to calling `ToVectorTy`. For struct types, this returns a new
+/// equivalent to calling `toVectorTy`. For struct types, this returns a new
 /// struct where each element type has been widened to a vector type.
 /// Note:
 ///   - If the incoming type is void, we return void
@@ -50,7 +50,7 @@ bool isVectorizedStructTy(StructType *StructTy);
 inline Type *toVectorizedTy(Type *Ty, ElementCount EC) {
   if (StructType *StructTy = dyn_cast<StructType>(Ty))
     return toVectorizedStructTy(StructTy, EC);
-  return ToVectorTy(Ty, EC);
+  return toVectorTy(Ty, EC);
 }
 
 /// A helper for converting vectorized types to scalarized (non-vector) types.
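A matching sketch for the struct-aware wrapper documented above (again illustrative and not part of the patch; it assumes the same includes as the previous sketch):

// Illustrative only: scalar inputs forward to toVectorTy, struct inputs are
// widened element-wise per the doc comment above.
void toVectorizedTySketch(LLVMContext &Ctx) {
  ElementCount EC = ElementCount::getFixed(4);

  // Scalar input: equivalent to toVectorTy -> <4 x float>.
  Type *VecFloat = toVectorizedTy(Type::getFloatTy(Ctx), EC);

  // Struct input: each element type is widened -> { <4 x float>, <4 x i64> }.
  StructType *STy =
      StructType::get(Ctx, {Type::getFloatTy(Ctx), Type::getInt64Ty(Ctx)});
  Type *WideStruct = toVectorizedTy(STy, EC);

  (void)VecFloat;
  (void)WideStruct;
}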
@@ -1251,8 +1251,8 @@ public:
       return false;
 
     // Get the source and destination types of the truncate.
-    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
-    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
+    Type *SrcTy = toVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
+    Type *DestTy = toVectorTy(cast<CastInst>(I)->getDestTy(), VF);
 
     // If the truncate is free for the given types, return false. Replacing a
     // free truncate with an induction variable would add an induction variable
@@ -3535,14 +3535,14 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
   }
   InstructionCost SafeDivisorCost = 0;
 
-  auto *VecTy = ToVectorTy(I->getType(), VF);
+  auto *VecTy = toVectorTy(I->getType(), VF);
 
   // The cost of the select guard to ensure all lanes are well defined
   // after we speculate above any internal control flow.
-  SafeDivisorCost += TTI.getCmpSelInstrCost(
-      Instruction::Select, VecTy,
-      ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
-      CmpInst::BAD_ICMP_PREDICATE, CostKind);
+  SafeDivisorCost +=
+      TTI.getCmpSelInstrCost(Instruction::Select, VecTy,
+                             toVectorTy(Type::getInt1Ty(I->getContext()), VF),
+                             CmpInst::BAD_ICMP_PREDICATE, CostKind);
 
   // Certain instructions can be cheaper to vectorize if they have a constant
   // second vector operand. One example of this are shifts on x86.
@@ -4662,7 +4662,7 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
   }
 
   auto WillWiden = [&TTI, VF](Type *ScalarTy) {
-    Type *VectorTy = ToVectorTy(ScalarTy, VF);
+    Type *VectorTy = toVectorTy(ScalarTy, VF);
     unsigned NumLegalParts = TTI.getNumberOfParts(VectorTy);
     if (!NumLegalParts)
       return false;
@@ -5653,7 +5653,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
     if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) {
       ScalarCost += TTI.getScalarizationOverhead(
-          cast<VectorType>(ToVectorTy(I->getType(), VF)),
+          cast<VectorType>(toVectorTy(I->getType(), VF)),
           APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ true,
           /*Extract*/ false, CostKind);
       ScalarCost +=
@@ -5672,7 +5672,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
           Worklist.push_back(J);
         else if (needsExtract(J, VF)) {
           ScalarCost += TTI.getScalarizationOverhead(
-              cast<VectorType>(ToVectorTy(J->getType(), VF)),
+              cast<VectorType>(toVectorTy(J->getType(), VF)),
               APInt::getAllOnes(VF.getFixedValue()), /*Insert*/ false,
               /*Extract*/ true, CostKind);
         }
@@ -5783,7 +5783,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
 
   unsigned AS = getLoadStoreAddressSpace(I);
   Value *Ptr = getLoadStorePointerOperand(I);
-  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
+  Type *PtrTy = toVectorTy(Ptr->getType(), VF);
   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
   //       that it is being called from this specific place.
 
@@ -5834,7 +5834,7 @@ InstructionCost
 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                     ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   Value *Ptr = getLoadStorePointerOperand(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
@@ -5866,7 +5866,7 @@ LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
   assert(Legal->isUniformMemOp(*I, VF));
 
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   unsigned AS = getLoadStoreAddressSpace(I);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
@@ -5892,7 +5892,7 @@ InstructionCost
 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                  ElementCount VF) {
   Type *ValTy = getLoadStoreType(I);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
   const Value *Ptr = getLoadStorePointerOperand(I);
 
@@ -5910,7 +5910,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
 
   Instruction *InsertPos = Group->getInsertPos();
   Type *ValTy = getLoadStoreType(InsertPos);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -6155,7 +6155,7 @@ InstructionCost LoopVectorizationCostModel::getScalarizationOverhead(
     return 0;
 
   InstructionCost Cost = 0;
-  Type *RetTy = ToVectorTy(I->getType(), VF);
+  Type *RetTy = toVectorTy(I->getType(), VF);
   if (!RetTy->isVoidTy() &&
       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
     Cost += TTI.getScalarizationOverhead(
@@ -6421,9 +6421,9 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
 
     bool MaskRequired = Legal->isMaskRequired(CI);
     // Compute corresponding vector type for return value and arguments.
-    Type *RetTy = ToVectorTy(ScalarRetTy, VF);
+    Type *RetTy = toVectorTy(ScalarRetTy, VF);
     for (Type *ScalarTy : ScalarTys)
-      Tys.push_back(ToVectorTy(ScalarTy, VF));
+      Tys.push_back(toVectorTy(ScalarTy, VF));
 
     // An in-loop reduction using an fmuladd intrinsic is a special case;
     // we don't want the normal cost for that intrinsic.
@@ -6613,7 +6613,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
            HasSingleCopyAfterVectorization(I, VF));
     VectorTy = RetTy;
   } else
-    VectorTy = ToVectorTy(RetTy, VF);
+    VectorTy = toVectorTy(RetTy, VF);
 
   if (VF.isVector() && VectorTy->isVectorTy() &&
       !TTI.getNumberOfParts(VectorTy))
@@ -6673,8 +6673,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     return Switch->getNumCases() *
            TTI.getCmpSelInstrCost(
                Instruction::ICmp,
-               ToVectorTy(Switch->getCondition()->getType(), VF),
-               ToVectorTy(Type::getInt1Ty(I->getContext()), VF),
+               toVectorTy(Switch->getCondition()->getType(), VF),
+               toVectorTy(Type::getInt1Ty(I->getContext()), VF),
                CmpInst::ICMP_EQ, CostKind);
   }
   case Instruction::PHI: {
@@ -6719,8 +6719,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       }
       return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
-                 Instruction::Select, ToVectorTy(ResultTy, VF),
-                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
+                 Instruction::Select, toVectorTy(ResultTy, VF),
+                 toVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
     }
 
@@ -6729,8 +6729,8 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     if (VF.isVector() && foldTailWithEVL() &&
         Legal->getReductionVars().contains(Phi) && !isInLoopReduction(Phi)) {
       IntrinsicCostAttributes ICA(
-          Intrinsic::vp_merge, ToVectorTy(Phi->getType(), VF),
-          {ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
+          Intrinsic::vp_merge, toVectorTy(Phi->getType(), VF),
+          {toVectorTy(Type::getInt1Ty(Phi->getContext()), VF)});
       return TTI.getIntrinsicInstrCost(ICA, CostKind);
     }
 
@@ -6870,7 +6870,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[I]);
     }
 
-    VectorTy = ToVectorTy(ValTy, VF);
+    VectorTy = toVectorTy(ValTy, VF);
     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                   cast<CmpInst>(I)->getPredicate(), CostKind,
                                   {TTI::OK_AnyValue, TTI::OP_None},
@@ -6888,7 +6888,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       if (Decision == CM_Scalarize)
         Width = ElementCount::getFixed(1);
     }
-    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
+    VectorTy = toVectorTy(getLoadStoreType(I), Width);
     return getMemoryInstructionCost(I, VF);
   }
   case Instruction::BitCast:
@@ -6969,7 +6969,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
       SrcScalarTy =
          IntegerType::get(SrcScalarTy->getContext(), MinBWs[Op0AsInstruction]);
     Type *SrcVecTy =
-        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
+        VectorTy->isVectorTy() ? toVectorTy(SrcScalarTy, VF) : SrcScalarTy;
 
     if (canTruncateToMinimalBitwidth(I, VF)) {
       // If the result type is <= the source type, there will be no extend
@@ -7498,7 +7498,7 @@ LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
     // Pre-compute the cost for I, if it has a reduction pattern cost.
     for (Instruction *I : ChainOpsAndOperands) {
       auto ReductionCost = CM.getReductionPatternCost(
-          I, VF, ToVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
+          I, VF, toVectorTy(I->getType(), VF), TTI::TCK_RecipThroughput);
       if (!ReductionCost)
         continue;
 
@@ -1031,11 +1031,11 @@ InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
     Arguments.push_back(V);
   }
 
-  Type *RetTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *RetTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
   SmallVector<Type *> ParamTys;
   for (unsigned I = 0; I != getNumOperands(); ++I)
     ParamTys.push_back(
-        ToVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
+        toVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
 
   // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
   FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
@@ -1203,7 +1203,7 @@ InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
   SelectInst *SI = cast<SelectInst>(getUnderlyingValue());
   bool ScalarCond = getOperand(0)->isDefinedOutsideLoopRegions();
   Type *ScalarTy = Ctx.Types.inferScalarType(this);
-  Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
   VPValue *Op0, *Op1;
@@ -1384,7 +1384,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   switch (Opcode) {
   case Instruction::FNeg: {
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     return Ctx.TTI.getArithmeticInstrCost(
         Opcode, VectorTy, CostKind,
         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
@@ -1422,7 +1422,7 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
     if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
         getOperand(1)->isDefinedOutsideLoopRegions())
       RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
 
     SmallVector<const Value *, 4> Operands;
@@ -1435,13 +1435,13 @@ InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
   }
   case Instruction::Freeze: {
     // This opcode is unknown. Assume that it is the same as 'mul'.
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
     return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
   }
   case Instruction::ICmp:
   case Instruction::FCmp: {
     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
-    Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
+    Type *VectorTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
     return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
                                       CostKind,
                                       {TTI::OK_AnyValue, TTI::OP_None},
@@ -1569,8 +1569,8 @@ InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
   }
 
   auto *SrcTy =
-      cast<VectorType>(ToVectorTy(Ctx.Types.inferScalarType(Operand), VF));
-  auto *DestTy = cast<VectorType>(ToVectorTy(getResultType(), VF));
+      cast<VectorType>(toVectorTy(Ctx.Types.inferScalarType(Operand), VF));
+  auto *DestTy = cast<VectorType>(toVectorTy(getResultType(), VF));
   // Arm TTI will use the underlying instruction to determine the cost.
   return Ctx.TTI.getCastInstrCost(
       Opcode, DestTy, SrcTy, CCH, TTI::TCK_RecipThroughput,
@@ -2078,8 +2078,8 @@ InstructionCost VPBlendRecipe::computeCost(ElementCount VF,
   if (vputils::onlyFirstLaneUsed(this))
     return Ctx.TTI.getCFInstrCost(Instruction::PHI, CostKind);
 
-  Type *ResultTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
-  Type *CmpTy = ToVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
+  Type *ResultTy = toVectorTy(Ctx.Types.inferScalarType(this), VF);
+  Type *CmpTy = toVectorTy(Type::getInt1Ty(Ctx.Types.getContext()), VF);
   return (getNumIncomingValues() - 1) *
          Ctx.TTI.getCmpSelInstrCost(Instruction::Select, ResultTy, CmpTy,
                                     CmpInst::BAD_ICMP_PREDICATE, CostKind);
@@ -2200,7 +2200,7 @@ InstructionCost VPReductionRecipe::computeCost(ElementCount VF,
                                                VPCostContext &Ctx) const {
   RecurKind RdxKind = RdxDesc.getRecurrenceKind();
   Type *ElementTy = Ctx.Types.inferScalarType(this);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ElementTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ElementTy, VF));
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   unsigned Opcode = RdxDesc.getOpcode();
 
@@ -2452,7 +2452,7 @@ void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
 
 InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
                                                  VPCostContext &Ctx) const {
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -2599,7 +2599,7 @@ InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
   // legacy model, it will always calculate the cost of mask.
   // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
   // don't need to compare to the legacy cost model.
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -2720,7 +2720,7 @@ InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
   // legacy model, it will always calculate the cost of mask.
   // TODO: Using getMemoryOpCost() instead of getMaskedMemoryOpCost when we
   // don't need to compare to the legacy cost model.
-  Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
+  Type *Ty = toVectorTy(getLoadStoreType(&Ingredient), VF);
   const Align Alignment =
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS =
@@ -3088,7 +3088,7 @@ InstructionCost VPInterleaveRecipe::computeCost(ElementCount VF,
   Type *ValTy = Ctx.Types.inferScalarType(
       getNumDefinedValues() > 0 ? getVPValue(InsertPosIdx)
                                 : getStoredValues()[InsertPosIdx]);
-  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
+  auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   unsigned AS = getLoadStoreAddressSpace(InsertPos);
   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
@@ -3331,7 +3331,7 @@ VPFirstOrderRecurrencePHIRecipe::computeCost(ElementCount VF,
   SmallVector<int> Mask(VF.getKnownMinValue());
   std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
   Type *VectorTy =
-      ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
+      toVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
 
   return Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
                                 cast<VectorType>(VectorTy), Mask, CostKind,