[VPlan] Introduce VPInstructionWithType, use instead of VPScalarCast (NFC) (#129706)
There are some opcodes that currently require specialized recipes because their result type is not implied by their operands; casts are one example. This leads to duplication from defining multiple full recipes.

This patch introduces a new VPInstructionWithType subclass that also stores the result type. The general idea is that opcodes which need to specify a result type use this general recipe. The current patch replaces VPScalarCastRecipe with VPInstructionWithType; a similar patch for VPWidenCastRecipe will follow soon.

There are a few proposed opcodes that should also benefit, without the need for workarounds:
* https://github.com/llvm/llvm-project/pull/129508
* https://github.com/llvm/llvm-project/pull/119284

PR: https://github.com/llvm/llvm-project/pull/129706
commit 6a9e8fc50c
parent 801b519dfd
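The pattern at the heart of the patch is worth seeing in isolation: instead of one dedicated recipe class per opcode whose result type cannot be inferred from its operands, a single VPInstruction subclass stores the result type and is recognized purely by its opcode, without a separate VPDef ID. Below is a minimal, self-contained C++ sketch of that idea; it is not LLVM code, and the names Inst, InstWithType, isCast, and inferType are illustrative stand-ins for VPInstruction, VPInstructionWithType, VPRecipeBase::isScalarCast, and VPTypeAnalysis::inferScalarType.

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Stand-in for a recipe identified by opcode; for most opcodes the result
// type can be inferred from the operands.
struct Inst {
  enum Opcode { Add, Trunc, ZExt };
  Opcode Op;
  std::vector<const Inst *> Operands;
  Inst(Opcode Op, std::vector<const Inst *> Ops)
      : Op(Op), Operands(std::move(Ops)) {}
  // Casts are the exception: their result type is not implied by operands.
  bool isCast() const { return Op == Trunc || Op == ZExt; }
};

// One subclass carries the result type for all such opcodes. No separate
// class ID is needed; the opcode alone identifies an InstWithType.
struct InstWithType : Inst {
  std::string ResultTy;
  InstWithType(Opcode Op, std::vector<const Inst *> Ops, std::string Ty)
      : Inst(Op, std::move(Ops)), ResultTy(std::move(Ty)) {}
  // LLVM-style classof: discriminate purely by opcode.
  static bool classof(const Inst *I) { return I->isCast(); }
};

// Type inference handles every type-carrying opcode with a single case.
std::string inferType(const Inst &I) {
  if (InstWithType::classof(&I))
    return static_cast<const InstWithType &>(I).ResultTy;
  return "i64"; // pretend all remaining opcodes produce i64
}

int main() {
  Inst A(Inst::Add, {});
  InstWithType T(Inst::Trunc, {&A}, "i32");
  std::printf("add: %s, trunc: %s\n", inferType(A).c_str(),
              inferType(T).c_str());
}

The same shape appears in the diff below: VPInstructionWithType::classof dispatches on VPRecipeBase::isScalarCast(), which just checks Instruction::isCast on the opcode, and the type-inference switch gains a single VPInstructionWithType case.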
@@ -249,10 +249,10 @@ public:
         new VPDerivedIVRecipe(Kind, FPBinOp, Start, Current, Step, Name));
   }

-  VPScalarCastRecipe *createScalarCast(Instruction::CastOps Opcode, VPValue *Op,
-                                       Type *ResultTy, DebugLoc DL) {
+  VPInstruction *createScalarCast(Instruction::CastOps Opcode, VPValue *Op,
+                                  Type *ResultTy, DebugLoc DL) {
     return tryInsertInstruction(
-        new VPScalarCastRecipe(Opcode, Op, ResultTy, DL));
+        new VPInstructionWithType(Opcode, Op, ResultTy, DL));
   }

   VPWidenCastRecipe *createWidenCast(Instruction::CastOps Opcode, VPValue *Op,
@@ -4462,7 +4462,6 @@ static bool willGenerateVectors(VPlan &Plan, ElementCount VF,
       switch (R.getVPDefID()) {
       case VPDef::VPDerivedIVSC:
       case VPDef::VPScalarIVStepsSC:
-      case VPDef::VPScalarCastSC:
       case VPDef::VPReplicateSC:
       case VPDef::VPInstructionSC:
       case VPDef::VPCanonicalIVPHISC:
@@ -10679,8 +10678,8 @@ preparePlanForEpilogueVectorLoop(VPlan &Plan, Loop *L,
   assert(all_of(IV->users(),
                 [](const VPUser *U) {
                   return isa<VPScalarIVStepsRecipe>(U) ||
-                         isa<VPScalarCastRecipe>(U) ||
                          isa<VPDerivedIVRecipe>(U) ||
+                         cast<VPRecipeBase>(U)->isScalarCast() ||
                          cast<VPInstruction>(U)->getOpcode() ==
                              Instruction::Add;
                 }) &&
@@ -457,6 +457,9 @@ public:
   /// Returns the debug location of the recipe.
   DebugLoc getDebugLoc() const { return DL; }

+  /// Return true if the recipe is a scalar cast.
+  bool isScalarCast() const;
+
 protected:
   /// Compute the cost of this recipe either using a recipe's specialized
   /// implementation or using the legacy cost model and the underlying
@@ -531,7 +534,6 @@ public:
     case VPRecipeBase::VPWidenIntOrFpInductionSC:
     case VPRecipeBase::VPWidenPointerInductionSC:
     case VPRecipeBase::VPReductionPHISC:
-    case VPRecipeBase::VPScalarCastSC:
     case VPRecipeBase::VPPartialReductionSC:
       return true;
     case VPRecipeBase::VPBranchOnMaskSC:
@@ -1025,6 +1027,56 @@ public:
   StringRef getName() const { return Name; }
 };

+/// A specialization of VPInstruction augmenting it with a dedicated result
+/// type, to be used when the opcode and operands of the VPInstruction don't
+/// directly determine the result type. Note that there is no separate VPDef ID
+/// for VPInstructionWithType; it shares the same ID as VPInstruction and is
+/// distinguished purely by the opcode.
+class VPInstructionWithType : public VPInstruction {
+  /// Scalar result type produced by the recipe.
+  Type *ResultTy;
+
+public:
+  VPInstructionWithType(unsigned Opcode, ArrayRef<VPValue *> Operands,
+                        Type *ResultTy, DebugLoc DL, const Twine &Name = "")
+      : VPInstruction(Opcode, Operands, DL, Name), ResultTy(ResultTy) {}
+
+  static inline bool classof(const VPRecipeBase *R) {
+    // VPInstructionWithType are VPInstructions with specific opcodes requiring
+    // type information.
+    return R->isScalarCast();
+  }
+
+  static inline bool classof(const VPUser *R) {
+    return isa<VPInstructionWithType>(cast<VPRecipeBase>(R));
+  }
+
+  VPInstruction *clone() override {
+    SmallVector<VPValue *, 2> Operands(operands());
+    auto *New = new VPInstructionWithType(
+        getOpcode(), Operands, getResultType(), getDebugLoc(), getName());
+    New->setUnderlyingValue(getUnderlyingValue());
+    return New;
+  }
+
+  void execute(VPTransformState &State) override;
+
+  /// Return the cost of this VPInstruction.
+  InstructionCost computeCost(ElementCount VF,
+                              VPCostContext &Ctx) const override {
+    // TODO: Compute accurate cost after retiring the legacy cost model.
+    return 0;
+  }
+
+  Type *getResultType() const { return ResultTy; }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe.
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+};
+
 /// A recipe to wrap on original IR instruction not to be modified during
 /// execution, except for PHIs. PHIs are modeled via the VPIRPhi subclass.
 /// Expect PHIs, VPIRInstructions cannot have any operands.
@@ -1211,54 +1263,6 @@ public:
   Type *getResultType() const { return ResultTy; }
 };

-/// VPScalarCastRecipe is a recipe to create scalar cast instructions.
-class VPScalarCastRecipe : public VPSingleDefRecipe {
-  Instruction::CastOps Opcode;
-
-  Type *ResultTy;
-
-  Value *generate(VPTransformState &State);
-
-public:
-  VPScalarCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy,
-                     DebugLoc DL)
-      : VPSingleDefRecipe(VPDef::VPScalarCastSC, {Op}, DL), Opcode(Opcode),
-        ResultTy(ResultTy) {}
-
-  ~VPScalarCastRecipe() override = default;
-
-  VPScalarCastRecipe *clone() override {
-    return new VPScalarCastRecipe(Opcode, getOperand(0), ResultTy,
-                                  getDebugLoc());
-  }
-
-  VP_CLASSOF_IMPL(VPDef::VPScalarCastSC)
-
-  void execute(VPTransformState &State) override;
-
-  /// Return the cost of this VPScalarCastRecipe.
-  InstructionCost computeCost(ElementCount VF,
-                              VPCostContext &Ctx) const override {
-    // TODO: Compute accurate cost after retiring the legacy cost model.
-    return 0;
-  }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-  void print(raw_ostream &O, const Twine &Indent,
-             VPSlotTracker &SlotTracker) const override;
-#endif
-
-  /// Returns the result type of the cast.
-  Type *getResultType() const { return ResultTy; }
-
-  bool onlyFirstLaneUsed(const VPValue *Op) const override {
-    // At the moment, only uniform codegen is implemented.
-    assert(is_contained(operands(), Op) &&
-           "Op must be an operand of the recipe");
-    return true;
-  }
-};
-
 /// A recipe for widening vector intrinsics.
 class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
   /// ID of the vector intrinsic to widen.
@@ -261,20 +261,18 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) {
                 VPPartialReductionRecipe>([this](const VPRecipeBase *R) {
             return inferScalarType(R->getOperand(0));
           })
+          // VPInstructionWithType must be handled before VPInstruction.
+          .Case<VPInstructionWithType, VPWidenIntrinsicRecipe>(
+              [](const auto *R) { return R->getResultType(); })
           .Case<VPBlendRecipe, VPInstruction, VPWidenRecipe, VPReplicateRecipe,
                 VPWidenCallRecipe, VPWidenMemoryRecipe, VPWidenSelectRecipe>(
               [this](const auto *R) { return inferScalarTypeForRecipe(R); })
-          .Case<VPWidenIntrinsicRecipe>([](const VPWidenIntrinsicRecipe *R) {
-            return R->getResultType();
-          })
           .Case<VPInterleaveRecipe>([V](const VPInterleaveRecipe *R) {
            // TODO: Use info from interleave group.
            return V->getUnderlyingValue()->getType();
          })
          .Case<VPWidenCastRecipe>(
              [](const VPWidenCastRecipe *R) { return R->getResultType(); })
-          .Case<VPScalarCastRecipe>(
-              [](const VPScalarCastRecipe *R) { return R->getResultType(); })
          .Case<VPExpandSCEVRecipe>([](const VPExpandSCEVRecipe *R) {
            return R->getSCEV()->getType();
          })
@@ -142,7 +142,6 @@ bool VPRecipeBase::mayHaveSideEffects() const {
   switch (getVPDefID()) {
   case VPDerivedIVSC:
   case VPPredInstPHISC:
-  case VPScalarCastSC:
   case VPVectorEndPointerSC:
     return false;
   case VPInstructionSC:
@@ -278,6 +277,11 @@ bool VPRecipeBase::isPhi() const {
          cast<VPInstruction>(this)->getOpcode() == Instruction::PHI);
 }

+bool VPRecipeBase::isScalarCast() const {
+  auto *VPI = dyn_cast<VPInstruction>(this);
+  return VPI && Instruction::isCast(VPI->getOpcode());
+}
+
 InstructionCost
 VPPartialReductionRecipe::computeCost(ElementCount VF,
                                       VPCostContext &Ctx) const {
@@ -417,7 +421,7 @@ bool VPInstruction::doesGeneratePerAllLanes() const {
 }

 bool VPInstruction::canGenerateScalarForFirstLane() const {
-  if (Instruction::isBinaryOp(getOpcode()))
+  if (Instruction::isBinaryOp(getOpcode()) || Instruction::isCast(getOpcode()))
     return true;
   if (isSingleScalar() || isVectorToScalar())
     return true;
@@ -908,7 +912,7 @@ void VPInstruction::execute(VPTransformState &State) {
 }

 bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {
-  if (Instruction::isBinaryOp(getOpcode()))
+  if (Instruction::isBinaryOp(getOpcode()) || Instruction::isCast(getOpcode()))
     return false;
   switch (getOpcode()) {
   case Instruction::ExtractElement:
@@ -932,7 +936,7 @@ bool VPInstruction::opcodeMayReadOrWriteFromMemory() const {

 bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
   assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
-  if (Instruction::isBinaryOp(getOpcode()))
+  if (Instruction::isBinaryOp(getOpcode()) || Instruction::isCast(getOpcode()))
     return vputils::onlyFirstLaneUsed(this);

   switch (getOpcode()) {
@@ -1070,6 +1074,35 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent,
 }
 #endif

+void VPInstructionWithType::execute(VPTransformState &State) {
+  State.setDebugLocFrom(getDebugLoc());
+  assert(vputils::onlyFirstLaneUsed(this) &&
+         "Codegen only implemented for first lane.");
+  switch (getOpcode()) {
+  case Instruction::ZExt:
+  case Instruction::Trunc: {
+    Value *Op = State.get(getOperand(0), VPLane(0));
+    Value *Cast = State.Builder.CreateCast(Instruction::CastOps(getOpcode()),
+                                           Op, ResultTy);
+    State.set(this, Cast, VPLane(0));
+    break;
+  }
+  default:
+    llvm_unreachable("opcode not implemented yet");
+  }
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void VPInstructionWithType::print(raw_ostream &O, const Twine &Indent,
+                                  VPSlotTracker &SlotTracker) const {
+  O << Indent << "EMIT ";
+  printAsOperand(O, SlotTracker);
+  O << " = " << Instruction::getOpcodeName(getOpcode()) << " ";
+  printOperands(O, SlotTracker);
+  O << " to " << *ResultTy;
+}
+#endif
+
 VPIRInstruction *VPIRInstruction ::create(Instruction &I) {
   if (auto *Phi = dyn_cast<PHINode>(&I))
     return new VPIRPhi(*Phi);
@@ -2551,37 +2584,6 @@ void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
 }
 #endif

-Value *VPScalarCastRecipe ::generate(VPTransformState &State) {
-  assert(vputils::onlyFirstLaneUsed(this) &&
-         "Codegen only implemented for first lane.");
-  switch (Opcode) {
-  case Instruction::SExt:
-  case Instruction::ZExt:
-  case Instruction::Trunc: {
-    // Note: SExt/ZExt not used yet.
-    Value *Op = State.get(getOperand(0), VPLane(0));
-    return State.Builder.CreateCast(Instruction::CastOps(Opcode), Op, ResultTy);
-  }
-  default:
-    llvm_unreachable("opcode not implemented yet");
-  }
-}
-
-void VPScalarCastRecipe ::execute(VPTransformState &State) {
-  State.set(this, generate(State), VPLane(0));
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-void VPScalarCastRecipe ::print(raw_ostream &O, const Twine &Indent,
-                                VPSlotTracker &SlotTracker) const {
-  O << Indent << "SCALAR-CAST ";
-  printAsOperand(O, SlotTracker);
-  O << " = " << Instruction::getOpcodeName(Opcode) << " ";
-  printOperands(O, SlotTracker);
-  O << " to " << *ResultTy;
-}
-#endif
-
 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
   assert(State.Lane && "Branch on Mask works only on single instance.");

@@ -111,7 +111,11 @@ bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
                (isa<LoadInst, StoreInst>(R->getUnderlyingValue())) &&
                all_of(R->operands(), isUniformAcrossVFsAndUFs);
       })
-      .Case<VPScalarCastRecipe, VPWidenCastRecipe>([](const auto *R) {
+      .Case<VPInstruction>([](const auto *VPI) {
+        return VPI->isScalarCast() &&
+               isUniformAcrossVFsAndUFs(VPI->getOperand(0));
+      })
+      .Case<VPWidenCastRecipe>([](const auto *R) {
         // A cast is uniform according to its operand.
         return isUniformAcrossVFsAndUFs(R->getOperand(0));
       })
@@ -45,8 +45,7 @@ inline bool isUniformAfterVectorization(const VPValue *VPV) {
     return true;
   if (auto *Rep = dyn_cast<VPReplicateRecipe>(VPV))
     return Rep->isUniform();
-  if (isa<VPWidenGEPRecipe, VPDerivedIVRecipe, VPScalarCastRecipe,
-          VPBlendRecipe>(VPV))
+  if (isa<VPWidenGEPRecipe, VPDerivedIVRecipe, VPBlendRecipe>(VPV))
     return all_of(VPV->getDefiningRecipe()->operands(),
                   isUniformAfterVectorization);
   if (auto *VPI = dyn_cast<VPInstruction>(VPV))
@@ -335,7 +335,6 @@ public:
     VPReductionSC,
     VPPartialReductionSC,
     VPReplicateSC,
-    VPScalarCastSC,
     VPScalarIVStepsSC,
     VPVectorPointerSC,
     VPVectorEndPointerSC,
@@ -147,8 +147,8 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
               [&](const VPRecipeBase *S) { return VerifyEVLUse(*S, 2); })
           .Case<VPWidenLoadEVLRecipe, VPVectorEndPointerRecipe>(
               [&](const VPRecipeBase *R) { return VerifyEVLUse(*R, 1); })
-          .Case<VPScalarCastRecipe>(
-              [&](const VPScalarCastRecipe *S) { return VerifyEVLUse(*S, 0); })
+          .Case<VPInstructionWithType>(
+              [&](const VPInstructionWithType *S) { return VerifyEVLUse(*S, 0); })
           .Case<VPInstruction>([&](const VPInstruction *I) {
             if (I->getOpcode() == Instruction::PHI)
               return VerifyEVLUse(*I, 1);
@@ -35,7 +35,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMAX]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -92,7 +92,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMIN]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -149,7 +149,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMAX]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -206,7 +206,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMIN]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -260,7 +260,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTLZ]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -312,7 +312,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTTZ]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -366,7 +366,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -422,7 +422,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -476,7 +476,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ABS]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -31,7 +31,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SEXT]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -85,7 +85,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ZEXT]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -137,7 +137,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -189,7 +189,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPEXT]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -241,7 +241,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTRUNC]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -293,7 +293,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SITOFP]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -345,7 +345,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[UITOFP]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -397,7 +397,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOSI]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -449,7 +449,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOUI]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -501,7 +501,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[INTTOPTR]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -19,7 +19,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
 ; IF-EVL-NEXT: Successor(s): vector.ph
 ; IF-EVL-EMPTY:
 ; IF-EVL: vector.ph:
-; IF-EVL-NEXT: SCALAR-CAST vp<[[VF32:%[0-9]+]]> = trunc vp<[[VF]]> to i32
+; IF-EVL-NEXT: EMIT vp<[[VF32:%[0-9]+]]> = trunc vp<[[VF]]> to i32
 ; IF-EVL-NEXT: Successor(s): vector loop
 ; IF-EVL-EMPTY:
 ; IF-EVL: <x1> vector loop: {
@@ -39,7 +39,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds nuw ir<%B>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ADD]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -52,7 +52,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
 ; IF-EVL-OUTLOOP-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]>
 ; IF-EVL-OUTLOOP-NEXT: WIDEN ir<[[ADD:%.+]]> = add ir<[[LD1]]>, ir<[[RDX_PHI]]>
 ; IF-EVL-OUTLOOP-NEXT: WIDEN-INTRINSIC vp<[[RDX_SELECT]]> = call llvm.vp.merge(ir<true>, ir<[[ADD]]>, ir<[[RDX_PHI]]>, vp<[[EVL]]>)
-; IF-EVL-OUTLOOP-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-OUTLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -92,7 +92,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
 ; IF-EVL-INLOOP-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]>
 ; IF-EVL-INLOOP-NEXT: WIDEN ir<[[LD1:%.+]]> = vp.load vp<[[PTR1]]>, vp<[[EVL]]>
 ; IF-EVL-INLOOP-NEXT: REDUCE ir<[[ADD:%.+]]> = ir<[[RDX_PHI]]> + vp.reduce.add (ir<[[LD1]]>, vp<[[EVL]]>)
-; IF-EVL-INLOOP-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-INLOOP-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-INLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -40,7 +40,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
 ; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
 ; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
@@ -49,7 +49,7 @@
 ; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: vp<[[PTR3:%.+]]> = vector-pointer ir<[[GEP3]]>
 ; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
-; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
+; IF-EVL-NEXT: EMIT vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEX]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
 ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT]]> = add vp<[[IV]]>, ir<[[VFUF]]>
 ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, ir<[[VTC]]>
@@ -202,14 +202,14 @@ exit:
 ; DBG-NEXT: Successor(s): vector.ph
 ; DBG-EMPTY:
 ; DBG-NEXT: vector.ph:
-; DBG-NEXT: SCALAR-CAST vp<[[CAST:%.+]]> = trunc ir<1> to i32
+; DBG-NEXT: EMIT vp<[[CAST:%.+]]> = trunc ir<1> to i32
 ; DBG-NEXT: Successor(s): vector loop
 ; DBG-EMPTY:
 ; DBG-NEXT: <x1> vector loop: {
 ; DBG-NEXT: vector.body:
 ; DBG-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
 ; DBG-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for> = phi ir<0>, vp<[[SCALAR_STEPS:.+]]>
-; DBG-NEXT: SCALAR-CAST vp<[[TRUNC_IV:%.+]]> = trunc vp<[[CAN_IV]]> to i32
+; DBG-NEXT: EMIT vp<[[TRUNC_IV:%.+]]> = trunc vp<[[CAN_IV]]> to i32
 ; DBG-NEXT: vp<[[SCALAR_STEPS]]> = SCALAR-STEPS vp<[[TRUNC_IV]]>, vp<[[CAST]]>, vp<[[VF]]
 ; DBG-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%for>, vp<[[SCALAR_STEPS]]>
 ; DBG-NEXT: CLONE store vp<[[SPLICE]]>, ir<%dst>