[VPlan] Propagate all GEP flags (#119899)
Store GEPNoWrapFlags instead of only InBounds and propagate them.
parent 34c4f6f937 · commit 1157187496
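VPlan previously tracked only a single inbounds bit for GEP-producing recipes (the GEPFlagsTy struct removed below); it now stores LLVM's GEPNoWrapFlags and propagates all three GEP no-wrap flags (inbounds, nusw, nuw). A minimal sketch of the GEPNoWrapFlags surface the diff relies on, simplified from llvm/IR/GEPNoWrapFlags.h and not the exact upstream class:

#include <cstdint>

// Sketch only: the real class lives in llvm/IR/GEPNoWrapFlags.h.
class GEPNoWrapFlags {
  enum : uint8_t { InBoundsFlag = 1, NUSWFlag = 2, NUWFlag = 4 };
  uint8_t Flags = 0;
  constexpr explicit GEPNoWrapFlags(uint8_t F) : Flags(F) {}

public:
  constexpr GEPNoWrapFlags() = default;
  static constexpr GEPNoWrapFlags none() { return GEPNoWrapFlags(0); }
  // inbounds implies nusw, so the factory sets both bits together.
  static constexpr GEPNoWrapFlags inBounds() {
    return GEPNoWrapFlags(InBoundsFlag | NUSWFlag);
  }
  constexpr bool isInBounds() const { return Flags & InBoundsFlag; }
  constexpr bool hasNoUnsignedSignedWrap() const { return Flags & NUSWFlag; }
  constexpr bool hasNoUnsignedWrap() const { return Flags & NUWFlag; }
};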
@@ -222,13 +222,13 @@ public:
   VPInstruction *createPtrAdd(VPValue *Ptr, VPValue *Offset, DebugLoc DL = {},
                               const Twine &Name = "") {
-    return tryInsertInstruction(new VPInstruction(
-        Ptr, Offset, VPRecipeWithIRFlags::GEPFlagsTy(false), DL, Name));
+    return tryInsertInstruction(
+        new VPInstruction(Ptr, Offset, GEPNoWrapFlags::none(), DL, Name));
   }
   VPValue *createInBoundsPtrAdd(VPValue *Ptr, VPValue *Offset, DebugLoc DL = {},
                                 const Twine &Name = "") {
-    return tryInsertInstruction(new VPInstruction(
-        Ptr, Offset, VPRecipeWithIRFlags::GEPFlagsTy(true), DL, Name));
+    return tryInsertInstruction(
+        new VPInstruction(Ptr, Offset, GEPNoWrapFlags::inBounds(), DL, Name));
   }

   VPDerivedIVRecipe *createDerivedIV(InductionDescriptor::InductionKind Kind,
@@ -8406,10 +8406,13 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
     if (Reverse)
       VectorPtr = new VPReverseVectorPointerRecipe(
           Ptr, &Plan.getVF(), getLoadStoreType(I),
-          GEP ? GEP->isInBounds() : false, I->getDebugLoc());
+          GEP && GEP->isInBounds() ? GEPNoWrapFlags::inBounds()
+                                   : GEPNoWrapFlags::none(),
+          I->getDebugLoc());
     else
       VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
-                                            GEP ? GEP->isInBounds() : false,
+                                            GEP ? GEP->getNoWrapFlags()
+                                                : GEPNoWrapFlags::none(),
                                             I->getDebugLoc());
     Builder.getInsertBlock()->appendRecipe(VectorPtr);
     Ptr = VectorPtr;
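Note the asymmetry in the hunk above: the reverse-pointer path keeps at most inbounds (its generated GEPs use negative lane offsets such as 1 - RunTimeVF, so nuw could not be preserved), while the forward path takes the source GEP's complete flag set. A hypothetical helper, not part of the patch, spelling out the same choice:

#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper mirroring the flag selection in tryToWidenMemory.
static GEPNoWrapFlags vectorPtrFlags(const GetElementPtrInst *GEP,
                                     bool Reverse) {
  if (!GEP)
    return GEPNoWrapFlags::none();
  if (Reverse) // Negative lane offsets: keep inbounds at most.
    return GEP->isInBounds() ? GEPNoWrapFlags::inBounds()
                             : GEPNoWrapFlags::none();
  return GEP->getNoWrapFlags(); // Forward: keep the full flag set.
}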
@@ -952,11 +952,6 @@ public:
     DisjointFlagsTy(bool IsDisjoint) : IsDisjoint(IsDisjoint) {}
   };

-  struct GEPFlagsTy {
-    char IsInBounds : 1;
-    GEPFlagsTy(bool IsInBounds) : IsInBounds(IsInBounds) {}
-  };
-
 private:
   struct ExactFlagsTy {
     char IsExact : 1;
@@ -983,7 +978,7 @@ private:
   WrapFlagsTy WrapFlags;
   DisjointFlagsTy DisjointFlags;
   ExactFlagsTy ExactFlags;
-  GEPFlagsTy GEPFlags;
+  GEPNoWrapFlags GEPFlags;
   NonNegFlagsTy NonNegFlags;
   FastMathFlagsTy FMFs;
   unsigned AllFlags;
@@ -1020,7 +1015,7 @@ public:
       ExactFlags.IsExact = Op->isExact();
     } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
       OpType = OperationType::GEPOp;
-      GEPFlags.IsInBounds = GEP->isInBounds();
+      GEPFlags = GEP->getNoWrapFlags();
     } else if (auto *PNNI = dyn_cast<PossiblyNonNegInst>(&I)) {
       OpType = OperationType::NonNegOp;
       NonNegFlags.NonNeg = PNNI->hasNonNeg();
@@ -1060,7 +1055,7 @@ public:
 protected:
   template <typename IterT>
   VPRecipeWithIRFlags(const unsigned char SC, IterT Operands,
-                      GEPFlagsTy GEPFlags, DebugLoc DL = {})
+                      GEPNoWrapFlags GEPFlags, DebugLoc DL = {})
       : VPSingleDefRecipe(SC, Operands, DL), OpType(OperationType::GEPOp),
         GEPFlags(GEPFlags) {}

@@ -1097,7 +1092,7 @@ public:
       ExactFlags.IsExact = false;
       break;
     case OperationType::GEPOp:
-      GEPFlags.IsInBounds = false;
+      GEPFlags = GEPNoWrapFlags::none();
       break;
     case OperationType::FPMathOp:
       FMFs.NoNaNs = false;
@@ -1126,10 +1121,7 @@ public:
       I->setIsExact(ExactFlags.IsExact);
       break;
     case OperationType::GEPOp:
-      // TODO(gep_nowrap): Track the full GEPNoWrapFlags in VPlan.
-      cast<GetElementPtrInst>(I)->setNoWrapFlags(
-          GEPFlags.IsInBounds ? GEPNoWrapFlags::inBounds()
-                              : GEPNoWrapFlags::none());
+      cast<GetElementPtrInst>(I)->setNoWrapFlags(GEPFlags);
       break;
     case OperationType::FPMathOp:
       I->setHasAllowReassoc(FMFs.AllowReassoc);
@@ -1155,11 +1147,7 @@ public:
     return CmpPredicate;
   }

-  bool isInBounds() const {
-    assert(OpType == OperationType::GEPOp &&
-           "recipe doesn't have inbounds flag");
-    return GEPFlags.IsInBounds;
-  }
+  GEPNoWrapFlags getGEPNoWrapFlags() const { return GEPFlags; }

   /// Returns true if the recipe has fast-math flags.
   bool hasFastMathFlags() const { return OpType == OperationType::FPMathOp; }
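Call sites migrate mechanically from the removed predicate to the stored flags, as the later hunks show. A hypothetical wrapper illustrating the before/after shape (VPlan.h is an internal LoopVectorize header, so this compiles only inside those sources):

#include "VPlan.h" // internal LoopVectorize header; sketch only

// Before this patch: R.isInBounds()
// After this patch:  R.getGEPNoWrapFlags().isInBounds()
static bool recipeIsInBounds(const llvm::VPRecipeWithIRFlags &R) {
  return R.getGEPNoWrapFlags().isInBounds();
}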
@@ -1306,7 +1294,7 @@ public:
     assert(Opcode == Instruction::Or && "only OR opcodes can be disjoint");
   }

-  VPInstruction(VPValue *Ptr, VPValue *Offset, GEPFlagsTy Flags,
+  VPInstruction(VPValue *Ptr, VPValue *Offset, GEPNoWrapFlags Flags,
                 DebugLoc DL = {}, const Twine &Name = "")
       : VPRecipeWithIRFlags(VPDef::VPInstructionSC,
                             ArrayRef<VPValue *>({Ptr, Offset}), Flags, DL),
@@ -1914,10 +1902,9 @@ class VPReverseVectorPointerRecipe : public VPRecipeWithIRFlags,

 public:
   VPReverseVectorPointerRecipe(VPValue *Ptr, VPValue *VF, Type *IndexedTy,
-                               bool IsInBounds, DebugLoc DL)
+                               GEPNoWrapFlags GEPFlags, DebugLoc DL)
       : VPRecipeWithIRFlags(VPDef::VPReverseVectorPointerSC,
-                            ArrayRef<VPValue *>({Ptr, VF}),
-                            GEPFlagsTy(IsInBounds), DL),
+                            ArrayRef<VPValue *>({Ptr, VF}), GEPFlags, DL),
         IndexedTy(IndexedTy) {}

   VP_CLASSOF_IMPL(VPDef::VPReverseVectorPointerSC)
@@ -1949,8 +1936,9 @@ public:
   }

   VPReverseVectorPointerRecipe *clone() override {
-    return new VPReverseVectorPointerRecipe(
-        getOperand(0), getVFValue(), IndexedTy, isInBounds(), getDebugLoc());
+    return new VPReverseVectorPointerRecipe(getOperand(0), getVFValue(),
+                                            IndexedTy, getGEPNoWrapFlags(),
+                                            getDebugLoc());
   }

 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -1966,10 +1954,10 @@ class VPVectorPointerRecipe : public VPRecipeWithIRFlags,
   Type *IndexedTy;

 public:
-  VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, bool IsInBounds,
+  VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, GEPNoWrapFlags GEPFlags,
                         DebugLoc DL)
       : VPRecipeWithIRFlags(VPDef::VPVectorPointerSC, ArrayRef<VPValue *>(Ptr),
-                            GEPFlagsTy(IsInBounds), DL),
+                            GEPFlags, DL),
         IndexedTy(IndexedTy) {}

   VP_CLASSOF_IMPL(VPDef::VPVectorPointerSC)
@@ -1991,8 +1979,8 @@ public:
   }

   VPVectorPointerRecipe *clone() override {
-    return new VPVectorPointerRecipe(getOperand(0), IndexedTy, isInBounds(),
-                                     getDebugLoc());
+    return new VPVectorPointerRecipe(getOperand(0), IndexedTy,
+                                     getGEPNoWrapFlags(), getDebugLoc());
   }

   /// Return the cost of this VPHeaderPHIRecipe.
@@ -621,8 +621,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
            "can only generate first lane for PtrAdd");
     Value *Ptr = State.get(getOperand(0), VPLane(0));
     Value *Addend = State.get(getOperand(1), VPLane(0));
-    return isInBounds() ? Builder.CreateInBoundsPtrAdd(Ptr, Addend, Name)
-                        : Builder.CreatePtrAdd(Ptr, Addend, Name);
+    return Builder.CreatePtrAdd(Ptr, Addend, Name, getGEPNoWrapFlags());
   }
   case VPInstruction::ResumePhi: {
     Value *IncomingFromVPlanPred =
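The collapsed call above works because the IRBuilder overload being invoked takes a trailing GEPNoWrapFlags argument, so one call site covers both the inbounds and the plain case. A usage sketch assuming that overload:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Emit Ptr + Off with whatever no-wrap guarantees the caller can prove.
static Value *emitPtrAdd(IRBuilder<> &B, Value *Ptr, Value *Off,
                         GEPNoWrapFlags NW) {
  return B.CreatePtrAdd(Ptr, Off, "ptradd", NW);
}
// e.g. emitPtrAdd(B, P, O, GEPNoWrapFlags::inBounds()) replaces the old
// CreateInBoundsPtrAdd path, and GEPNoWrapFlags::none() the plain one.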
@@ -1276,8 +1275,12 @@ void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
     getFastMathFlags().print(O);
     break;
   case OperationType::GEPOp:
-    if (GEPFlags.IsInBounds)
+    if (GEPFlags.isInBounds())
       O << " inbounds";
+    else if (GEPFlags.hasNoUnsignedSignedWrap())
+      O << " nusw";
+    if (GEPFlags.hasNoUnsignedWrap())
+      O << " nuw";
     break;
   case OperationType::NonNegOp:
     if (NonNegFlags.NonNeg)
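Since inbounds implies nusw, the printer emits "inbounds" or "nusw" but never both, while "nuw" is independent and may accompany either. A standalone mirror of that branch logic, reusing the GEPNoWrapFlags sketch near the top of this page:

#include <string>

// Assumes the GEPNoWrapFlags sketch shown earlier; output matches the
// printer above, e.g. " inbounds nuw", " nusw", or "".
std::string formatGEPFlags(GEPNoWrapFlags F) {
  std::string S;
  if (F.isInBounds())
    S += " inbounds"; // subsumes nusw
  else if (F.hasNoUnsignedSignedWrap())
    S += " nusw";
  if (F.hasNoUnsignedWrap())
    S += " nuw";
  return S;
}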
@@ -1906,9 +1909,9 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
     for (unsigned I = 0, E = getNumOperands(); I != E; I++)
       Ops.push_back(State.get(getOperand(I), VPLane(0)));

-    auto *NewGEP =
-        State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
-                                ArrayRef(Ops).drop_front(), "", isInBounds());
+    auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
+                                           ArrayRef(Ops).drop_front(), "",
+                                           getGEPNoWrapFlags());
     Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
     State.set(this, Splat);
     State.addMetadata(Splat, GEP);
@@ -1934,7 +1937,7 @@ void VPWidenGEPRecipe::execute(VPTransformState &State) {
     // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
     // but it should be a vector, otherwise.
     auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
-                                           Indices, "", isInBounds());
+                                           Indices, "", getGEPNoWrapFlags());
     assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
            "NewGEP is not a pointer vector");
     State.set(this, NewGEP);
@@ -1985,9 +1988,10 @@ void VPReverseVectorPointerRecipe::execute(VPTransformState &State) {
   // LastLane = 1 - RunTimeVF
   Value *LastLane = Builder.CreateSub(ConstantInt::get(IndexTy, 1), RunTimeVF);
   Value *Ptr = State.get(getOperand(0), VPLane(0));
-  bool InBounds = isInBounds();
-  Value *ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", InBounds);
-  ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "", InBounds);
+  Value *ResultPtr =
+      Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", getGEPNoWrapFlags());
+  ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "",
+                                getGEPNoWrapFlags());

   State.set(this, ResultPtr, /*IsScalar*/ true);
 }
@@ -1997,9 +2001,9 @@ void VPReverseVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
                                          VPSlotTracker &SlotTracker) const {
   O << Indent;
   printAsOperand(O, SlotTracker);
-  O << " = reverse-vector-pointer ";
-  if (isInBounds())
-    O << "inbounds ";
+  O << " = reverse-vector-pointer";
+  printFlags(O);
+  O << " ";
   printOperands(O, SlotTracker);
 }
 #endif
@@ -2011,10 +2015,10 @@ void VPVectorPointerRecipe::execute(VPTransformState &State) {
   Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false,
                                 CurrentPart, Builder);
   Value *Ptr = State.get(getOperand(0), VPLane(0));
-  bool InBounds = isInBounds();

   Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
-  Value *ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, Increment, "", InBounds);
+  Value *ResultPtr =
+      Builder.CreateGEP(IndexedTy, Ptr, Increment, "", getGEPNoWrapFlags());

   State.set(this, ResultPtr, /*IsScalar*/ true);
 }
@@ -81,8 +81,8 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
 ; CHECK-VS1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-VS1-NEXT: [[TMP20:%.*]] = add i64 [[TMP0]], [[INDEX]]
 ; CHECK-VS1-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 0
-; CHECK-VS1-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP21]]
-; CHECK-VS1-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i32 0
+; CHECK-VS1-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[TMP21]]
+; CHECK-VS1-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP22]], i32 0
 ; CHECK-VS1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP23]], align 1
 ; CHECK-VS1-NEXT: [[TMP24:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
 ; CHECK-VS1-NEXT: store <vscale x 16 x i8> [[TMP24]], ptr [[TMP23]], align 1
@@ -115,8 +115,8 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
 ; CHECK-VS1-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
 ; CHECK-VS1-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX5]]
 ; CHECK-VS1-NEXT: [[TMP32:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-VS1-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP32]]
-; CHECK-VS1-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i32 0
+; CHECK-VS1-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[TMP32]]
+; CHECK-VS1-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP33]], i32 0
 ; CHECK-VS1-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 8 x i8>, ptr [[TMP34]], align 1
 ; CHECK-VS1-NEXT: [[TMP35:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]]
 ; CHECK-VS1-NEXT: store <vscale x 8 x i8> [[TMP35]], ptr [[TMP34]], align 1
@@ -189,8 +189,8 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
 ; CHECK-VS2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-VS2-NEXT: [[TMP20:%.*]] = add i64 [[TMP0]], [[INDEX]]
 ; CHECK-VS2-NEXT: [[TMP21:%.*]] = add i64 [[TMP20]], 0
-; CHECK-VS2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP21]]
-; CHECK-VS2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i32 0
+; CHECK-VS2-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[TMP21]]
+; CHECK-VS2-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP22]], i32 0
 ; CHECK-VS2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP23]], align 1
 ; CHECK-VS2-NEXT: [[TMP24:%.*]] = add <vscale x 8 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
 ; CHECK-VS2-NEXT: store <vscale x 8 x i8> [[TMP24]], ptr [[TMP23]], align 1
@@ -223,8 +223,8 @@ define void @low_vf_ic_is_better(ptr nocapture noundef %p, i32 %tc, i16 noundef
 ; CHECK-VS2-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT9:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
 ; CHECK-VS2-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX5]]
 ; CHECK-VS2-NEXT: [[TMP32:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-VS2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP32]]
-; CHECK-VS2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i32 0
+; CHECK-VS2-NEXT: [[TMP33:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[TMP32]]
+; CHECK-VS2-NEXT: [[TMP34:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP33]], i32 0
 ; CHECK-VS2-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i8>, ptr [[TMP34]], align 1
 ; CHECK-VS2-NEXT: [[TMP35:%.*]] = add <vscale x 4 x i8> [[WIDE_LOAD6]], [[BROADCAST_SPLAT8]]
 ; CHECK-VS2-NEXT: store <vscale x 4 x i8> [[TMP35]], ptr [[TMP34]], align 1
@@ -279,7 +279,7 @@ while.end:

 define void @trip_count_too_small(ptr nocapture noundef %p, i32 noundef %tc, i16 noundef %val) {
 ; CHECK-LABEL: define void @trip_count_too_small(
-; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[TC:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: ptr nocapture noundef [[P:%.*]], i32 noundef [[TC:%.*]], i16 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT: [[ENTRY:.*:]]
 ; CHECK-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TC]], 3
 ; CHECK-NEXT: br i1 [[CMP7]], label %[[WHILE_PREHEADER:.*]], label %[[WHILE_END:.*]]
@@ -440,8 +440,8 @@ define void @overflow_indvar_known_false(ptr nocapture noundef %p, i32 noundef %
 ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 16 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP0]], [[INDEX]]
 ; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[V]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw i8, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i32 0
 ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP14]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison)
 ; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 16 x i8> [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]]
 ; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP15]], ptr [[TMP14]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
@@ -14,36 +14,36 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @load_ext_trunc_store(ptr readonly %in, ptr noalias %out, i64 %N) {
 ; CHECK-LABEL: define void @load_ext_trunc_store(
 ; CHECK-SAME: ptr readonly [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 4
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw double, ptr [[IN]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw double, ptr [[TMP2]], i32 0
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP4]], align 8
 ; CHECK-NEXT: [[TMP3:%.*]] = fpext <4 x double> [[WIDE_LOAD]] to <4 x fp128>
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[OUT]], i64 [[TMP0]]
 ; CHECK-NEXT: [[TMP5:%.*]] = fptrunc <4 x fp128> [[TMP3]] to <4 x float>
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 0
 ; CHECK-NEXT: store <4 x float> [[TMP5]], ptr [[TMP12]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
 ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_EXIT:.*]], label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT: [[GEP_IN:%.*]] = getelementptr inbounds nuw double, ptr [[IN]], i64 [[IV]]
 ; CHECK-NEXT: [[LOAD_IN:%.*]] = load double, ptr [[GEP_IN]], align 8
 ; CHECK-NEXT: [[LOAD_EXT:%.*]] = fpext double [[LOAD_IN]] to fp128
@@ -52,8 +52,8 @@ define void @load_ext_trunc_store(ptr readonly %in, ptr noalias %out, i64 %N) {
 ; CHECK-NEXT: store float [[TRUNC_OUT]], ptr [[GEP_OUT]], align 4
 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ult i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: [[FOR_EXIT]]:
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: for.exit:
 ; CHECK-NEXT: ret void
 ;
 entry:
@@ -10,7 +10,7 @@ define dso_local double @test(ptr %Arr) {
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds float, ptr [[ARR:%.*]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[ARR:%.*]], i64 [[INDEX]]
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP0]], align 4
 ; CHECK-NEXT: [[TMP1:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
 ; CHECK-NEXT: [[TMP2:%.*]] = tail call fast <2 x double> @__sind2(<2 x double> [[TMP1]])
@@ -612,10 +612,10 @@ define void @cost_duplicate_recipe_for_sinking(ptr %A, i64 %N) #2 {
 ; CHECK-NEXT: [[TMP8:%.*]] = shl nsw i64 [[TMP4]], 2
 ; CHECK-NEXT: [[TMP9:%.*]] = shl nsw i64 [[TMP5]], 2
 ; CHECK-NEXT: [[TMP10:%.*]] = shl nsw i64 [[TMP6]], 2
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[A:%.*]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr nusw double, ptr [[A:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr nusw double, ptr [[A]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr nusw double, ptr [[A]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr nusw double, ptr [[A]], i64 [[TMP10]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x double>, ptr [[TMP11]], align 8
 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x double> [[WIDE_VEC]], <16 x double> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
 ; CHECK-NEXT: [[WIDE_VEC1:%.*]] = load <16 x double>, ptr [[TMP12]], align 8
|
||||
; CHECK-NEXT: [[INDEX40:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL1]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT51:%.*]], [[PRED_STORE_CONTINUE50:%.*]] ]
|
||||
; CHECK-NEXT: [[TMP86:%.*]] = add i64 [[INDEX40]], 0
|
||||
; CHECK-NEXT: [[TMP87:%.*]] = shl nsw i64 [[TMP86]], 2
|
||||
; CHECK-NEXT: [[TMP89:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP87]]
|
||||
; CHECK-NEXT: [[TMP89:%.*]] = getelementptr nusw double, ptr [[A]], i64 [[TMP87]]
|
||||
; CHECK-NEXT: [[WIDE_VEC41:%.*]] = load <16 x double>, ptr [[TMP89]], align 8
|
||||
; CHECK-NEXT: [[STRIDED_VEC42:%.*]] = shufflevector <16 x double> [[WIDE_VEC41]], <16 x double> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
|
||||
; CHECK-NEXT: [[TMP90:%.*]] = fcmp oeq <4 x double> [[STRIDED_VEC42]], zeroinitializer
|
||||
|
@@ -57,6 +57,86 @@ loop.exit:
   ret void
 }

+; Variant with getelementptr nusw.
+define void @drop_scalar_gep_nusw(ptr noalias nocapture readonly %input,
+                                  ptr %output) local_unnamed_addr #0 {
+; CHECK-LABEL: @drop_scalar_gep_nusw(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, {{.*}} ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, {{.*}} ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK: [[TMP4:%.*]] = icmp eq <4 x i64> [[VEC_IND]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP4]], splat (i1 true)
+; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[INPUT:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[TMP6]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP8]], i32 4, <4 x i1> [[TMP7]], <4 x float> poison), !invariant.load !0
+entry:
+  br label %loop.header
+
+loop.header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.inc, %if.end ]
+  %i23 = icmp eq i64 %iv, 0
+  br i1 %i23, label %if.end, label %if.then
+
+if.then:
+  %i27 = sub nuw nsw i64 %iv, 1
+  %i29 = getelementptr nusw float, ptr %input, i64 %i27
+  %i30 = load float, ptr %i29, align 4, !invariant.load !0
+  br label %if.end
+
+if.end:
+  %i34 = phi float [ 0.000000e+00, %loop.header ], [ %i30, %if.then ]
+  %i35 = getelementptr nusw float, ptr %output, i64 %iv
+  store float %i34, ptr %i35, align 4
+  %iv.inc = add nuw nsw i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.inc, 4
+  br i1 %exitcond, label %loop.exit, label %loop.header
+
+loop.exit:
+  ret void
+}
+
+; Variant with getelementptr nuw.
+define void @drop_scalar_gep_nuw(ptr noalias nocapture readonly %input,
+                                 ptr %output) local_unnamed_addr #0 {
+; CHECK-LABEL: @drop_scalar_gep_nuw(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, {{.*}} ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, {{.*}} ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK: [[TMP4:%.*]] = icmp eq <4 x i64> [[VEC_IND]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP4]], splat (i1 true)
+; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr float, ptr [[INPUT:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[TMP6]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[TMP8]], i32 4, <4 x i1> [[TMP7]], <4 x float> poison), !invariant.load !0
+entry:
+  br label %loop.header
+
+loop.header:
+  %iv = phi i64 [ 0, %entry ], [ %iv.inc, %if.end ]
+  %i23 = icmp eq i64 %iv, 0
+  br i1 %i23, label %if.end, label %if.then
+
+if.then:
+  %i27 = sub nuw nsw i64 %iv, 1
+  %i29 = getelementptr nuw float, ptr %input, i64 %i27
+  %i30 = load float, ptr %i29, align 4, !invariant.load !0
+  br label %if.end
+
+if.end:
+  %i34 = phi float [ 0.000000e+00, %loop.header ], [ %i30, %if.then ]
+  %i35 = getelementptr nuw float, ptr %output, i64 %iv
+  store float %i34, ptr %i35, align 4
+  %iv.inc = add nuw nsw i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.inc, 4
+  br i1 %exitcond, label %loop.exit, label %loop.header
+
+loop.exit:
+  ret void
+}
+
 ; Drop poison-generating flags from 'sub' and 'getelementptr' feeding a masked load.
 ; In this case, 'sub' and 'getelementptr' are not guarded by the predicate.
 define void @drop_nonpred_scalar_nuw_nsw(ptr noalias nocapture readonly %input,
@@ -38,7 +38,7 @@ define void @fp_iv_loop1(ptr noalias nocapture %A, i32 %N) #0 {
 ; AUTO_VEC-NEXT: [[STEP_ADD:%.*]] = fadd fast <8 x float> [[VEC_IND]], splat (float 4.000000e+00)
 ; AUTO_VEC-NEXT: [[STEP_ADD2:%.*]] = fadd fast <8 x float> [[VEC_IND]], splat (float 8.000000e+00)
 ; AUTO_VEC-NEXT: [[STEP_ADD3:%.*]] = fadd fast <8 x float> [[VEC_IND]], splat (float 1.200000e+01)
-; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
+; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]]
 ; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 32
 ; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 64
 ; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 96
|
||||
; AUTO_VEC: vec.epilog.vector.body:
|
||||
; AUTO_VEC-NEXT: [[INDEX10:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT13:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
|
||||
; AUTO_VEC-NEXT: [[VEC_IND11:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT12:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
|
||||
; AUTO_VEC-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX10]]
|
||||
; AUTO_VEC-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX10]]
|
||||
; AUTO_VEC-NEXT: store <4 x float> [[VEC_IND11]], ptr [[TMP8]], align 4
|
||||
; AUTO_VEC-NEXT: [[INDEX_NEXT13]] = add nuw i64 [[INDEX10]], 4
|
||||
; AUTO_VEC-NEXT: [[VEC_IND_NEXT12]] = fadd fast <4 x float> [[VEC_IND11]], splat (float 2.000000e+00)
|
||||
@ -410,7 +410,7 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) {
|
||||
; AUTO_VEC-NEXT: [[STEP_ADD:%.*]] = fadd reassoc <8 x float> [[VEC_IND]], splat (float 3.360000e+02)
|
||||
; AUTO_VEC-NEXT: [[STEP_ADD2:%.*]] = fadd reassoc <8 x float> [[STEP_ADD]], splat (float 3.360000e+02)
|
||||
; AUTO_VEC-NEXT: [[STEP_ADD3:%.*]] = fadd reassoc <8 x float> [[STEP_ADD2]], splat (float 3.360000e+02)
|
||||
; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[P:%.*]], i64 [[INDEX]]
|
||||
; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[P:%.*]], i64 [[INDEX]]
|
||||
; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 32
|
||||
; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 64
|
||||
; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 96
|
||||
@ -454,7 +454,7 @@ define void @fadd_reassoc_FMF(ptr nocapture %p, i32 %N) {
|
||||
; AUTO_VEC: vec.epilog.vector.body:
|
||||
; AUTO_VEC-NEXT: [[INDEX13:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT17:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
|
||||
; AUTO_VEC-NEXT: [[VEC_IND14:%.*]] = phi <4 x float> [ [[INDUCTION]], [[VEC_EPILOG_PH]] ], [ [[VEC_IND_NEXT15:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
|
||||
; AUTO_VEC-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[P]], i64 [[INDEX13]]
|
||||
; AUTO_VEC-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw float, ptr [[P]], i64 [[INDEX13]]
|
||||
; AUTO_VEC-NEXT: [[WIDE_LOAD16:%.*]] = load <4 x float>, ptr [[TMP13]], align 4
|
||||
; AUTO_VEC-NEXT: [[TMP14:%.*]] = fadd reassoc <4 x float> [[VEC_IND14]], [[WIDE_LOAD16]]
|
||||
; AUTO_VEC-NEXT: store <4 x float> [[TMP14]], ptr [[TMP13]], align 4
|
||||
|
@@ -479,7 +479,7 @@ define void @geps_feeding_interleave_groups_with_reuse2(ptr %A, ptr %B, i64 %N)
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
 ; CHECK-NEXT: [[TMP50:%.*]] = add i64 [[OFFSET_IDX]], 0
 ; CHECK-NEXT: [[TMP51:%.*]] = lshr exact i64 [[TMP50]], 1
-; CHECK-NEXT: [[TMP52:%.*]] = getelementptr i32, ptr [[B]], i64 [[TMP51]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr nusw i32, ptr [[B]], i64 [[TMP51]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr [[TMP52]], align 4
 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
 ; CHECK-NEXT: [[STRIDED_VEC34:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
@@ -1186,16 +1186,16 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b
 ; O1VEC2: vector.body:
 ; O1VEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; O1VEC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]]
-; O1VEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
-; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4
+; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP0]]
+; O1VEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 0
+; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 4
 ; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
 ; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
 ; O1VEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
 ; O1VEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
-; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
-; O1VEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
-; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 4
+; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[TMP0]]
+; O1VEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 0
+; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 4
 ; O1VEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP7]], align 4
 ; O1VEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4
 ; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
|
||||
; OzVEC2: vector.body:
|
||||
; OzVEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
|
||||
; OzVEC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
|
||||
; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]]
|
||||
; OzVEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
|
||||
; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 4
|
||||
; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i32, ptr [[B:%.*]], i64 [[TMP0]]
|
||||
; OzVEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 0
|
||||
; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i32 4
|
||||
; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
|
||||
; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
|
||||
; OzVEC2-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
|
||||
; OzVEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
|
||||
; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]]
|
||||
; OzVEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
|
||||
; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 4
|
||||
; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i32, ptr [[A:%.*]], i64 [[TMP0]]
|
||||
; OzVEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 0
|
||||
; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP6]], i32 4
|
||||
; OzVEC2-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP7]], align 4
|
||||
; OzVEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4
|
||||
; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
|
||||
|
@@ -32,11 +32,11 @@ define void @gep_for_first_member_does_not_dominate_insert_point(ptr %str, ptr n
 ; CHECK-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
 ; CHECK: [[SCALAR_PH]]:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 200, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i64 [ 200, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
 ; CHECK: [[LOOP]]:
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[IV2_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[IV2_NEXT:%.*]], %[[LOOP]] ]
 ; CHECK-NEXT: [[OR_1:%.*]] = or disjoint i64 [[IV2]], 1
 ; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[STR]], i64 [[OR_1]]
 ; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[GEP1]], align 1
|
||||
; CHECK: [[VECTOR_BODY]]:
|
||||
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
||||
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr { i16, i16, i16, i16 }, ptr [[SRC]], i64 [[TMP3]], i32 2
|
||||
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr nusw { i16, i16, i16, i16 }, ptr [[SRC]], i64 [[TMP3]], i32 2
|
||||
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 -4
|
||||
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i16>, ptr [[TMP5]], align 2
|
||||
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
|
||||
|
@@ -25,7 +25,7 @@ define i32 @read_only_loop_with_runtime_check(ptr noundef %array, i32 noundef %c
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_PHI11:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[ARRAY]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[ARRAY]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
 ; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
@@ -48,7 +48,7 @@ define void @loop(ptr %X, ptr %Y) {
 ; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP1]], align 8
 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x double>, ptr [[TMP2]], align 8
@@ -60,7 +60,7 @@ define void @loop(ptr %X, ptr %Y) {
 ; CHECK-NEXT: [[TMP8:%.*]] = select <2 x i1> [[TMP6]], <2 x double> splat (double 6.000000e+00), <2 x double> [[WIDE_LOAD8]]
 ; CHECK-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP3]], <2 x double> zeroinitializer, <2 x double> [[TMP7]]
 ; CHECK-NEXT: [[TMP10:%.*]] = select <2 x i1> [[TMP4]], <2 x double> zeroinitializer, <2 x double> [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[X]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP11]], i64 16
 ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr [[TMP11]], align 8
 ; CHECK-NEXT: store <2 x double> [[TMP10]], ptr [[TMP12]], align 8
@@ -148,13 +148,13 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i32, ptr [[C]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META4:![0-9]+]]
 ; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META4]]
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], splat (i32 20)
 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD7]], splat (i32 20)
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP4]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !alias.scope [[META7:![0-9]+]]
 ; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !alias.scope [[META7]]
@@ -17,9 +17,9 @@ define void @add4(ptr noalias noundef %x, ptr noalias noundef %y, i32 noundef %n
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
 ; CHECK-NEXT: store <32 x i16> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 2
@@ -137,9 +137,9 @@ define void @addsubs(ptr noalias noundef %x, ptr noundef %y, i32 noundef %n) {
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
 ; CHECK-NEXT: [[TMP2:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
 ; CHECK-NEXT: [[TMP3:%.*]] = sub <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
@@ -263,9 +263,9 @@ define void @add2sub2(ptr noalias noundef %x, ptr noundef %y, i32 noundef %n) {
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC24:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
 ; CHECK-NEXT: [[TMP2:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
 ; CHECK-NEXT: [[TMP3:%.*]] = add <32 x i16> [[WIDE_VEC24]], [[WIDE_VEC]]
@@ -389,11 +389,11 @@ define void @addmul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z, i32
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP2]], align 2
 ; CHECK-NEXT: [[TMP4:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = add <32 x i16> [[TMP4]], [[WIDE_VEC36]]
@@ -545,12 +545,12 @@ define void @addsubsmul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z,
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
 ; CHECK-NEXT: [[TMP2:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP3]], align 2
 ; CHECK-NEXT: [[TMP4:%.*]] = add <32 x i16> [[TMP2]], [[WIDE_VEC36]]
 ; CHECK-NEXT: [[TMP5:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
@@ -710,12 +710,12 @@ define void @add2sub2mul(ptr noalias noundef %x, ptr noundef %y, ptr noundef %z,
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i16, ptr [[Y:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i16>, ptr [[TMP0]], align 2
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i16, ptr [[Z:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC31:%.*]] = load <32 x i16>, ptr [[TMP1]], align 2
 ; CHECK-NEXT: [[TMP2:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i16, ptr [[X:%.*]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC36:%.*]] = load <32 x i16>, ptr [[TMP3]], align 2
 ; CHECK-NEXT: [[TMP4:%.*]] = add <32 x i16> [[TMP2]], [[WIDE_VEC36]]
 ; CHECK-NEXT: [[TMP5:%.*]] = mul <32 x i16> [[WIDE_VEC31]], [[WIDE_VEC]]
@@ -123,7 +123,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]])
 ; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP12]], i64 1
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[A]], i64 0, i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP17]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP17]], align 8, !alias.scope [[META0:![0-9]+]]
 ; CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <2 x double>, ptr [[TMP18]], align 8, !alias.scope [[META0]]
@@ -132,7 +132,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[BROADCAST_SPLAT23:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[WIDE_LOAD]], [[BROADCAST_SPLAT23]]
 ; CHECK-NEXT: [[TMP21:%.*]] = fmul <2 x double> [[WIDE_LOAD21]], [[BROADCAST_SPLAT23]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[B]], i64 0, i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP22]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <2 x double>, ptr [[TMP22]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <2 x double>, ptr [[TMP23]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -199,7 +199,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP45]])
 ; CHECK-NEXT: [[TMP46:%.*]] = extractelement <2 x i1> [[TMP42]], i64 1
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP46]])
-; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP33]]
+; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[A]], i64 0, i64 [[TMP33]]
 ; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP47]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr [[TMP47]], align 8, !alias.scope [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD21_1:%.*]] = load <2 x double>, ptr [[TMP48]], align 8, !alias.scope [[META0]]
@@ -208,7 +208,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[BROADCAST_SPLAT23_1:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_1]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP50:%.*]] = fmul <2 x double> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT23_1]]
 ; CHECK-NEXT: [[TMP51:%.*]] = fmul <2 x double> [[WIDE_LOAD21_1]], [[BROADCAST_SPLAT23_1]]
-; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP33]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[B]], i64 0, i64 [[TMP33]]
 ; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP52]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD24_1:%.*]] = load <2 x double>, ptr [[TMP52]], align 8, !alias.scope [[META5]], !noalias [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD25_1:%.*]] = load <2 x double>, ptr [[TMP53]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -276,7 +276,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP76]])
 ; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i1> [[TMP73]], i64 1
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP77]])
-; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP64]]
+; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[A]], i64 0, i64 [[TMP64]]
 ; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP78]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr [[TMP78]], align 8, !alias.scope [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD21_2:%.*]] = load <2 x double>, ptr [[TMP79]], align 8, !alias.scope [[META0]]
@@ -285,7 +285,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[BROADCAST_SPLAT23_2:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_2]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP81:%.*]] = fmul <2 x double> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT23_2]]
 ; CHECK-NEXT: [[TMP82:%.*]] = fmul <2 x double> [[WIDE_LOAD21_2]], [[BROADCAST_SPLAT23_2]]
-; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP64]]
+; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[B]], i64 0, i64 [[TMP64]]
 ; CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP83]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD24_2:%.*]] = load <2 x double>, ptr [[TMP83]], align 8, !alias.scope [[META5]], !noalias [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD25_2:%.*]] = load <2 x double>, ptr [[TMP84]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@@ -353,7 +353,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP107]])
 ; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i1> [[TMP104]], i64 1
 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP108]])
-; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP95]]
+; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[A]], i64 0, i64 [[TMP95]]
 ; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP109]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr [[TMP109]], align 8, !alias.scope [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD21_3:%.*]] = load <2 x double>, ptr [[TMP110]], align 8, !alias.scope [[META0]]
@@ -362,7 +362,7 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[BROADCAST_SPLAT23_3:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_3]], <2 x double> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT: [[TMP112:%.*]] = fmul <2 x double> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT23_3]]
 ; CHECK-NEXT: [[TMP113:%.*]] = fmul <2 x double> [[WIDE_LOAD21_3]], [[BROADCAST_SPLAT23_3]]
-; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP95]]
+; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds nuw <225 x double>, ptr [[B]], i64 0, i64 [[TMP95]]
 ; CHECK-NEXT: [[TMP115:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP114]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD24_3:%.*]] = load <2 x double>, ptr [[TMP114]], align 8, !alias.scope [[META5]], !noalias [[META0]]
 ; CHECK-NEXT: [[WIDE_LOAD25_3:%.*]] = load <2 x double>, ptr [[TMP115]], align 8, !alias.scope [[META5]], !noalias [[META0]]
@ -27,7 +27,7 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[VEC_PHI15:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI16:%.*]] = phi <4 x double> [ <double 0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %[[VECTOR_PH]] ], [ [[TMP14:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI17:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX1]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[WIDE_LOAD18:%.*]] = load <4 x float>, ptr [[TMP23]], align 4
@ -213,7 +213,7 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[VEC_PHI31:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP23:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI32:%.*]] = phi <4 x double> [ [[TMP27]], %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI33:%.*]] = phi <4 x double> [ splat (double -0.000000e+00), %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX_US1:%.*]] = getelementptr inbounds float, ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
; CHECK-NEXT: [[ARRAYIDX_US1:%.*]] = getelementptr inbounds nuw float, ptr [[SAMPLES]], i64 [[INDVARS_IV1]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX_US1]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[ARRAYIDX_US1]], align 4
; CHECK-NEXT: [[WIDE_LOAD34:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
@ -32,7 +32,7 @@ define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[PIN:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[PIN:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
@ -42,7 +42,7 @@ define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 65793)
; CHECK-NEXT: [[TMP6:%.*]] = or disjoint <4 x i32> [[TMP4]], splat (i32 -16777216)
; CHECK-NEXT: [[TMP7:%.*]] = or disjoint <4 x i32> [[TMP5]], splat (i32 -16777216)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[POUT:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[POUT:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP8]], i64 16
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4
@ -17,7 +17,7 @@ define void @foo(ptr noalias noundef %0, ptr noalias noundef %1) optsize {
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = load <8 x i32>, ptr [[GEP]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_GATHER]], splat (i32 5)
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP1]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP10]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 256
@ -22,10 +22,10 @@ define void @test(i32 noundef %nface, i32 noundef %ncell, ptr noalias noundef %f
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDVARS_IV_EPIL:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[FACE_CELL]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[ARRAYIDX_EPIL]], align 4, !tbaa [[TBAA0:![0-9]+]], !llvm.access.group [[ACC_GRP4:![0-9]+]]
; CHECK-NEXT: [[GEP_EPIL:%.*]] = getelementptr inbounds i32, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[GEP_EPIL]], align 4, !tbaa [[TBAA0]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw i32, ptr [[FACE_CELL]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4, !tbaa [[TBAA0:![0-9]+]], !llvm.access.group [[ACC_GRP4:![0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV_EPIL]]
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4, !tbaa [[TBAA0]], !llvm.access.group [[ACC_GRP4]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, ptr [[Y]], <4 x i64> [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[WIDE_LOAD12]] to <4 x i64>
@ -23,7 +23,7 @@ define void @vdiv(ptr %a, float %b) #0 {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4, !tbaa [[TBAA3:![0-9]+]]
; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[WIDE_LOAD]], [[TMP0]]
; CHECK-NEXT: store <4 x float> [[TMP3]], ptr [[TMP1]], align 4, !tbaa [[TBAA3]]
@ -39,7 +39,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH1]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 32
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 64
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 96
@ -51,7 +51,7 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = fmul fast <4 x double> [[WIDE_LOAD6]], [[TMP2]]
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <4 x double> [[WIDE_LOAD7]], [[TMP3]]
; CHECK-NEXT: [[TMP12:%.*]] = fmul fast <4 x double> [[WIDE_LOAD8]], [[TMP4]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds double, ptr [[X]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i64 32
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i64 64
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP13]], i64 96
@ -78,10 +78,10 @@ define void @vdiv(ptr %x, ptr %y, double %a, i32 %N) #0 {
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; CHECK: vec.epilog.vector.body:
; CHECK-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT16:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 [[INDEX12]]
; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds nuw double, ptr [[Y]], i64 [[INDEX12]]
; CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <4 x double>, ptr [[TMP39]], align 8, !tbaa [[TBAA3]]
; CHECK-NEXT: [[TMP40:%.*]] = fmul fast <4 x double> [[WIDE_LOAD13]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds double, ptr [[X]], i64 [[INDEX12]]
; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds nuw double, ptr [[X]], i64 [[INDEX12]]
; CHECK-NEXT: store <4 x double> [[TMP40]], ptr [[TMP41]], align 8, !tbaa [[TBAA3]]
; CHECK-NEXT: [[INDEX_NEXT16]] = add nuw i64 [[INDEX12]], 4
; CHECK-NEXT: [[TMP42:%.*]] = icmp eq i64 [[INDEX_NEXT16]], [[N_VEC11]]
@ -16,7 +16,7 @@ define i16 @test(ptr %ptr) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i16> [ zeroinitializer, [[ENTRY]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i16> [ zeroinitializer, [[ENTRY]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 8
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1
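Taken together, these test updates show the scalar GEPs' no-wrap flags surviving vectorization: addresses that were previously widened as plain inbounds now keep the nuw bit as well. A minimal IR sketch of the before/after difference (illustrative only; @flags_demo is a hypothetical function, not part of the patch or its tests):

define void @flags_demo(ptr %base, i64 %i) {
  ; before: the widened pointer only carried 'inbounds' from the scalar GEP
  %old = getelementptr inbounds float, ptr %base, i64 %i
  ; after: all flags propagate, so 'nuw' is kept too, asserting that the
  ; unsigned offset arithmetic cannot wrap around the address space
  %new = getelementptr inbounds nuw float, ptr %base, i64 %i
  ret void
}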