[VPlan] Print IR flags for VPRecipeWithIRFlags.

Now that IR flags are modeled as part of VPRecipeWithIRFlags, include
the flags when printing recipes.

Depends on D150027.

Reviewed By: Ayal

Differential Revision: https://reviews.llvm.org/D150029
Florian Hahn 2023-05-23 20:36:15 +01:00
parent 42c83e3270
commit 299f0ff60e
13 changed files with 132 additions and 84 deletions
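To illustrate the effect (the example is taken from one of the updated tests below), a widened add that carries wrap flags was previously printed as

WIDEN ir<%psd> = add ir<%lsd>, ir<23>

and is now printed with its IR flags included:

WIDEN ir<%psd> = add nuw nsw ir<%lsd>, ir<23>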

View File

@ -1078,6 +1078,22 @@ public:
"recipe doesn't have inbounds flag");
return GEPFlags.IsInBounds;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
FastMathFlags getFastMathFlags() const {
FastMathFlags Res;
Res.setAllowReassoc(FMFs.AllowReassoc);
Res.setNoNaNs(FMFs.NoNaNs);
Res.setNoInfs(FMFs.NoInfs);
Res.setNoSignedZeros(FMFs.NoSignedZeros);
Res.setAllowReciprocal(FMFs.AllowReciprocal);
Res.setAllowContract(FMFs.AllowContract);
Res.setApproxFunc(FMFs.ApproxFunc);
return Res;
}
void printFlags(raw_ostream &O) const;
#endif
};
/// VPWidenRecipe is a recipe for producing a copy of vector type its
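
The recipe stores its flags in compact bitfield structs (WrapFlags, ExactFlags, FMFs, GEPFlags) rather than as an llvm::FastMathFlags object, so getFastMathFlags() above rebuilds the full flags object on demand for printing. Below is a minimal, LLVM-free sketch of that pack-then-expand pattern; all names are invented for illustration and none of this code is part of the patch:

#include <cstdint>
#include <iostream>

// Stand-in for the packed bitfields a recipe carries (like FMFs above).
struct PackedFMFs {
  uint8_t NoNaNs : 1;
  uint8_t NoSignedZeros : 1;
};

// Stand-in for llvm::FastMathFlags.
struct FastMathFlagsLike {
  bool NoNaNs = false;
  bool NoSignedZeros = false;
  void print(std::ostream &OS) const {
    if (NoNaNs)
      OS << " nnan";
    if (NoSignedZeros)
      OS << " nsz";
  }
};

// Counterpart of getFastMathFlags(): expand the packed bits back into a
// full flags object that already knows how to print itself.
FastMathFlagsLike unpack(const PackedFMFs &P) {
  FastMathFlagsLike F;
  F.NoNaNs = P.NoNaNs;
  F.NoSignedZeros = P.NoSignedZeros;
  return F;
}

int main() {
  PackedFMFs P{/*NoNaNs=*/1, /*NoSignedZeros=*/1};
  unpack(P).print(std::cout); // prints " nnan nsz"
  std::cout << '\n';
}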

View File

@ -603,6 +603,33 @@ void VPWidenSelectRecipe::execute(VPTransformState &State) {
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
  switch (OpType) {
  case OperationType::PossiblyExactOp:
    if (ExactFlags.IsExact)
      O << " exact";
    break;
  case OperationType::OverflowingBinOp:
    if (WrapFlags.HasNUW)
      O << " nuw";
    if (WrapFlags.HasNSW)
      O << " nsw";
    break;
  case OperationType::FPMathOp:
    getFastMathFlags().print(O);
    break;
  case OperationType::GEPOp:
    if (GEPFlags.IsInBounds)
      O << " inbounds";
    break;
  case OperationType::Other:
    break;
  }
  O << " ";
}
#endif

void VPWidenRecipe::execute(VPTransformState &State) {
  auto &I = *cast<Instruction>(getUnderlyingValue());
  auto &Builder = State.Builder;
@ -699,7 +726,8 @@ void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
  O << Indent << "WIDEN ";
  printAsOperand(O, SlotTracker);
  const Instruction *UI = getUnderlyingInstr();
  O << " = " << UI->getOpcodeName() << " ";
  O << " = " << UI->getOpcodeName();
  printFlags(O);
  if (auto *Cmp = dyn_cast<CmpInst>(UI))
    O << Cmp->getPredicate() << " ";
  printOperands(O, SlotTracker);
@ -864,7 +892,8 @@ void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
O << " ";
printAsOperand(O, SlotTracker);
O << " = getelementptr ";
O << " = getelementptr";
printFlags(O);
printOperands(O, SlotTracker);
}
#endif
@ -973,14 +1002,17 @@ void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
O << " = ";
}
if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
O << "call @" << CB->getCalledFunction()->getName() << "(";
O << "call";
printFlags(O);
O << "@" << CB->getCalledFunction()->getName() << "(";
interleaveComma(make_range(op_begin(), op_begin() + (getNumOperands() - 1)),
O, [&O, &SlotTracker](VPValue *Op) {
Op->printAsOperand(O, SlotTracker);
});
O << ")";
} else {
O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode()) << " ";
O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
printFlags(O);
printOperands(O, SlotTracker);
}
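
Note how the print() implementations above drop their trailing " " after the opcode and let printFlags() emit both the flag text and the separating space; a recipe with no flags therefore prints exactly as before. A small, self-contained sketch of that composition, using invented toy types rather than the real VPlan classes:

#include <iostream>
#include <string>
#include <vector>

// Toy recipe: an opcode, its flag names, and its operand strings.
struct RecipeLike {
  std::string Opcode;
  std::vector<std::string> Flags;
  std::vector<std::string> Ops;

  // Mirrors VPRecipeWithIRFlags::printFlags: " <flag>"... then one space.
  void printFlags(std::ostream &OS) const {
    for (const std::string &F : Flags)
      OS << ' ' << F;
    OS << ' ';
  }

  // Mirrors the updated print() callers: opcode, then flags, then operands.
  void print(std::ostream &OS) const {
    OS << "WIDEN ir<%res> = " << Opcode;
    printFlags(OS);
    for (size_t I = 0; I < Ops.size(); ++I)
      OS << (I ? ", " : "") << Ops[I];
    OS << '\n';
  }
};

int main() {
  RecipeLike WithFlags{"add", {"nuw", "nsw"}, {"ir<%a>", "ir<%b>"}};
  RecipeLike NoFlags{"add", {}, {"ir<%a>", "ir<%b>"}};
  WithFlags.print(std::cout); // WIDEN ir<%res> = add nuw nsw ir<%a>, ir<%b>
  NoFlags.print(std::cout);   // WIDEN ir<%res> = add ir<%a>, ir<%b>
}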

View File

@ -20,7 +20,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: EMIT ir<%ptr.iv.1> = WIDEN-POINTER-INDUCTION ir<%start.1>, 8
; CHECK-NEXT: EMIT ir<%ptr.iv.2> = WIDEN-POINTER-INDUCTION ir<%start.2>, 1
; CHECK-NEXT: WIDEN-GEP Var[Inv] ir<%ptr.iv.2.next> = getelementptr ir<%ptr.iv.2>, ir<1>
; CHECK-NEXT: WIDEN-GEP Var[Inv] ir<%ptr.iv.2.next> = getelementptr inbounds ir<%ptr.iv.2>, ir<1>
; CHECK-NEXT: WIDEN store ir<%ptr.iv.1>, ir<%ptr.iv.2.next>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%ptr.iv.2>
; CHECK-NEXT: WIDEN ir<%add> = add ir<%lv>, ir<1>

View File

@ -23,7 +23,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%load> = load ir<%gep>
; CHECK-NEXT: REPLICATE ir<%call> = call @foo(ir<%load>)
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -49,7 +49,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%load> = load ir<%gep>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @foo(ir<%load>) (using library function: foo_vector_fixed4_nomask)
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -80,7 +80,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%load> = load ir<%gep>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @foo(ir<%load>) (using library function: foo_vector_fixed2_nomask)
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -106,7 +106,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%load> = load ir<%gep>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @foo(ir<%load>, ir<true>) (using library function: foo_vector_fixed4_mask)
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -136,7 +136,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%load> = load ir<%gep>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @foo(ir<%load>) (using library function: foo_vector_fixed2_nomask)
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -162,7 +162,7 @@ target triple = "aarch64-unknown-linux-gnu"
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%load> = load ir<%gep>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @foo(ir<%load>) (using library function: foo_vector_fixed4_nomask)
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>

View File

@ -17,11 +17,11 @@ target triple = "arm64-apple-ios"
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr ir<%src>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%l> = load ir<%gep.src>
; CHECK-NEXT: WIDEN-CAST ir<%conv> = fpext ir<%l> to double
; CHECK-NEXT: WIDEN-CALL ir<%s> = call @llvm.sin.f64(ir<%conv>) (using library function: __simd_sin_v2f64)
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%s>, ir<%gep.dst>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -44,11 +44,11 @@ target triple = "arm64-apple-ios"
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr ir<%src>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%l> = load ir<%gep.src>
; CHECK-NEXT: WIDEN-CAST ir<%conv> = fpext ir<%l> to double
; CHECK-NEXT: WIDEN-CALL ir<%s> = call @llvm.sin.f64(ir<%conv>) (using vector intrinsic)
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%s>, ir<%gep.dst>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>

View File

@ -59,12 +59,12 @@ define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[CAN_IV]]> * ir<-1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add vp<[[STEPS]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<[[STEPS]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%B>, ir<%idxprom>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
; CHECK-NEXT: WIDEN ir<%1> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%add9> = add ir<%1>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr ir<%A>, ir<%idxprom>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
; CHECK-NEXT: WIDEN store ir<%arrayidx3>, ir<%add9>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>
@ -191,12 +191,12 @@ define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocaptur
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%n> + vp<[[CAN_IV]]> * ir<-1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add vp<[[STEPS]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<[[STEPS]]>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%B>, ir<%idxprom>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
; CHECK-NEXT: WIDEN ir<%1> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%conv1> = fadd ir<%1>, ir<1.000000e+00>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr ir<%A>, ir<%idxprom>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
; CHECK-NEXT: WIDEN store ir<%arrayidx3>, ir<%conv1>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VTC]]>

View File

@ -17,7 +17,7 @@ define void @test_chained_first_order_recurrences_1(ptr %ptr) {
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.1> = phi ir<22>, ir<%for.1.next>
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.2> = phi ir<33>, vp<[[FOR1_SPLICE:%.+]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr inbounds ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%for.1.next> = load ir<%gep.ptr>
; CHECK-NEXT: EMIT vp<[[FOR1_SPLICE]]> = first-order splice ir<%for.1> ir<%for.1.next>
; CHECK-NEXT: EMIT vp<[[FOR2_SPLICE:%.+]]> = first-order splice ir<%for.2> vp<[[FOR1_SPLICE]]>
@ -68,7 +68,7 @@ define void @test_chained_first_order_recurrences_3(ptr %ptr) {
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.2> = phi ir<33>, vp<[[FOR1_SPLICE:%.+]]>
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for.3> = phi ir<33>, vp<[[FOR2_SPLICE:%.+]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.ptr> = getelementptr inbounds ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%for.1.next> = load ir<%gep.ptr>
; CHECK-NEXT: EMIT vp<[[FOR1_SPLICE]]> = first-order splice ir<%for.1> ir<%for.1.next>
; CHECK-NEXT: EMIT vp<[[FOR2_SPLICE]]> = first-order splice ir<%for.2> vp<[[FOR1_SPLICE]]>

View File

@ -60,7 +60,7 @@ for.end:
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr inbounds ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%s>, ir<%gep>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:

View File

@ -21,7 +21,7 @@
; DBG-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<%start> + vp<[[CAN_IV]]> * ir<1>
; DBG-NEXT: vp<[[IV_STEPS:%.]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
; DBG-NEXT: CLONE ir<%min> = call @llvm.smin.i32(vp<[[IV_STEPS]]>, ir<65535>)
; DBG-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%dst>, vp<[[IV_STEPS]]>
; DBG-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%dst>, vp<[[IV_STEPS]]>
; DBG-NEXT: CLONE store ir<%min>, ir<%arrayidx>
; DBG-NEXT: EMIT vp<[[INC:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; DBG-NEXT: EMIT branch-on-count vp<[[INC]]> vp<[[VEC_TC]]>
@ -87,9 +87,9 @@ declare i32 @llvm.smin.i32(i32, i32)
; DBG-EMPTY:
; DBG-NEXT: pred.store.if:
; DBG-NEXT: vp<[[STEPS2:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; DBG-NEXT: CLONE ir<%gep.src> = getelementptr ir<%src>, vp<[[STEPS2]]>
; DBG-NEXT: CLONE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[STEPS2]]>
; DBG-NEXT: CLONE ir<%l> = load ir<%gep.src>
; DBG-NEXT: CLONE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS2]]>
; DBG-NEXT: CLONE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[STEPS2]]>
; DBG-NEXT: CLONE store ir<%l>, ir<%gep.dst>
; DBG-NEXT: Successor(s): pred.store.continue
; DBG-EMPTY:

View File

@ -28,10 +28,10 @@ define void @print_call_and_memory(i64 %n, ptr noalias %y, ptr noalias %x) nounw
; CHECK-NEXT: "vector.body:\l" +
; CHECK-NEXT: " EMIT vp\<[[CAN_IV:%.+]]\> = CANONICAL-INDUCTION\l" +
; CHECK-NEXT: " vp\<[[STEPS:%.+]]\> = SCALAR-STEPS vp\<[[CAN_IV]]\>, ir\<1\>\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx\> = getelementptr ir\<%y\>, vp\<[[STEPS]]\>\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx\> = getelementptr inbounds ir\<%y\>, vp\<[[STEPS]]\>\l" +
; CHECK-NEXT: " WIDEN ir\<%lv\> = load ir\<%arrayidx\>\l" +
; CHECK-NEXT: " WIDEN-CALL ir\<%call\> = call @llvm.sqrt.f32(ir\<%lv\>) (using vector intrinsic)\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx2\> = getelementptr ir\<%x\>, vp\<[[STEPS]]\>\l" +
; CHECK-NEXT: " CLONE ir\<%arrayidx2\> = getelementptr inbounds ir\<%x\>, vp\<[[STEPS]]\>\l" +
; CHECK-NEXT: " WIDEN store ir\<%arrayidx2\>, ir\<%call\>\l" +
; CHECK-NEXT: " EMIT vp\<[[CAN_IV_NEXT:%.+]]\> = VF * UF +(nuw) vp\<[[CAN_IV]]\>\l" +
; CHECK-NEXT: " EMIT branch-on-count vp\<[[CAN_IV_NEXT]]\> vp\<{{.+}}\>\l" +

View File

@ -15,7 +15,7 @@ define void @iv_no_binary_op_in_descriptor(i1 %c, ptr %dst) {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next.p, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%dst>, vp<[[STEPS:%.+]]>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr inbounds ir<%dst>, vp<[[STEPS:%.+]]>
; CHECK-NEXT: WIDEN store ir<%gep>, ir<%iv>
; CHECK-NEXT: EMIT vp<[[CAN_INC:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_INC]]> vp<[[VEC_TC]]>

View File

@ -19,10 +19,10 @@ define void @print_call_and_memory(i64 %n, ptr noalias %y, ptr noalias %x) nounw
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN-CALL ir<%call> = call @llvm.sqrt.f32(ir<%lv>)
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr inbounds ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx2>, ir<%call>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VEC_TC]]>
@ -67,12 +67,12 @@ define void @print_widen_gep_and_select(i64 %n, ptr noalias %y, ptr noalias %x,
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi %iv.next, 0, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: WIDEN-GEP Inv[Var] ir<%arrayidx> = getelementptr ir<%y>, ir<%iv>
; CHECK-NEXT: WIDEN-GEP Inv[Var] ir<%arrayidx> = getelementptr inbounds ir<%y>, ir<%iv>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%cmp> = icmp eq ir<%arrayidx>, ir<%z>
; CHECK-NEXT: WIDEN-SELECT ir<%sel> = select ir<%cmp>, ir<1.000000e+01>, ir<2.000000e+01>
; CHECK-NEXT: WIDEN ir<%add> = fadd ir<%lv>, ir<%sel>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr inbounds ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%arrayidx2>, ir<%add>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VEC_TC]]>
@ -119,7 +119,7 @@ define float @print_reduction(i64 %n, ptr noalias %y) {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0.000000e+00>, ir<%red.next>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>)
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
@ -165,7 +165,7 @@ define void @print_reduction_with_invariant_store(i64 %n, ptr noalias %y, ptr no
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%red> = phi ir<0.000000e+00>, ir<%red.next>
; CHECK-NEXT: vp<[[IV:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%y>, vp<[[IV]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%y>, vp<[[IV]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%arrayidx>
; CHECK-NEXT: REDUCE ir<%red.next> = ir<%red> + fast reduce.fadd (ir<%lv>) (with final reduction value stored in invariant address sank outside of loop)
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
@ -288,14 +288,14 @@ define void @print_interleave_groups(i32 %C, i32 %D) {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * ir<4>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<4>
; CHECK-NEXT: CLONE ir<%gep.AB.0> = getelementptr ir<@AB>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.AB.0> = getelementptr inbounds ir<@AB>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: INTERLEAVE-GROUP with factor 4 at %AB.0, ir<%gep.AB.0>
; CHECK-NEXT: ir<%AB.0> = load from index 0
; CHECK-NEXT: ir<%AB.1> = load from index 1
; CHECK-NEXT: ir<%AB.3> = load from index 3
; CHECK-NEXT: CLONE ir<%iv.plus.3> = add vp<[[STEPS]]>, ir<3>
; CHECK-NEXT: WIDEN ir<%add> = add ir<%AB.0>, ir<%AB.1>
; CHECK-NEXT: CLONE ir<%gep.CD.3> = getelementptr ir<@CD>, ir<0>, ir<%iv.plus.3>
; CHECK-NEXT: WIDEN ir<%add> = add nsw ir<%AB.0>, ir<%AB.1>
; CHECK-NEXT: CLONE ir<%gep.CD.3> = getelementptr inbounds ir<@CD>, ir<0>, ir<%iv.plus.3>
; CHECK-NEXT: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%gep.CD.3>
; CHECK-NEXT: store ir<%add> to index 0
; CHECK-NEXT: store ir<1> to index 1
@ -356,9 +356,9 @@ define float @print_fmuladd_strict(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%sum.07> = phi ir<0.000000e+00>, ir<%muladd>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%l.a> = load ir<%arrayidx>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%arrayidx2> = getelementptr inbounds ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%l.b> = load ir<%arrayidx2>
; CHECK-NEXT: EMIT vp<[[FMUL:%.+]]> = fmul nnan ninf nsz ir<%l.a> ir<%l.b>
; CHECK-NEXT: REDUCE ir<[[MULADD:%.+]]> = ir<%sum.07> + nnan ninf nsz reduce.fadd (vp<[[FMUL]]>)
@ -406,9 +406,9 @@ define void @debug_loc_vpinstruction(ptr nocapture %asd, ptr nocapture %bsd) !db
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%isd> = getelementptr ir<%asd>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%isd> = getelementptr inbounds ir<%asd>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lsd> = load ir<%isd>
; CHECK-NEXT: WIDEN ir<%psd> = add ir<%lsd>, ir<23>
; CHECK-NEXT: WIDEN ir<%psd> = add nuw nsw ir<%lsd>, ir<23>
; CHECK-NEXT: WIDEN ir<%cmp1> = icmp slt ir<%lsd>, ir<100>
; CHECK-NEXT: WIDEN ir<%cmp2> = icmp sge ir<%lsd>, ir<200>
; CHECK-NEXT: EMIT vp<[[NOT1:%.+]]> = not ir<%cmp1>, !dbg /tmp/s.c:5:3
@ -453,7 +453,7 @@ loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %if.end ]
%isd = getelementptr inbounds i32, ptr %asd, i64 %iv
%lsd = load i32, ptr %isd, align 4
%psd = add nsw i32 %lsd, 23
%psd = add nuw nsw i32 %lsd, 23
%cmp1 = icmp slt i32 %lsd, 100
br i1 %cmp1, label %if.then, label %check, !dbg !7
@ -501,8 +501,8 @@ define void @print_expand_scev(i64 %y, ptr %ptr) {
; CHECK-NEXT: " ir<%v2>, vp<[[EXP_SCEV]]>
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<0> + vp<[[CAN_IV]]> * vp<[[EXP_SCEV]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, vp<[[EXP_SCEV]]>
; CHECK-NEXT: WIDEN ir<%v3> = add ir<%v2>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%v3> = add nuw ir<%v2>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr inbounds ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%v3>, ir<%gep>
; CHECK-NEXT: EMIT vp<[[CAN_INC:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_INC]]> vp<[[VTC]]>
@ -522,7 +522,7 @@ entry:
loop: ; preds = %loop, %entry
%iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
%v2 = trunc i64 %iv to i8
%v3 = add i8 %v2, 1
%v3 = add nuw i8 %v2, 1
%gep = getelementptr inbounds i8, ptr %ptr, i64 %iv
store i8 %v3, ptr %gep
@ -548,7 +548,7 @@ define i32 @print_exit_value(ptr %ptr, i32 %off) {
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep> = getelementptr inbounds ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%add> = add ir<%iv>, ir<%off>
; CHECK-NEXT: WIDEN store ir<%gep>, ir<0>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
@ -593,12 +593,12 @@ define void @print_fast_math_flags(i64 %n, ptr noalias %y, ptr noalias %x, ptr %
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.y> = getelementptr ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.y> = getelementptr inbounds ir<%y>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%gep.y>
; CHECK-NEXT: WIDEN ir<%add> = fadd ir<%lv>, ir<1.000000e+00>
; CHECK-NEXT: WIDEN ir<%mul> = fmul ir<%add>, ir<2.000000e+00>
; CHECK-NEXT: WIDEN ir<%div> = fdiv ir<%mul>, ir<2.000000e+00>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%add> = fadd nnan ir<%lv>, ir<1.000000e+00>
; CHECK-NEXT: WIDEN ir<%mul> = fmul reassoc nnan ninf nsz arcp contract afn ir<%add>, ir<2.000000e+00>
; CHECK-NEXT: WIDEN ir<%div> = fdiv reassoc nsz contract ir<%mul>, ir<2.000000e+00>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr inbounds ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN store ir<%gep.x>, ir<%div>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VEC_TC]]>
@ -643,11 +643,11 @@ define void @print_exact_flags(i64 %n, ptr noalias %x) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: CLONE ir<%gep.x> = getelementptr inbounds ir<%x>, vp<[[STEPS]]>
; CHECK-NEXT: WIDEN ir<%lv> = load ir<%gep.x>
; CHECK-NEXT: WIDEN ir<%div.1> = udiv ir<%lv>, ir<20>
; CHECK-NEXT: WIDEN ir<%div.1> = udiv exact ir<%lv>, ir<20>
; CHECK-NEXT: WIDEN ir<%div.2> = udiv ir<%lv>, ir<60>
; CHECK-NEXT: WIDEN ir<%add> = add ir<%div.1>, ir<%div.2>
; CHECK-NEXT: WIDEN ir<%add> = add nuw nsw ir<%div.1>, ir<%div.2>
; CHECK-NEXT: WIDEN store ir<%gep.x>, ir<%add>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VEC_TC]]>
@ -691,7 +691,7 @@ define void @print_call_flags(ptr readonly %src, ptr noalias %dest, i64 %n) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: CLONE ir<%ld.addr> = getelementptr ir<%src>, vp<%2>
; CHECK-NEXT: CLONE ir<%ld.addr> = getelementptr inbounds ir<%src>, vp<%2>
; CHECK-NEXT: WIDEN ir<%ld.value> = load ir<%ld.addr>
; CHECK-NEXT: WIDEN ir<%ifcond> = fcmp oeq ir<%ld.value>, ir<5.000000e+00>
; CHECK-NEXT: Successor(s): pred.call
@ -702,7 +702,7 @@ define void @print_call_flags(ptr readonly %src, ptr noalias %dest, i64 %n) {
; CHECK-NEXT: Successor(s): pred.call.if, pred.call.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.call.if:
; CHECK-NEXT: REPLICATE ir<%foo.ret.1> = call @foo(ir<%ld.value>) (S->V)
; CHECK-NEXT: REPLICATE ir<%foo.ret.1> = call nnan ninf nsz @foo(ir<%ld.value>) (S->V)
; CHECK-NEXT: REPLICATE ir<%foo.ret.2> = call @foo(ir<%ld.value>) (S->V)
; CHECK-NEXT: Successor(s): pred.call.continue
; CHECK-EMPTY:
@ -717,7 +717,7 @@ define void @print_call_flags(ptr readonly %src, ptr noalias %dest, i64 %n) {
; CHECK-NEXT: WIDEN ir<%fadd> = fadd vp<%8>, vp<%9>
; CHECK-NEXT: EMIT vp<%11> = not ir<%ifcond>
; CHECK-NEXT: BLEND %st.value = ir<%ld.value>/vp<%11> ir<%fadd>/ir<%ifcond>
; CHECK-NEXT: CLONE ir<%st.addr> = getelementptr ir<%dest>, vp<%2>
; CHECK-NEXT: CLONE ir<%st.addr> = getelementptr inbounds ir<%dest>, vp<%2>
; CHECK-NEXT: WIDEN store ir<%st.addr>, ir<%st.value>
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = VF * UF +(nuw) vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]> vp<[[VEC_TC]]>

View File

@ -36,10 +36,10 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; CHECK: pred.store.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b>
; CHECK-NEXT: REPLICATE ir<%add> = add ir<%lv.b>, ir<10>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, vp<[[STEPS]]
; CHECK-NEXT: REPLICATE ir<%mul> = mul ir<2>, ir<%add>
; CHECK-NEXT: REPLICATE store ir<%mul>, ir<%gep.a>
; CHECK-NEXT: Successor(s): pred.store.continue
@ -104,7 +104,7 @@ exit:
; CHECK: pred.load.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b>
; CHECK-NEXT: Successor(s): pred.load.continue
@ -123,7 +123,7 @@ exit:
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%add> = add vp<[[PRED]]>, ir<10>
; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.a>
; CHECK-NEXT: Successor(s): pred.store.continue
@ -187,7 +187,7 @@ exit:
; CHECK: pred.load.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b> (S->V)
; CHECK-NEXT: Successor(s): pred.load.continue
@ -207,7 +207,7 @@ exit:
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.a>
; CHECK-NEXT: Successor(s): pred.store.continue
@ -261,7 +261,7 @@ define void @uniform_gep(i64 %k, ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: vp<[[DERIVED_IV:%.+]]> = DERIVED-IV ir<21> + vp<[[CAN_IV]]> * ir<1>
; CHECK-NEXT: EMIT vp<[[WIDE_CAN_IV:%.+]]> = WIDEN-CANONICAL-INDUCTION vp<[[CAN_IV]]>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule vp<[[WIDE_CAN_IV]]> vp<[[BTC]]>
; CHECK-NEXT: CLONE ir<%gep.A.uniform> = getelementptr ir<%A>, ir<0>
; CHECK-NEXT: CLONE ir<%gep.A.uniform> = getelementptr inbounds ir<%A>, ir<0>
; CHECK-NEXT: CLONE ir<%lv> = load ir<%gep.A.uniform>
; CHECK-NEXT: WIDEN ir<%cmp> = icmp ult ir<%iv>, ir<%k>
; CHECK-NEXT: EMIT vp<[[NOT2:%.+]]> = not ir<%cmp>
@ -275,7 +275,7 @@ define void @uniform_gep(i64 %k, ptr noalias %A, ptr noalias %B) {
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.B> = getelementptr ir<%B>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.B> = getelementptr inbounds ir<%B>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%lv>, ir<%gep.B>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@ -345,7 +345,7 @@ define void @pred_cfg1(i32 %k, i32 %j) {
; CHECK-EMPTY:
; CHECK-NEXT: pred.load.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b> (S->V)
; CHECK-NEXT: Successor(s): pred.load.continue
; CHECK-EMPTY:
@ -368,7 +368,7 @@ define void @pred_cfg1(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE store ir<%p>, ir<%gep.a>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@ -445,7 +445,7 @@ define void @pred_cfg2(i32 %k, i32 %j) {
; CHECK-EMPTY:
; CHECK-NEXT: pred.load.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b> (S->V)
; CHECK-NEXT: Successor(s): pred.load.continue
; CHECK-EMPTY:
@ -469,7 +469,7 @@ define void @pred_cfg2(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE store ir<%p>, ir<%gep.a>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@ -552,7 +552,7 @@ define void @pred_cfg3(i32 %k, i32 %j) {
; CHECK-EMPTY:
; CHECK-NEXT: pred.load.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b>
; CHECK-NEXT: Successor(s): pred.load.continue
; CHECK-EMPTY:
@ -576,9 +576,9 @@ define void @pred_cfg3(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE store ir<0>, ir<%gep.a>
; CHECK-NEXT: REPLICATE ir<%gep.c> = getelementptr ir<@c>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE ir<%gep.c> = getelementptr inbounds ir<@c>, ir<0>, ir<%mul>
; CHECK-NEXT: REPLICATE store ir<%p>, ir<%gep.c>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@ -657,11 +657,11 @@ define void @merge_3_replicate_region(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.a> = load ir<%gep.a>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.b> = getelementptr inbounds ir<@b>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.b> = load ir<%gep.b>
; CHECK-NEXT: REPLICATE ir<%gep.c> = getelementptr ir<@c>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.c> = getelementptr inbounds ir<@c>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%lv.a>, ir<%gep.c>
; CHECK-NEXT: REPLICATE store ir<%lv.b>, ir<%gep.a>
; CHECK-NEXT: Successor(s): pred.store.continue
@ -685,7 +685,7 @@ define void @merge_3_replicate_region(i32 %k, i32 %j) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%gep.c.1> = getelementptr ir<@c>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.c.1> = getelementptr inbounds ir<@c>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%mul>, ir<%gep.c.1>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@ -761,7 +761,7 @@ define void @update_2_uses_in_same_recipe_in_merged_block(i32 %k) {
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%lv.a> = load ir<%gep.a>
; CHECK-NEXT: REPLICATE ir<%div> = sdiv ir<%lv.a>, ir<%lv.a>
; CHECK-NEXT: REPLICATE store ir<%div>, ir<%gep.a>
@ -820,7 +820,7 @@ define void @recipe_in_merge_candidate_used_by_first_order_recurrence(i32 %k) {
; CHECK-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<%for> = phi ir<0>, vp<[[PRED:%.+]]>
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv> vp<[[BTC]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.a> = getelementptr inbounds ir<@a>, ir<0>, vp<[[STEPS]]>
; CHECK-NEXT: Successor(s): pred.load
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.load: {
@ -1041,9 +1041,9 @@ define void @merge_with_dead_gep_between_regions(i32 %n, ptr noalias %src, ptr n
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<-1>
; CHECK-NEXT: REPLICATE ir<%gep.src> = getelementptr ir<%src>, vp<[[SCALAR_STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.src> = getelementptr inbounds ir<%src>, vp<[[SCALAR_STEPS]]>
; CHECK-NEXT: REPLICATE ir<%l> = load ir<%gep.src>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[SCALAR_STEPS]]>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr inbounds ir<%dst>, vp<[[SCALAR_STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%l>, ir<%gep.dst>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@ -1099,7 +1099,7 @@ define void @ptr_induction_remove_dead_recipe(ptr %start, ptr %end) {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION
; CHECK-NEXT: EMIT ir<%ptr.iv> = WIDEN-POINTER-INDUCTION ir<%start>, -1
; CHECK-NEXT: CLONE ir<%ptr.iv.next> = getelementptr ir<%ptr.iv>, ir<-1>
; CHECK-NEXT: CLONE ir<%ptr.iv.next> = getelementptr inbounds ir<%ptr.iv>, ir<-1>
; CHECK-NEXT: WIDEN ir<%l> = load ir<%ptr.iv.next>
; CHECK-NEXT: WIDEN ir<%c.1> = icmp eq ir<%l>, ir<0>
; CHECK-NEXT: EMIT vp<[[NEG:%.+]]> = not ir<%c.1>
@ -1111,7 +1111,7 @@ define void @ptr_induction_remove_dead_recipe(ptr %start, ptr %end) {
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: REPLICATE ir<%ptr.iv.next> = getelementptr ir<%ptr.iv>, ir<-1>
; CHECK-NEXT: REPLICATE ir<%ptr.iv.next> = getelementptr inbounds ir<%ptr.iv>, ir<-1>
; CHECK-NEXT: REPLICATE store ir<95>, ir<%ptr.iv.next>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY: