Mirror of https://github.com/llvm/llvm-project.git, synced 2025-04-17 08:06:40 +00:00
[LV] Remove more references of unrolled parts after 57f5d8f2fe.
Continue to clean up some now-stale references to unrolled parts and related terminology, as pointed out post-commit for 06c3a7d.
commit 3fbf6f8bb1
parent b1e4656e8e
@@ -538,12 +538,6 @@ protected:
   /// A small list of PHINodes.
   using PhiVector = SmallVector<PHINode *, 4>;
 
-  /// A type for scalarized values in the new loop. Each value from the
-  /// original loop, when scalarized, is represented by UF x VF scalar values
-  /// in the new unrolled loop, where UF is the unroll factor and VF is the
-  /// vectorization factor.
-  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
-
   /// Set up the values of the IVs correctly when exiting the vector loop.
   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                     Value *VectorTripCount, Value *EndValue,
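For context on the deleted typedef: ScalarParts held UF x VF scalar values per original value, indexed first by unroll part and then by vector lane. Now that unrolling is performed as a VPlan-to-VPlan transformation, code generation only ever materializes a single part, so flat per-lane storage is all that remains. A standalone sketch of the two shapes (plain C++ with a stand-in value type, not the LLVM definitions):

#include <cstdio>
#include <vector>

using ValueHandle = const char *; // stand-in for llvm::Value *

int main() {
  const unsigned UF = 2, VF = 4;

  // Old shape (ScalarParts): UF x VF slots, indexed Scalars[Part][Lane].
  std::vector<std::vector<ValueHandle>> ScalarParts(
      UF, std::vector<ValueHandle>(VF, "scalar"));

  // New shape: the VPlan is already unrolled, so each value only needs
  // per-lane storage.
  std::vector<ValueHandle> PerLane(VF, "scalar");

  std::printf("old: %u slots, new: %u slots\n", UF * VF, VF); // old: 8, new: 4
  (void)ScalarParts;
  (void)PerLane;
}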
@@ -333,10 +333,10 @@ Value *VPTransformState::get(VPValue *Def, bool NeedsScalar) {
 
   // However, if we are vectorizing, we need to construct the vector values.
   // If the value is known to be uniform after vectorization, we can just
-  // broadcast the scalar value corresponding to lane zero for each unroll
-  // iteration. Otherwise, we construct the vector values using
-  // insertelement instructions. Since the resulting vectors are stored in
-  // State, we will only generate the insertelements once.
+  // broadcast the scalar value corresponding to lane zero. Otherwise, we
+  // construct the vector values using insertelement instructions. Since the
+  // resulting vectors are stored in State, we will only generate the
+  // insertelements once.
   Value *VectorValue = nullptr;
   if (IsUniform) {
     VectorValue = GetBroadcastInstrs(ScalarValue);
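In simplified, standalone form (hypothetical helper names, not the LLVM IR-builder API), the logic the updated comment describes looks roughly like this: a value uniform after vectorization is materialized once by splatting its lane-0 scalar, while a non-uniform value is assembled lane by lane, mirroring a chain of insertelement instructions:

#include <array>
#include <cstdio>

constexpr unsigned VF = 4;
using Vec = std::array<int, VF>;

// Hypothetical stand-in for GetBroadcastInstrs: splat the lane-0 scalar.
static Vec broadcast(int ScalarLane0) {
  Vec V{};
  V.fill(ScalarLane0);
  return V;
}

// Hypothetical stand-in for the insertelement chain: one insert per lane.
static Vec insertLaneByLane(const Vec &Scalars) {
  Vec V{};
  for (unsigned Lane = 0; Lane < VF; ++Lane)
    V[Lane] = Scalars[Lane];
  return V;
}

int main() {
  Vec Uniform = broadcast(7);                     // uniform after vectorization
  Vec Varying = insertLaneByLane({{0, 1, 2, 3}}); // distinct value per lane
  std::printf("%d %d\n", Uniform[3], Varying[3]); // prints "7 3"
}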
@@ -769,15 +769,15 @@ void VPRegionBlock::execute(VPTransformState *State) {
 
   // Enter replicating mode.
   State->Instance = VPIteration(0, 0);
-    assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
-    for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
-         ++Lane) {
-      State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
-      // Visit the VPBlocks connected to \p this, starting from it.
-      for (VPBlockBase *Block : RPOT) {
-        LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
-        Block->execute(State);
-      }
+  assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
+  for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
+       ++Lane) {
+    State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
+    // Visit the VPBlocks connected to \p this, starting from it.
+    for (VPBlockBase *Block : RPOT) {
+      LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
+      Block->execute(State);
+    }
   }
 
   // Exit replicating mode.
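A minimal sketch of the replicating-mode loop above, with plain callables standing in for VPlan blocks and the RPO traversal (assumed simplifications, not the real VPBlockBase interface): each block of the region executes once per lane, with the active lane selecting which scalar instance is generated:

#include <cstdio>
#include <functional>
#include <vector>

// Callables stand in for the region's blocks, visited in RPO.
using Block = std::function<void(unsigned Lane)>;

int main() {
  const unsigned VF = 4; // fixed width; the real code asserts !VF.isScalable()
  std::vector<Block> RPOT = {
      [](unsigned Lane) { std::printf("  load,  lane %u\n", Lane); },
      [](unsigned Lane) { std::printf("  store, lane %u\n", Lane); }};

  // Replicating mode: run every block once per lane, lane by lane, like the
  // Lane loop in VPRegionBlock::execute above.
  for (unsigned Lane = 0; Lane < VF; ++Lane)
    for (const Block &B : RPOT)
      B(Lane);
}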
@@ -254,7 +254,7 @@ struct VPTransformState {
                    DominatorTree *DT, IRBuilderBase &Builder,
                    InnerLoopVectorizer *ILV, VPlan *Plan);
 
-  /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
+  /// The chosen Vectorization Factor of the loop being vectorized.
   ElementCount VF;
 
   /// Hold the indices to generate specific scalar instructions. Null indicates
@@ -1253,9 +1253,7 @@ public:
    ComputeReductionResult,
    // Takes the VPValue to extract from as first operand and the lane or part
    // to extract as second operand, counting from the end starting with 1 for
-    // last. The second operand must be a positive constant and <= VF when
-    // extracting from a vector or <= UF when extracting from an unrolled
-    // scalar.
+    // last. The second operand must be a positive constant and <= VF.
    ExtractFromEnd,
    LogicalAnd, // Non-poison propagating logical And.
    // Add an offset in bytes (second operand) to a base pointer (first
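The indexing rule ExtractFromEnd keeps after this change can be sketched as follows (a standalone analogue, not the recipe's actual codegen): the second operand counts backwards from the end, starting at 1 for the last lane, and must stay within [1, VF]:

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Standalone analogue of ExtractFromEnd's indexing: Offset counts from the
// end, starting at 1 for the last lane, and must be in [1, VF].
template <typename T, std::size_t VF>
T extractFromEnd(const std::array<T, VF> &Vec, unsigned Offset) {
  assert(Offset >= 1 && Offset <= VF && "offset must be in [1, VF]");
  return Vec[VF - Offset];
}

int main() {
  std::array<int, 4> V = {10, 20, 30, 40};
  std::printf("%d %d\n", extractFromEnd(V, 1), extractFromEnd(V, 2)); // 40 30
}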
@@ -2490,9 +2490,7 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
   // If the group is reverse, adjust the index to refer to the last vector lane
   // instead of the first. We adjust the index from the first vector lane,
   // rather than directly getting the pointer for lane VF - 1, because the
-  // pointer operand of the interleaved access is supposed to be uniform. For
-  // uniform instructions, we're only required to generate a value for the
-  // first vector lane in each unroll iteration.
+  // pointer operand of the interleaved access is supposed to be uniform.
   if (Group->isReverse()) {
     Value *RuntimeVF =
         getRuntimeVF(State.Builder, State.Builder.getInt32Ty(), State.VF);
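As a worked example of the adjustment the comment describes (a plain arithmetic sketch; variable names are illustrative): for a reverse group the uniform lane-0 pointer is kept, and the access index is stepped back by (VF - 1) * Factor elements to reach the member block of the last lane, mirroring how the surrounding code derives a negated offset from RuntimeVF and the group's factor:

#include <cstdio>

int main() {
  // Illustrative values: vector width and interleave factor (members per group).
  unsigned VF = 4;
  unsigned Factor = 2;

  // Reverse group: keep the uniform lane-0 pointer and step the index back
  // by (VF - 1) * Factor elements, negated because we move toward lower
  // addresses.
  int Adjustment = -static_cast<int>((VF - 1) * Factor);

  std::printf("index adjustment: %d elements\n", Adjustment); // -6
  return 0;
}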