//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
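// For illustration, a scalar loop such as
//
//   for (int i = 0; i < n; ++i)
//     C[i] = A[i] + B[i];
//
// is conceptually rewritten so that each iteration processes a whole vector
// of elements. With a vector width of 4 this is roughly (pseudocode sketch
// only; the real transformation also emits a scalar epilogue and runtime
// checks, and the slice notation is not C):
//
//   for (int i = 0; i < n; i += 4)
//     C[i:i+3] = A[i:i+3] + B[i:i+3];
//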
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

/// Loops with a known constant trip count below this number are vectorized only
/// if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// We don't interleave loops with a known constant trip count below this
/// number.
static const unsigned TinyTripCountInterleaveThreshold = 128;

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
    "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks with a "
             "vectorize(enable) pragma."));

static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createMissedAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                     Instruction *I = nullptr) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  OptimizationRemarkAnalysis R(PassName, RemarkName, DL, CodeRegion);
  R << "loop not vectorized: ";
  return R;
}

namespace {

class LoopVectorizationRequirements;

} // end anonymous namespace

/// A helper function for converting Scalar types to vector types.
/// If the incoming type is void, we return void. If the VF is 1, we return
/// the scalar type.
static Type *ToVectorTy(Type *Scalar, unsigned VF) {
  if (Scalar->isVoidTy() || VF == 1)
    return Scalar;
  return VectorType::get(Scalar, VF);
}
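
// For example (illustrative): ToVectorTy(i32, 4) yields <4 x i32>, while
// ToVectorTy(i32, 1) and ToVectorTy(void, 4) return the incoming type
// unchanged.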

// FIXME: The following helper functions have multiple implementations
// in the project. They can be effectively organized in a common Load/Store
// utilities unit.

/// A helper function that returns the type of loaded or stored value.
static Type *getMemInstValueType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns the alignment of a load or store instruction.
static unsigned getMemInstAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlignment();
  return cast<StoreInst>(I)->getAlignment();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
static unsigned getMemInstAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type at the given vectorization factor.
static bool hasIrregularType(Type *Ty, const DataLayout &DL, unsigned VF) {
  // Determine if an array of VF elements of type Ty is "bitcast compatible"
  // with a <VF x Ty> vector.
  if (VF > 1) {
    auto *VectorTy = VectorType::get(Ty, VF);
    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
  }

  // If the vectorization factor is one, we just check if an array of type Ty
  // requires padding between elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}
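
// For example (illustrative, assuming a typical DataLayout): i1 is irregular,
// since each element is allocated as a full byte while <VF x i1> stores only
// VF bits, so an array of i1 and a vector of i1 are not bitcast compatible.
// i32 is regular: VF * 4 bytes of allocation match the store size of
// <VF x i32>.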

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
///       we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }
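
// For example (a sketch of how this value is typically used by the cost
// model): the cost of an instruction inside a predicated block is divided by
// this factor, so an instruction of cost C contributes roughly C / 2 to the
// loop cost, reflecting the assumed 50% execution probability.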

/// A helper function that adds a 'fast' flag to floating-point operations.
static Value *addFastMathFlag(Value *V) {
  if (isa<FPMathOperator>(V)) {
    FastMathFlags Flags;
    Flags.setFast();
    cast<Instruction>(V)->setFastMathFlags(Flags);
  }
  return V;
}
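
// For example (illustrative): passing a newly created 'fadd float %a, %b'
// through addFastMathFlag leaves it printed as 'fadd fast float %a, %b',
// i.e. all fast-math flags are set on the operation.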

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}
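
// For example (illustrative): getSignedIntOrFpConstant(i32, -1) produces the
// integer constant 'i32 -1', while getSignedIntOrFpConstant(float, 2)
// produces the floating-point constant 'float 2.0'.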

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, unsigned VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()),
        VectorLoopValueMap(UnrollFactor, VecWidth), Legal(LVL), Cost(CM) {}

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop. Unlink the old loop and connect the new one.
  /// Return the pre-header block of the new loop.
  BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and more.
  void fixVectorizedLoop();

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;
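
  // For example (illustrative): with UF = 2 and VF = 4, a single i32 add in
  // the original loop is represented by two <4 x i32> values, one per
  // unrolled part, and VectorParts holds those two values.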

  /// Vectorize a single PHINode in a block. This method handles the induction
  /// variable canonicalization. It supports both VF = 1 for unrolled loops and
  /// arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, unsigned UF, unsigned VF);

  /// A helper function to scalarize a single Instruction in the innermost loop.
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive.
  void scalarizeInstruction(Instruction *Instr, const VPIteration &Instance,
                            bool IfPredicateInstr);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc = nullptr);

  /// getOrCreateVectorValue and getOrCreateScalarValue coordinate to generate a
  /// vector or scalar value on-demand if one is not yet available. When
  /// vectorizing a loop, we visit the definition of an instruction before its
  /// uses. When visiting the definition, we either vectorize or scalarize the
  /// instruction, creating an entry for it in the corresponding map. (In some
  /// cases, such as induction variables, we will create both vector and scalar
  /// entries.) Then, as we encounter uses of the definition, we derive values
  /// for each scalar or vector use unless such a value is already available.
  /// For example, if we scalarize a definition and one of its uses is vector,
  /// we build the required vector on-demand with an insertelement sequence
  /// when visiting the use. Otherwise, if the use is scalar, we can use the
  /// existing scalar definition.
  ///
  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll index \p Part. If the value has already been vectorized,
  /// the corresponding vector entry in VectorLoopValueMap is returned. If,
  /// however, the value has a scalar entry in VectorLoopValueMap, we construct
  /// a new vector value on-demand by inserting the scalar values into a vector
  /// with an insertelement sequence. If the value has been neither vectorized
  /// nor scalarized, it must be loop invariant, so we simply broadcast the
  /// value into a vector.
  Value *getOrCreateVectorValue(Value *V, unsigned Part);

  /// Return a value in the new loop corresponding to \p V from the original
  /// loop at unroll and vector indices \p Instance. If the value has been
  /// vectorized but not scalarized, the necessary extractelement instruction
  /// will be generated.
  Value *getOrCreateScalarValue(Value *V, const VPIteration &Instance);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(Value *V, const VPIteration &Instance);

  /// Try to vectorize the interleaved access group that \p Instr belongs to.
  void vectorizeInterleaveGroup(Instruction *Instr);

  /// Vectorize Load and Store instructions, optionally masking the vector
  /// operations if \p BlockInMask is non-null.
  void vectorizeMemoryInstruction(Instruction *Instr,
                                  VectorParts *BlockInMask = nullptr);
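
  // For example (a sketch, not the exact emitted IR): a store that executes
  // under an if-condition inside the loop may be widened into a masked store
  // intrinsic such as llvm.masked.store, with \p BlockInMask providing the
  // per-lane condition vector.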

  /// \brief Set the debug location in the builder using the debug location in
  /// the instruction.
  void setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs();

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(PHINode *Phi);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(PHINode *Phi);

  /// \brief The loop exit block may have single-value PHI nodes with some
  /// incoming value. While vectorizing, we only handled real values
  /// that were defined inside the loop, and we should have one value for
  /// each predecessor of its parent basic block. See PR14725.
  void fixLCSSAPHIs();

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths();

  /// Insert the new loop to the loop hierarchy and pass manager
  /// and update the analysis passes.
  void updateAnalysis();

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// This is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);
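
  // For example (a sketch): a loop-invariant i32 %x is typically broadcast to
  // a <4 x i32> by inserting %x into lane 0 of an undef vector and shuffling
  // it with a zeroinitializer mask, so that every lane holds %x.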

  /// This function adds (StartIdx, StartIdx + Step, StartIdx + 2*Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                               Instruction::BinaryOpsEnd);
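
  // For example (illustrative): with a broadcast Val = <n, n, n, n>,
  // StartIdx = 0 and Step = 1, the result is <n, n+1, n+2, n+3>, i.e. the
  // per-lane values of a widened induction variable.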
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2016-07-14 14:36:06 +00:00
|
|
|
/// Compute scalar induction steps. \p ScalarIV is the scalar induction
|
|
|
|
/// variable on which to base the steps, \p Step is the size of the step, and
|
|
|
|
/// \p EntryVal is the value from the original loop that maps to the steps.
|
2018-03-20 09:04:39 +00:00
|
|
|
/// Note that \p EntryVal doesn't have to be an induction variable - it
|
|
|
|
/// can also be a truncate instruction.
|
|
|
|
void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
|
2017-02-24 18:20:12 +00:00
|
|
|
const InductionDescriptor &ID);
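// Sketch of the values produced (assumed VF = 4, UF = 2): lane L of unroll
// part P receives the scalar
//   ScalarIV + (P * VF + L) * Step
// e.g. part 1, lane 2 is ScalarIV + 6 * Step. These scalars feed the users
// that the cost model decided to keep scalar instead of widening.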
|
2016-07-06 14:26:59 +00:00
|
|
|
|
2017-02-17 16:09:07 +00:00
|
|
|
/// Create a vector induction phi node based on an existing scalar one. \p
|
|
|
|
/// EntryVal is the value from the original loop that maps to the vector phi
|
|
|
|
/// node, and \p Step is the loop-invariant step. If \p EntryVal is a
|
|
|
|
/// truncate instruction, instead of widening the original IV, we widen a
|
|
|
|
/// version of the IV truncated to \p EntryVal's type.
|
2017-02-24 18:20:12 +00:00
|
|
|
void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
|
|
|
|
Value *Step, Instruction *EntryVal);
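// For intuition (assumed VF = 4, UF = 1, i32 IV with step 1): the vector phi
// enters the loop as <%start, %start+1, %start+2, %start+3> and each vector
// iteration adds the splat of VF * Step, i.e. <4, 4, 4, 4>, in the latch.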
|
2016-07-05 15:41:28 +00:00
|
|
|
|
2016-12-07 15:03:32 +00:00
|
|
|
/// Returns true if an instruction \p I should be scalarized instead of
|
|
|
|
/// vectorized for the chosen vectorization factor.
|
|
|
|
bool shouldScalarizeInstruction(Instruction *I) const;
|
|
|
|
|
2016-08-02 15:25:16 +00:00
|
|
|
/// Returns true if we should generate a scalar version of \p IV.
|
|
|
|
bool needsScalarInduction(Instruction *IV) const;
|
|
|
|
|
[LV] Support efficient vectorization of an induction with redundant casts
D30041 extended SCEVPredicateRewriter to improve handling of Phi nodes whose
update chain involves casts; PSCEV can now build an AddRecurrence for some
forms of such phi nodes, under the proper runtime overflow test. This means
that we can identify such phi nodes as an induction, and the loop-vectorizer
can now vectorize such inductions, however inefficiently. The vectorizer
doesn't know that it can ignore the casts, and so it vectorizes them.
This patch records the casts in the InductionDescriptor, so that they could
be marked to be ignored for cost calculation (we use VecValuesToIgnore for
that) and ignored for vectorization/widening/scalarization (i.e. treated as
TriviallyDead).
In addition to marking all these casts to be ignored, we also need to make
sure that each cast is mapped to the right vector value in the vector loop body
(be it a widened, vectorized, or scalarized induction). So whenever an
induction phi is mapped to a vector value (during vectorization/widening/
scalarization), we also map the respective cast instruction (if one exists) to that
vector value. (If the phi-update sequence of an induction involves more than one
cast, then the above mapping to a vector value is relevant only for the last cast
of the sequence as we allow only the "last cast" to be used outside the
induction update chain itself).
This is the last step in addressing PR30654.
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
/// If there is a cast involved in the induction variable \p ID, which should
|
|
|
|
/// be ignored in the vectorized loop body, this function records the
|
|
|
|
/// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
|
|
|
|
/// cast. We had already proved that the casted Phi is equal to the uncasted
|
|
|
|
/// Phi in the vectorized loop (under a runtime guard), and therefore
|
|
|
|
/// there is no need to vectorize the cast - the same value can be used in the
|
|
|
|
/// vector loop for both the Phi and the cast.
|
|
|
|
/// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
|
|
|
|
/// Otherwise, \p VectorLoopValue is a widened/vectorized value.
|
2018-03-20 09:04:39 +00:00
|
|
|
///
|
|
|
|
/// \p EntryVal is the value from the original loop that maps to the vector
|
|
|
|
/// phi node and is used to distinguish what is the IV currently being
|
|
|
|
/// processed - original one (if \p EntryVal is a phi corresponding to the
|
|
|
|
/// original IV) or the "newly-created" one based on the proof mentioned above
|
|
|
|
/// (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()). In the
|
|
|
|
/// latter case \p EntryVal is a TruncInst and we must not record anything for
|
|
|
|
/// that IV, but it's error-prone to expect callers of this routine to care
|
|
|
|
/// about that, hence this explicit parameter.
|
|
|
|
void recordVectorLoopValueForInductionCast(const InductionDescriptor &ID,
|
|
|
|
const Instruction *EntryVal,
|
|
|
|
Value *VectorLoopValue,
|
|
|
|
unsigned Part,
|
|
|
|
unsigned Lane = UINT_MAX);
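// Hypothetical shape of the situation this handles: if the phi-update chain
// of an induction contains a cast such as
//   %iv.cast = trunc i64 %iv to i32
// and PSCEV proved (under a runtime guard) that the casted and uncasted phi
// are equal, the vector value already recorded for the phi is simply recorded
// for %iv.cast as well instead of widening the cast separately.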
|
[LV] Support efficient vectorization of an induction with redundant casts
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// Generate a shuffle sequence that will reverse the vector Vec.
|
2013-08-26 22:33:26 +00:00
|
|
|
virtual Value *reverseVector(Value *Vec);
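// Illustration (assumed VF = 4): reversing a vector is a single shuffle with
// a descending mask:
//   %rev = shufflevector <4 x i32> %vec, <4 x i32> undef,
//                        <4 x i32> <i32 3, i32 2, i32 1, i32 0>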
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
/// Returns (and creates if needed) the original loop trip count.
|
|
|
|
Value *getOrCreateTripCount(Loop *NewLoop);
|
|
|
|
|
|
|
|
/// Returns (and creates if needed) the trip count of the widened loop.
|
|
|
|
Value *getOrCreateVectorTripCount(Loop *NewLoop);
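// The relationship between the two counts (see also the member comments
// further below) is, written out:
//   VectorTripCount = TripCount - (TripCount urem (VF * UF))
// so the vector loop runs a multiple of VF * UF iterations and the scalar
// epilogue loop handles the remainder.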
|
2015-09-02 10:15:22 +00:00
|
|
|
|
[LoopVectorizer] Use two step casting for float to pointer types.
Summary:
LoopVectorizer is creating casts between vec<ptr> and vec<float> types
on ARM when compiling OpenCV. Since it is illegal to directly cast a
floating point type to a pointer type even if the types have the same size,
this causes a crash. Fix the crash by using a two-step cast: bitcast
to integer, then integer to pointer/float.
Fixes PR33804.
Reviewers: mkuper, Ayal, dlj, rengolin, srhines
Reviewed By: rengolin
Subscribers: aemerson, kristof.beyls, mkazantsev, Meinersbur, rengolin, mzolotukhin, llvm-commits
Differential Revision: https://reviews.llvm.org/D35498
llvm-svn: 312331
2017-09-01 15:36:00 +00:00
|
|
|
/// Returns a bitcasted value to the requested vector type.
|
|
|
|
/// Also handles bitcasts of vector<float> <-> vector<pointer> types.
|
|
|
|
Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
|
|
|
|
const DataLayout &DL);
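// Sketch of the two-step lowering (types chosen for illustration, assuming
// 32-bit pointers so the vector sizes match): <4 x float> -> <4 x i8*> is
// split into
//   %as.int = bitcast <4 x float> %v to <4 x i32>
//   %as.ptr = inttoptr <4 x i32> %as.int to <4 x i8*>
// because a direct floating-point-to-pointer bitcast is not valid IR.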
|
|
|
|
|
2017-07-19 05:16:39 +00:00
|
|
|
/// Emit a bypass check to see if the vector trip count is zero, including if
|
|
|
|
/// it overflows.
|
2015-09-02 10:15:22 +00:00
|
|
|
void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
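// Conceptually (pseudo-IR; the real code also accounts for overflow): the
// bypass branches to the scalar loop when fewer than one full vector
// iteration would run:
//   %cmp = icmp ult i64 %trip.count, (VF * UF)
//   br i1 %cmp, label %scalar.ph, label %vector.ph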
|
2017-10-12 23:30:03 +00:00
|
|
|
|
[SCEV][LV] Add SCEV Predicates and use them to re-implement stride versioning
Summary:
SCEV Predicates represent conditions that typically cannot be derived from
static analysis, but can be used to reduce SCEV expressions to forms which are
usable for different optimizers.
ScalarEvolution now has the rewriteUsingPredicate method which can simplify a
SCEV expression using a SCEVPredicateSet. The normal workflow of a pass using
SCEVPredicates would be to hold a SCEVPredicateSet and every time assumptions
need to be made a new SCEV Predicate would be created and added to the set.
Each time after calling getSCEV, the user will call the rewriteUsingPredicate
method.
We add two types of predicates
SCEVPredicateSet - implements a set of predicates
SCEVEqualPredicate - tests for equality between two SCEV expressions
We use the SCEVEqualPredicate to re-implement stride versioning. Every time we
version a stride, we will add a SCEVEqualPredicate to the context.
Instead of adding specific stride checks, LoopVectorize now adds a more
generic SCEV check.
We only need to add support for this in the LoopVectorizer since this is the
only pass that will do stride versioning.
Reviewers: mzolotukhin, anemet, hfinkel, sanjoy
Subscribers: sanjoy, hfinkel, rengolin, jmolloy, llvm-commits
Differential Revision: http://reviews.llvm.org/D13595
llvm-svn: 251800
2015-11-02 14:41:02 +00:00
|
|
|
/// Emit a bypass check to see if all of the SCEV assumptions we've
|
|
|
|
/// had to make are correct.
|
|
|
|
void emitSCEVChecks(Loop *L, BasicBlock *Bypass);
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-09-02 10:15:22 +00:00
|
|
|
/// Emit bypass checks to check any memory assumptions we may have made.
|
|
|
|
void emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
|
[SCEV][LV] Add SCEV Predicates and use them to re-implement stride versioning
llvm-svn: 251800
2015-11-02 14:41:02 +00:00
|
|
|
|
2016-03-17 20:32:37 +00:00
|
|
|
/// Add additional metadata to \p To that was not present on \p Orig.
|
|
|
|
///
|
|
|
|
/// Currently this is used to add the noalias annotations based on the
|
|
|
|
/// inserted memchecks. Use this for instructions that are *cloned* into the
|
|
|
|
/// vector loop.
|
|
|
|
void addNewMetadata(Instruction *To, const Instruction *Orig);
|
|
|
|
|
|
|
|
/// Add metadata from one instruction to another.
|
|
|
|
///
|
|
|
|
/// This includes both the original MDs from \p From and additional ones (\see
|
|
|
|
/// addNewMetadata). Use this for *newly created* instructions in the vector
|
|
|
|
/// loop.
|
2016-06-30 21:17:59 +00:00
|
|
|
void addMetadata(Instruction *To, Instruction *From);
|
2016-03-17 20:32:37 +00:00
|
|
|
|
|
|
|
/// \brief Similar to the previous function but it adds the metadata to a
|
|
|
|
/// vector of instructions.
|
2016-06-30 21:17:59 +00:00
|
|
|
void addMetadata(ArrayRef<Value *> To, Instruction *From);
|
2016-03-17 20:32:37 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// The original loop.
|
|
|
|
Loop *OrigLoop;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
Re-commit r255115, with the PredicatedScalarEvolution class moved to
ScalarEvolution.h, in order to avoid cyclic dependencies between the Transform
and Analysis modules:
[LV][LAA] Add a layer over SCEV to apply run-time checked knowledge on SCEV expressions
Summary:
This change creates a layer over ScalarEvolution for LAA and LV, and centralizes the
usage of SCEV predicates. The SCEVPredicatedLayer takes the statically deduced knowledge
by ScalarEvolution and applies the knowledge from the SCEV predicates. The end goal is
that both LAA and LV should use this interface everywhere.
This also solves a problem involving the result of SCEV expression rewriting when
the predicate changes. Suppose we have the expression (sext {a,+,b}) and two predicates
P1: {a,+,b} has nsw
P2: b = 1.
Applying P1 and then P2 gives us {a,+,1}, while applying P2 and then P1 gives us
sext({a,+,1}) (the AddRec expression was changed by P2 so P1 no longer applies).
The SCEVPredicatedLayer maintains the order of transformations by feeding back
the results of previous transformations into new transformations, thereby
avoiding this issue.
The SCEVPredicatedLayer maintains a cache of the results of previous
SCEV rewrites. This also has the benefit of reducing the overall number
of expression rewrites.
Reviewers: mzolotukhin, anemet
Subscribers: jmolloy, sanjoy, llvm-commits
Differential Revision: http://reviews.llvm.org/D14296
llvm-svn: 255122
2015-12-09 16:06:28 +00:00
|
|
|
/// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
|
|
|
|
/// dynamic knowledge to simplify SCEV expressions and converts them to a
|
|
|
|
/// more usable form.
|
|
|
|
PredicatedScalarEvolution &PSE;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// Loop Info.
|
|
|
|
LoopInfo *LI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// Dominator Tree.
|
|
|
|
DominatorTree *DT;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
[LoopVectorize] Use AA to partition potential dependency checks
Prior to this change, the loop vectorizer did not make use of the alias
analysis infrastructure. Instead, it performed memory dependence analysis using
ScalarEvolution-based linear dependence checks within equivalence classes
derived from the results of ValueTracking's GetUnderlyingObjects.
Unfortunately, this meant that:
1. The loop vectorizer had logic that essentially duplicated that in BasicAA
for aliasing based on identified objects.
2. The loop vectorizer could not partition the space of dependency checks
based on information only easily available from within AA (TBAA metadata is
currently the prime example).
This means, for example, regardless of whether -fno-strict-aliasing was
provided, the vectorizer would only vectorize this loop with a runtime
memory-overlap check:
void foo(int *a, float *b) {
for (int i = 0; i < 1600; ++i)
a[i] = b[i];
}
This is suboptimal because the TBAA metadata already provides the information
necessary to show that this check is unnecessary. Of course, the vectorizer has a
limit on the number of such checks it will insert, so in practice, ignoring
TBAA means not vectorizing more-complicated loops that we should.
This change causes the vectorizer to use an AliasSetTracker to keep track of
the pointers in the loop. The resulting alias sets are then used to partition
the space of dependency checks, and potential runtime checks; this results in
more-efficient vectorizations.
When pointer locations are added to the AliasSetTracker, two things are done:
1. The location size is set to UnknownSize (otherwise you'd not catch
inter-iteration dependencies)
2. For instructions in blocks that would need to be predicated, TBAA is
removed (because the metadata might have a control dependency on the condition
being speculated).
For non-predicated blocks, you can leave the TBAA metadata. This is safe
because you can't have an iteration dependency on the TBAA metadata (if you
did, and you unrolled sufficiently, you'd end up with the same pointer value
used by two accesses that TBAA says should not alias, and that would yield
undefined behavior).
llvm-svn: 213486
2014-07-20 23:07:52 +00:00
|
|
|
/// Alias Analysis.
|
|
|
|
AliasAnalysis *AA;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2013-02-27 15:24:19 +00:00
|
|
|
/// Target Library Info.
|
|
|
|
const TargetLibraryInfo *TLI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-03-17 19:17:18 +00:00
|
|
|
/// Target Transform Info.
|
|
|
|
const TargetTransformInfo *TTI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-12-19 08:22:17 +00:00
|
|
|
/// Assumption Cache.
|
|
|
|
AssumptionCache *AC;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-07-20 21:44:26 +00:00
|
|
|
/// Interface to emit optimization remarks.
|
|
|
|
OptimizationRemarkEmitter *ORE;
|
2013-02-27 15:24:19 +00:00
|
|
|
|
2016-03-17 20:32:37 +00:00
|
|
|
/// \brief LoopVersioning. It's only set up (non-null) if memchecks were
|
|
|
|
/// used.
|
|
|
|
///
|
|
|
|
/// This is currently only used to add no-alias metadata based on the
|
|
|
|
/// memchecks. The actual versioning is performed manually.
|
|
|
|
std::unique_ptr<LoopVersioning> LVer;
|
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// The vectorization SIMD factor to use. Each vector will have this many
|
|
|
|
/// vector elements.
|
|
|
|
unsigned VF;
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// The vectorization unroll factor to use. Each scalar is vectorized to this
|
|
|
|
/// many different vector instructions.
|
|
|
|
unsigned UF;
|
|
|
|
|
|
|
|
/// The builder that we use
|
|
|
|
IRBuilder<> Builder;
|
|
|
|
|
|
|
|
// --- Vectorization state ---
|
|
|
|
|
|
|
|
/// The vector-loop preheader.
|
|
|
|
BasicBlock *LoopVectorPreHeader;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// The scalar-loop preheader.
|
|
|
|
BasicBlock *LoopScalarPreHeader;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// Middle Block between the vector and the scalar.
|
|
|
|
BasicBlock *LoopMiddleBlock;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
/// The ExitBlock of the scalar loop.
|
2013-01-07 10:44:06 +00:00
|
|
|
BasicBlock *LoopExitBlock;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
/// The vector loop body.
|
2016-05-12 18:44:51 +00:00
|
|
|
BasicBlock *LoopVectorBody;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
/// The scalar loop body.
|
2013-01-07 10:44:06 +00:00
|
|
|
BasicBlock *LoopScalarBody;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2013-01-19 13:57:58 +00:00
|
|
|
/// A list of all bypass blocks. The first block is the entry of the loop.
|
|
|
|
SmallVector<BasicBlock *, 4> LoopBypassBlocks;
|
2013-01-07 10:44:06 +00:00
|
|
|
|
|
|
|
/// The new Induction variable which was added to the new block.
|
2017-10-12 23:30:03 +00:00
|
|
|
PHINode *Induction = nullptr;
|
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
/// The induction variable of the old basic block.
|
2017-10-12 23:30:03 +00:00
|
|
|
PHINode *OldInduction = nullptr;
|
[LV] Unify vector and scalar maps
This patch unifies the data structures we use for mapping instructions from the
original loop to their corresponding instructions in the new loop. Previously,
we maintained two distinct maps for this purpose: WidenMap and ScalarIVMap.
WidenMap maintained the vector values each instruction from the old loop was
represented with, and ScalarIVMap maintained the scalar values each scalarized
induction variable was represented with. With this patch, all values created
for the new loop are maintained in VectorLoopValueMap.
The change allows for several simplifications. Previously, when an instruction
was scalarized, we had to insert the scalar values into vectors in order to
maintain the mapping in WidenMap. Then, if a user of the scalarized value was
also scalar, we had to extract the scalar values from the temporary vector we
created. We now avoid these unnecessary scalar-to-vector-to-scalar conversions.
If a scalarized value is used by a scalar instruction, the scalar value is used
directly. However, if the scalarized value is needed by a vector instruction,
we generate the needed insertelement instructions on-demand.
A common idiom in several locations in the code (including the scalarization
code) is to first get the vector values an instruction from the original loop
maps to, and then extract a particular scalar value. This patch adds
getScalarValue for this purpose alongside getVectorValue as an interface into
VectorLoopValueMap. These functions work together to return the requested
values if they're available or to produce them if they're not.
The mapping has also been made less permissive. Entries can be added to
VectorLoopValueMap with the new initVector and initScalar functions.
getVectorValue has been modified to return a constant reference to the mapped
entries.
There's no real functional change with this patch; however, in some cases we
will generate slightly different code. For example, instead of an insertelement
sequence following the definition of an instruction, it will now precede the
first use of that instruction. This can be seen in the test case changes.
Differential Revision: https://reviews.llvm.org/D23169
llvm-svn: 279649
2016-08-24 18:23:17 +00:00
|
|
|
|
|
|
|
/// Maps values from the original loop to their corresponding values in the
|
|
|
|
/// vectorized loop. A key value can map to either vector values, scalar
|
|
|
|
/// values or both kinds of values, depending on whether the key was
|
|
|
|
/// vectorized and scalarized.
|
2017-08-27 12:55:46 +00:00
|
|
|
VectorizerValueMap VectorLoopValueMap;
|
2016-07-14 14:36:06 +00:00
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
/// Store instructions that were predicated.
|
|
|
|
SmallVector<Instruction *, 4> PredicatedInstructions;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
/// Trip count of the original loop.
|
2017-10-12 23:30:03 +00:00
|
|
|
Value *TripCount = nullptr;
|
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
/// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
|
2017-10-12 23:30:03 +00:00
|
|
|
Value *VectorTripCount = nullptr;
|
2014-01-10 18:20:32 +00:00
|
|
|
|
2016-10-05 19:53:20 +00:00
|
|
|
/// The legality analysis.
|
2014-01-10 18:20:32 +00:00
|
|
|
LoopVectorizationLegality *Legal;
|
2015-03-09 06:14:18 +00:00
|
|
|
|
2016-10-05 20:23:46 +00:00
|
|
|
/// The profitablity analysis.
|
|
|
|
LoopVectorizationCostModel *Cost;
|
|
|
|
|
2016-05-24 16:51:26 +00:00
|
|
|
// Record whether runtime checks are added.
|
2017-10-12 23:30:03 +00:00
|
|
|
bool AddedSafetyChecks = false;
|
2016-10-19 19:22:02 +00:00
|
|
|
|
2017-01-09 19:05:29 +00:00
|
|
|
// Holds the end values for each induction variable. We save the end values
|
|
|
|
// so we can later fix up the external users of the induction variables.
|
|
|
|
DenseMap<PHINode *, Value *> IVEndValues;
|
2013-01-07 10:44:06 +00:00
|
|
|
};
|
|
|
|
|
2013-08-26 22:33:26 +00:00
|
|
|
class InnerLoopUnroller : public InnerLoopVectorizer {
|
|
|
|
public:
|
Re-commit r255115, with the PredicatedScalarEvolution class moved to
ScalarEvolution.h, in order to avoid cyclic dependencies between the Transform
and Analysis modules:
[LV][LAA] Add a layer over SCEV to apply run-time checked knowledge on SCEV expressions
llvm-svn: 255122
2015-12-09 16:06:28 +00:00
|
|
|
InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
|
|
|
|
LoopInfo *LI, DominatorTree *DT,
|
|
|
|
const TargetLibraryInfo *TLI,
|
2016-12-19 08:22:17 +00:00
|
|
|
const TargetTransformInfo *TTI, AssumptionCache *AC,
|
2016-10-05 19:53:20 +00:00
|
|
|
OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
|
2016-10-05 20:23:46 +00:00
|
|
|
LoopVectorizationLegality *LVL,
|
|
|
|
LoopVectorizationCostModel *CM)
|
2016-12-19 08:22:17 +00:00
|
|
|
: InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE, 1,
|
2016-10-05 20:23:46 +00:00
|
|
|
UnrollFactor, LVL, CM) {}
|
2013-08-26 22:33:26 +00:00
|
|
|
|
|
|
|
private:
|
2014-03-05 09:10:37 +00:00
|
|
|
Value *getBroadcastInstrs(Value *V) override;
|
2016-07-24 07:24:54 +00:00
|
|
|
Value *getStepVector(Value *Val, int StartIdx, Value *Step,
|
|
|
|
Instruction::BinaryOps Opcode =
|
|
|
|
Instruction::BinaryOpsEnd) override;
|
2014-03-05 09:10:37 +00:00
|
|
|
Value *reverseVector(Value *Vec) override;
|
2013-08-26 22:33:26 +00:00
|
|
|
};
|
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
} // end namespace llvm
|
|
|
|
|
2013-06-28 00:38:54 +00:00
|
|
|
/// \brief Look for a meaningful debug location on the instruction or its
|
|
|
|
/// operands.
|
|
|
|
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
|
|
|
|
if (!I)
|
|
|
|
return I;
|
|
|
|
|
|
|
|
DebugLoc Empty;
|
|
|
|
if (I->getDebugLoc() != Empty)
|
|
|
|
return I;
|
|
|
|
|
|
|
|
for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) {
|
|
|
|
if (Instruction *OpInst = dyn_cast<Instruction>(*OI))
|
|
|
|
if (OpInst->getDebugLoc() != Empty)
|
|
|
|
return OpInst;
|
|
|
|
}
|
|
|
|
|
|
|
|
return I;
|
|
|
|
}
|
|
|
|
|
2017-02-10 21:09:07 +00:00
|
|
|
void InnerLoopVectorizer::setDebugLocFromInst(IRBuilder<> &B, const Value *Ptr) {
|
|
|
|
if (const Instruction *Inst = dyn_cast_or_null<Instruction>(Ptr)) {
|
|
|
|
const DILocation *DIL = Inst->getDebugLoc();
|
2017-10-26 21:20:52 +00:00
|
|
|
if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
|
|
|
|
!isa<DbgInfoIntrinsic>(Inst))
|
2017-02-10 21:09:07 +00:00
|
|
|
B.SetCurrentDebugLocation(DIL->cloneWithDuplicationFactor(UF * VF));
|
|
|
|
else
|
|
|
|
B.SetCurrentDebugLocation(DIL);
|
|
|
|
} else
|
2013-06-28 16:26:54 +00:00
|
|
|
B.SetCurrentDebugLocation(DebugLoc());
|
|
|
|
}
|
2014-04-07 12:46:30 +00:00
|
|
|
|
|
|
|
#ifndef NDEBUG
|
2014-05-20 08:26:20 +00:00
|
|
|
/// \return string containing a file name and a line # for the given loop.
|
|
|
|
static std::string getDebugLocString(const Loop *L) {
|
2014-06-26 22:52:05 +00:00
|
|
|
std::string Result;
|
|
|
|
if (L) {
|
|
|
|
raw_string_ostream OS(Result);
|
2015-03-30 19:49:49 +00:00
|
|
|
if (const DebugLoc LoopDbgLoc = L->getStartLoc())
|
2015-02-26 23:32:17 +00:00
|
|
|
LoopDbgLoc.print(OS);
|
2014-06-26 22:52:05 +00:00
|
|
|
else
|
|
|
|
// Just print the module name.
|
|
|
|
OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
|
|
|
|
OS.flush();
|
|
|
|
}
|
|
|
|
return Result;
|
2014-04-07 12:32:17 +00:00
|
|
|
}
|
2014-04-07 12:46:30 +00:00
|
|
|
#endif
|
|
|
|
|
2016-03-17 20:32:37 +00:00
|
|
|
void InnerLoopVectorizer::addNewMetadata(Instruction *To,
|
|
|
|
const Instruction *Orig) {
|
|
|
|
// If the loop was versioned with memchecks, add the corresponding no-alias
|
|
|
|
// metadata.
|
|
|
|
if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
|
|
|
|
LVer->annotateInstWithNoAlias(To, Orig);
|
|
|
|
}
|
|
|
|
|
|
|
|
void InnerLoopVectorizer::addMetadata(Instruction *To,
|
2016-06-30 21:17:59 +00:00
|
|
|
Instruction *From) {
|
2016-03-17 20:32:37 +00:00
|
|
|
propagateMetadata(To, From);
|
|
|
|
addNewMetadata(To, From);
|
|
|
|
}
|
|
|
|
|
2016-06-30 21:17:59 +00:00
|
|
|
void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
|
|
|
|
Instruction *From) {
|
|
|
|
for (Value *V : To) {
|
2014-07-19 13:33:16 +00:00
|
|
|
if (Instruction *I = dyn_cast<Instruction>(V))
|
2016-03-17 20:32:37 +00:00
|
|
|
addMetadata(I, From);
|
2016-06-30 21:17:59 +00:00
|
|
|
}
|
2014-07-19 13:33:16 +00:00
|
|
|
}
|
|
|
|
|
2017-12-16 01:12:50 +00:00
|
|
|
namespace llvm {
|
2017-08-27 12:55:46 +00:00
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
Interleaved memory accesses are grouped and vectorized into vector load/store and shufflevector.
E.g. for (i = 0; i < N; i+=2) {
a = A[i]; // load of even element
b = A[i+1]; // load of odd element
... // operations on a, b, c, d
A[i] = c; // store of even element
A[i+1] = d; // store of odd element
}
The loads of even and odd elements are identified as an interleave load group, which will be transferred into vectorized IR like:
%wide.vec = load <8 x i32>, <8 x i32>* %ptr
%vec.even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%vec.odd = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
The stores of even and odd elements are identified as an interleave store group, which will be transferred into vectorized IR like:
%interleaved.vec = shufflevector <4 x i32> %vec.even, <4 x i32> %vec.odd, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
This optimization is currently disabled by default. To try it, add '-enable-interleaved-mem-accesses=true'.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
/// \brief The group of interleaved loads/stores sharing the same stride and
|
|
|
|
/// close to each other.
|
|
|
|
///
|
|
|
|
/// Each member in this group has an index starting from 0, and the largest
|
|
|
|
/// index should be less than the interleave factor, which is equal to the absolute
|
|
|
|
/// value of the access's stride.
|
|
|
|
///
|
|
|
|
/// E.g. An interleaved load group of factor 4:
|
|
|
|
/// for (unsigned i = 0; i < 1024; i+=4) {
|
|
|
|
/// a = A[i]; // Member of index 0
|
|
|
|
/// b = A[i+1]; // Member of index 1
|
|
|
|
/// d = A[i+3]; // Member of index 3
|
|
|
|
/// ...
|
|
|
|
/// }
|
|
|
|
///
|
|
|
|
/// An interleaved store group of factor 4:
|
|
|
|
/// for (unsigned i = 0; i < 1024; i+=4) {
|
|
|
|
/// ...
|
|
|
|
/// A[i] = a; // Member of index 0
|
|
|
|
/// A[i+1] = b; // Member of index 1
|
|
|
|
/// A[i+2] = c; // Member of index 2
|
|
|
|
/// A[i+3] = d; // Member of index 3
|
|
|
|
/// }
|
|
|
|
///
|
|
|
|
/// Note: the interleaved load group could have gaps (missing members), but
|
|
|
|
/// the interleaved store group doesn't allow gaps.
|
|
|
|
class InterleaveGroup {
|
|
|
|
public:
|
|
|
|
InterleaveGroup(Instruction *Instr, int Stride, unsigned Align)
|
2017-10-12 23:30:03 +00:00
|
|
|
: Align(Align), InsertPos(Instr) {
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
assert(Align && "The alignment should be non-zero");
|
|
|
|
|
|
|
|
Factor = std::abs(Stride);
|
|
|
|
assert(Factor > 1 && "Invalid interleave factor");
|
|
|
|
|
|
|
|
Reverse = Stride < 0;
|
|
|
|
Members[0] = Instr;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool isReverse() const { return Reverse; }
|
|
|
|
unsigned getFactor() const { return Factor; }
|
|
|
|
unsigned getAlignment() const { return Align; }
|
|
|
|
unsigned getNumMembers() const { return Members.size(); }
|
|
|
|
|
|
|
|
/// \brief Try to insert a new member \p Instr with index \p Index and
|
|
|
|
/// alignment \p NewAlign. The index is relative to the leader and it could be
|
|
|
|
/// negative if it is the new leader.
|
|
|
|
///
|
|
|
|
/// \returns false if the instruction doesn't belong to the group.
|
|
|
|
bool insertMember(Instruction *Instr, int Index, unsigned NewAlign) {
|
|
|
|
assert(NewAlign && "The new member's alignment should be non-zero");
|
|
|
|
|
|
|
|
int Key = Index + SmallestKey;
|
|
|
|
|
|
|
|
// Skip if there is already a member with the same index.
|
|
|
|
if (Members.count(Key))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (Key > LargestKey) {
|
|
|
|
// The largest index is always less than the interleave factor.
|
|
|
|
if (Index >= static_cast<int>(Factor))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
LargestKey = Key;
|
|
|
|
} else if (Key < SmallestKey) {
|
|
|
|
// The distance between the largest and the smallest index is always less than the interleave factor.
|
|
|
|
if (LargestKey - Key >= static_cast<int>(Factor))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
SmallestKey = Key;
|
|
|
|
}
|
|
|
|
|
|
|
|
// It's always safe to select the minimum alignment.
|
|
|
|
Align = std::min(Align, NewAlign);
|
|
|
|
Members[Key] = Instr;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Get the member with the given index \p Index
|
|
|
|
///
|
|
|
|
/// \returns nullptr if it contains no such member.
|
|
|
|
Instruction *getMember(unsigned Index) const {
|
|
|
|
int Key = SmallestKey + Index;
|
|
|
|
if (!Members.count(Key))
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
return Members.find(Key)->second;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Get the index for the given member. Unlike the key in the member
|
|
|
|
/// map, the index starts from 0.
|
|
|
|
unsigned getIndex(Instruction *Instr) const {
|
|
|
|
for (auto I : Members)
|
|
|
|
if (I.second == Instr)
|
|
|
|
return I.first - SmallestKey;
|
|
|
|
|
|
|
|
llvm_unreachable("InterleaveGroup contains no such member");
|
|
|
|
}
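// Illustration of the key scheme (hypothetical insertion order): if members
// end up stored under keys -1, 0 and 1, then SmallestKey == -1 and
// getIndex() rebases them to 0, 1 and 2, so getMember(0) returns the
// instruction that was stored under key -1.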
|
|
|
|
|
|
|
|
Instruction *getInsertPos() const { return InsertPos; }
|
|
|
|
void setInsertPos(Instruction *Inst) { InsertPos = Inst; }
|
|
|
|
|
2017-12-06 22:42:24 +00:00
|
|
|
/// Add metadata (e.g. alias info) from the instructions in this group to \p
|
|
|
|
/// NewInst.
|
|
|
|
///
|
|
|
|
/// FIXME: this function currently does not add noalias metadata a la
|
|
|
|
/// addNewMetadata. To do that we need to compute the intersection of the
|
|
|
|
/// noalias info from all members.
|
|
|
|
void addMetadata(Instruction *NewInst) const {
|
|
|
|
SmallVector<Value *, 4> VL;
|
|
|
|
std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
|
|
|
|
[](std::pair<int, Instruction *> p) { return p.second; });
|
|
|
|
propagateMetadata(NewInst, VL);
|
|
|
|
}
|
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
private:
|
|
|
|
unsigned Factor; // Interleave Factor.
|
|
|
|
bool Reverse;
|
|
|
|
unsigned Align;
|
|
|
|
DenseMap<int, Instruction *> Members;
|
2017-10-12 23:30:03 +00:00
|
|
|
int SmallestKey = 0;
|
|
|
|
int LargestKey = 0;
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
// To avoid breaking dependences, vectorized instructions of an interleave
|
|
|
|
// group should be inserted at either the first load or the last store in
|
|
|
|
// program order.
|
|
|
|
//
|
|
|
|
// E.g. %even = load i32 // Insert Position
|
|
|
|
// %add = add i32 %even // Use of %even
|
|
|
|
// %odd = load i32
|
|
|
|
//
|
|
|
|
// store i32 %even
|
|
|
|
// %odd = add i32 // Def of %odd
|
|
|
|
// store i32 %odd // Insert Position
|
|
|
|
Instruction *InsertPos;
|
|
|
|
};
|
2017-12-16 01:12:50 +00:00
|
|
|
} // end namespace llvm
|
|
|
|
|
|
|
|
namespace {
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
/// \brief Drive the analysis of interleaved memory accesses in the loop.
|
|
|
|
///
|
|
|
|
/// Use this class to analyze interleaved accesses only when we can vectorize
|
|
|
|
/// a loop. Otherwise it's meaningless to do the analysis, as vectorization
|
|
|
|
/// of interleaved accesses is unsafe.
|
|
|
|
///
|
|
|
|
/// The analysis collects interleave groups and records the relationships
|
|
|
|
/// between the member and the group in a map.
|
|
|
|
class InterleavedAccessInfo {
|
|
|
|
public:
|
Re-commit r255115, with the PredicatedScalarEvolution class moved to
ScalarEvolution.h, in order to avoid cyclic dependencies between the Transform
and Analysis modules:
[LV][LAA] Add a layer over SCEV to apply run-time checked knowledge on SCEV expressions
llvm-svn: 255122
2015-12-09 16:06:28 +00:00
|
|
|
InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
|
2016-06-24 15:33:25 +00:00
|
|
|
DominatorTree *DT, LoopInfo *LI)
|
2017-10-12 23:30:03 +00:00
|
|
|
: PSE(PSE), TheLoop(L), DT(DT), LI(LI) {}
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
~InterleavedAccessInfo() {
|
|
|
|
SmallSet<InterleaveGroup *, 4> DelSet;
|
|
|
|
// Avoid releasing a pointer twice.
|
|
|
|
for (auto &I : InterleaveGroupMap)
|
|
|
|
DelSet.insert(I.second);
|
|
|
|
for (auto *Ptr : DelSet)
|
|
|
|
delete Ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Analyze the interleaved accesses and collect them in interleave
|
|
|
|
/// groups. Substitute symbolic strides using \p Strides.
|
|
|
|
void analyzeInterleaving(const ValueToValueMap &Strides);
|
|
|
|
|
|
|
|
/// \brief Check if \p Instr belongs to any interleave group.
|
|
|
|
bool isInterleaved(Instruction *Instr) const {
|
|
|
|
return InterleaveGroupMap.count(Instr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Get the interleave group that \p Instr belongs to.
|
|
|
|
///
|
|
|
|
/// \returns nullptr if \p Instr does not belong to any group.
|
|
|
|
InterleaveGroup *getInterleaveGroup(Instruction *Instr) const {
|
|
|
|
if (InterleaveGroupMap.count(Instr))
|
|
|
|
return InterleaveGroupMap.find(Instr)->second;
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2016-04-27 18:21:36 +00:00
|
|
|
/// \brief Returns true if an interleaved group that may access memory
|
|
|
|
/// out-of-bounds requires a scalar epilogue iteration for correctness.
|
|
|
|
bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
|
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
/// \brief Initialize the LoopAccessInfo used for dependence checking.
|
|
|
|
void setLAI(const LoopAccessInfo *Info) { LAI = Info; }
|
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
private:
|
Re-commit r255115, with the PredicatedScalarEvolution class moved to
ScalarEvolution.h, in order to avoid cyclic dependencies between the Transform
and Analysis modules:
[LV][LAA] Add a layer over SCEV to apply run-time checked knowledge on SCEV expressions
llvm-svn: 255122
2015-12-09 16:06:28 +00:00
|
|
|
/// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
|
|
|
|
/// Simplifies SCEV expressions in the context of existing SCEV assumptions.
|
|
|
|
/// The interleaved access analysis can also add new predicates (for example
|
|
|
|
/// by versioning strides of pointers).
|
|
|
|
PredicatedScalarEvolution &PSE;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
Loop *TheLoop;
|
|
|
|
DominatorTree *DT;
|
2016-06-24 15:33:25 +00:00
|
|
|
LoopInfo *LI;
|
2017-10-12 23:30:03 +00:00
|
|
|
const LoopAccessInfo *LAI = nullptr;
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-04-27 18:21:36 +00:00
|
|
|
/// True if the loop may contain non-reversed interleaved groups with
|
|
|
|
/// out-of-bounds accesses. We ensure we don't speculatively access memory
|
|
|
|
/// out-of-bounds by executing at least one scalar epilogue iteration.
|
2017-10-12 23:30:03 +00:00
|
|
|
bool RequiresScalarEpilogue = false;
|
2016-04-27 18:21:36 +00:00
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
/// Holds the relationships between the members and the interleave group.
|
|
|
|
DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;
|
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
/// Holds dependences among the memory accesses in the loop. It maps a source
|
|
|
|
/// access to a set of dependent sink accesses.
|
|
|
|
DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
|
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
/// \brief The descriptor for a strided memory access.
|
|
|
|
struct StrideDescriptor {
|
2017-10-12 23:30:03 +00:00
|
|
|
StrideDescriptor() = default;
|
2016-07-12 19:35:15 +00:00
|
|
|
StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
unsigned Align)
|
|
|
|
: Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}
|
|
|
|
|
2016-07-12 19:35:15 +00:00
|
|
|
// The access's stride. It is negative for a reverse access.
|
|
|
|
int64_t Stride = 0;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
|
|
|
// The scalar expression of this access.
|
|
|
|
const SCEV *Scev = nullptr;
|
|
|
|
|
|
|
|
// The size of the memory object.
|
|
|
|
uint64_t Size = 0;
|
|
|
|
|
|
|
|
// The alignment of this access.
|
|
|
|
unsigned Align = 0;
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
};
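  // Illustrative example (values assumed, not taken from a real run): for a
  // four-byte load of 'A[2 * i]' inside the loop, the descriptor would look
  // roughly like
  //   StrideDescriptor SD(/*Stride=*/2, /*Scev=*/PtrScev, /*Size=*/4,
  //                       /*Align=*/4);
  // where PtrScev stands for the SCEV of the load's pointer operand, and a
  // negative Stride would indicate a reverse (decreasing) access.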
|
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
/// \brief A type for holding instructions and their stride descriptors.
|
2017-10-12 23:30:03 +00:00
|
|
|
using StrideEntry = std::pair<Instruction *, StrideDescriptor>;
|
2016-06-24 15:33:25 +00:00
|
|
|
|
|
|
|
/// \brief Create a new interleave group with the given instruction \p Instr,
|
|
|
|
/// stride \p Stride and alignment \p Align.
|
|
|
|
///
|
|
|
|
/// \returns the newly created interleave group.
|
|
|
|
InterleaveGroup *createInterleaveGroup(Instruction *Instr, int Stride,
|
|
|
|
unsigned Align) {
|
|
|
|
assert(!InterleaveGroupMap.count(Instr) &&
|
|
|
|
"Already in an interleaved access group");
|
|
|
|
InterleaveGroupMap[Instr] = new InterleaveGroup(Instr, Stride, Align);
|
|
|
|
return InterleaveGroupMap[Instr];
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Release the group and remove all the relationships.
|
|
|
|
void releaseGroup(InterleaveGroup *Group) {
|
|
|
|
for (unsigned i = 0; i < Group->getFactor(); i++)
|
|
|
|
if (Instruction *Member = Group->getMember(i))
|
|
|
|
InterleaveGroupMap.erase(Member);
|
|
|
|
|
|
|
|
delete Group;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Collect all the accesses with a constant stride in program order.
|
2016-07-14 21:05:08 +00:00
|
|
|
void collectConstStrideAccesses(
|
|
|
|
MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
|
|
|
|
const ValueToValueMap &Strides);
|
2016-06-24 15:33:25 +00:00
|
|
|
|
|
|
|
/// \brief Returns true if \p Stride is allowed in an interleaved group.
|
|
|
|
static bool isStrided(int Stride) {
|
|
|
|
unsigned Factor = std::abs(Stride);
|
|
|
|
return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
|
|
|
|
}
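  // For example, assuming MaxInterleaveGroupFactor is 8 (an illustrative value):
  //   isStrided(-3) -> true   (reverse access, factor 3)
  //   isStrided(1)  -> false  (consecutive access, not an interleave candidate)
  //   isStrided(16) -> false  (factor exceeds the assumed maximum)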
|
|
|
|
|
2016-07-14 20:59:47 +00:00
|
|
|
/// \brief Returns true if \p BB is a predicated block.
|
|
|
|
bool isPredicated(BasicBlock *BB) const {
|
|
|
|
return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
|
|
|
|
}
|
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
/// \brief Returns true if LoopAccessInfo can be used for dependence queries.
|
|
|
|
bool areDependencesValid() const {
|
|
|
|
return LAI && LAI->getDepChecker().getDependences();
|
|
|
|
}
|
|
|
|
|
2016-07-15 15:22:43 +00:00
|
|
|
/// \brief Returns true if memory accesses \p A and \p B can be reordered, if
|
2016-06-24 15:33:25 +00:00
|
|
|
/// necessary, when constructing interleaved groups.
|
|
|
|
///
|
2016-07-15 15:22:43 +00:00
|
|
|
/// \p A must precede \p B in program order. We return false if reordering is
|
|
|
|
/// not necessary or is prevented because \p A and \p B may be dependent.
|
|
|
|
bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
|
|
|
|
StrideEntry *B) const {
|
2016-06-24 15:33:25 +00:00
|
|
|
// Code motion for interleaved accesses can potentially hoist strided loads
|
|
|
|
// and sink strided stores. The code below checks the legality of the
|
|
|
|
// following two conditions:
|
|
|
|
//
|
2016-07-15 15:22:43 +00:00
|
|
|
// 1. Potentially moving a strided load (B) before any store (A) that
|
|
|
|
// precedes B, or
|
2016-06-24 15:33:25 +00:00
|
|
|
//
|
2016-07-15 15:22:43 +00:00
|
|
|
// 2. Potentially moving a strided store (A) after any load or store (B)
|
|
|
|
// that A precedes.
|
2016-06-24 15:33:25 +00:00
|
|
|
//
|
2016-07-15 15:22:43 +00:00
|
|
|
// It's legal to reorder A and B if we know there isn't a dependence from A
|
|
|
|
// to B. Note that this determination is conservative since some
|
2016-06-24 15:33:25 +00:00
|
|
|
// dependences could potentially be reordered safely.
|
|
|
|
|
2016-07-15 15:22:43 +00:00
|
|
|
// A is potentially the source of a dependence.
|
|
|
|
auto *Src = A->first;
|
|
|
|
auto SrcDes = A->second;
|
2016-06-24 15:33:25 +00:00
|
|
|
|
2016-07-15 15:22:43 +00:00
|
|
|
// B is potentially the sink of a dependence.
|
|
|
|
auto *Sink = B->first;
|
|
|
|
auto SinkDes = B->second;
|
2016-06-24 15:33:25 +00:00
|
|
|
|
|
|
|
// Code motion for interleaved accesses can't violate WAR dependences.
|
|
|
|
// Thus, reordering is legal if the source isn't a write.
|
|
|
|
if (!Src->mayWriteToMemory())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// At least one of the accesses must be strided.
|
|
|
|
if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// If dependence information is not available from LoopAccessInfo,
|
|
|
|
// conservatively assume the instructions can't be reordered.
|
|
|
|
if (!areDependencesValid())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If we know there is a dependence from source to sink, assume the
|
|
|
|
// instructions can't be reordered. Otherwise, reordering is legal.
|
|
|
|
return !Dependences.count(Src) || !Dependences.lookup(Src).count(Sink);
|
|
|
|
}
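  // Illustrative scenario (assumed loop body) for the check above:
  //   A[2*i]     = x;    // (1) strided store
  //   y          = B[i]; // (2) load that follows (1) in program order
  //   A[2*i + 1] = z;    // (3) strided store in the same group as (1)
  // Sinking (1) past (2) so it can be emitted next to (3) is only treated as
  // legal if LoopAccessInfo records no dependence from (1) to (2); when the
  // dependence set is unavailable, the query conservatively answers false.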
|
|
|
|
|
|
|
|
/// \brief Collect the dependences from LoopAccessInfo.
|
|
|
|
///
|
|
|
|
/// We process the dependences once during the interleaved access analysis to
|
|
|
|
/// enable constant-time dependence queries.
|
|
|
|
void collectDependences() {
|
|
|
|
if (!areDependencesValid())
|
|
|
|
return;
|
|
|
|
auto *Deps = LAI->getDepChecker().getDependences();
|
|
|
|
for (auto Dep : *Deps)
|
|
|
|
Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Utility class for getting and setting loop vectorizer hints in the form
|
|
|
|
/// of loop metadata.
|
|
|
|
/// This class keeps a number of loop annotations locally (as member variables)
|
|
|
|
/// and can, upon request, write them back as metadata on the loop. It will
|
|
|
|
/// initially scan the loop for existing metadata, and will update the local
|
|
|
|
/// values based on information in the loop.
|
|
|
|
/// We cannot write all values to metadata, as the mere presence of some info,
|
|
|
|
/// for example 'force', means a decision has been made. So, we need to be
|
|
|
|
/// careful NOT to add them if the user hasn't specifically asked for them.
|
|
|
|
class LoopVectorizeHints {
|
2017-08-20 10:32:41 +00:00
|
|
|
enum HintKind { HK_WIDTH, HK_UNROLL, HK_FORCE, HK_ISVECTORIZED };
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Hint - associates name and validation with the hint value.
|
|
|
|
struct Hint {
|
2016-05-05 00:54:54 +00:00
|
|
|
const char *Name;
|
2015-08-11 00:52:54 +00:00
|
|
|
unsigned Value; // This may have to change for non-numeric values.
|
|
|
|
HintKind Kind;
|
2015-01-30 05:02:21 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
Hint(const char *Name, unsigned Value, HintKind Kind)
|
|
|
|
: Name(Name), Value(Value), Kind(Kind) {}
|
2015-01-30 05:02:21 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
bool validate(unsigned Val) {
|
|
|
|
switch (Kind) {
|
|
|
|
case HK_WIDTH:
|
|
|
|
return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
|
|
|
|
case HK_UNROLL:
|
|
|
|
return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
|
|
|
|
case HK_FORCE:
|
|
|
|
return (Val <= 1);
|
2017-08-20 10:32:41 +00:00
|
|
|
case HK_ISVECTORIZED:
|
|
|
|
      return (Val == 0 || Val == 1);
|
2015-01-30 05:02:21 +00:00
|
|
|
}
|
2015-08-11 00:52:54 +00:00
|
|
|
return false;
|
2015-01-30 05:02:21 +00:00
|
|
|
}
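    // For instance, validate(3) is false for HK_WIDTH and HK_UNROLL (3 is not
    // a power of two), while validate(4) is true for HK_WIDTH as long as 4
    // does not exceed VectorizerParams::MaxVectorWidth.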
|
2013-01-07 10:44:06 +00:00
|
|
|
};
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Vectorization width.
|
|
|
|
Hint Width;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Vectorization interleave factor.
|
|
|
|
Hint Interleave;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Vectorization forced
|
|
|
|
Hint Force;
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2017-08-20 10:32:41 +00:00
|
|
|
/// Already Vectorized
|
|
|
|
Hint IsVectorized;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Return the loop metadata prefix.
|
|
|
|
static StringRef Prefix() { return "llvm.loop."; }
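  // For illustration, a loop annotated with
  //   #pragma clang loop vectorize_width(4) interleave_count(2)
  // is expected to carry metadata roughly of this shape (simplified, assumed):
  //   br i1 %cond, label %header, label %exit, !llvm.loop !0
  //   !0 = distinct !{!0, !1, !2}
  //   !1 = !{!"llvm.loop.vectorize.width", i32 4}
  //   !2 = !{!"llvm.loop.interleave.count", i32 2}
  // The "llvm.loop." prefix above is what setHint() strips before matching the
  // remaining name against the Width, Interleave, Force and IsVectorized hints.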
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2016-04-14 20:42:18 +00:00
|
|
|
/// True if there is any unsafe math in the loop.
|
2017-10-12 23:30:03 +00:00
|
|
|
bool PotentiallyUnsafe = false;
|
2016-04-14 20:42:18 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
public:
|
|
|
|
enum ForceKind {
|
|
|
|
FK_Undefined = -1, ///< Not selected.
|
|
|
|
FK_Disabled = 0, ///< Forcing disabled.
|
|
|
|
FK_Enabled = 1, ///< Forcing enabled.
|
|
|
|
};
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2016-07-20 21:44:26 +00:00
|
|
|
LoopVectorizeHints(const Loop *L, bool DisableInterleaving,
|
|
|
|
OptimizationRemarkEmitter &ORE)
|
2015-08-11 00:52:54 +00:00
|
|
|
: Width("vectorize.width", VectorizerParams::VectorizationFactor,
|
|
|
|
HK_WIDTH),
|
|
|
|
Interleave("interleave.count", DisableInterleaving, HK_UNROLL),
|
|
|
|
Force("vectorize.enable", FK_Undefined, HK_FORCE),
|
2017-10-12 23:30:03 +00:00
|
|
|
IsVectorized("isvectorized", 0, HK_ISVECTORIZED), TheLoop(L), ORE(ORE) {
|
2015-08-11 00:52:54 +00:00
|
|
|
// Populate values with existing loop metadata.
|
|
|
|
getHintsFromMetadata();
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// force-vector-interleave overrides DisableInterleaving.
|
|
|
|
if (VectorizerParams::isInterleaveForced())
|
|
|
|
Interleave.Value = VectorizerParams::VectorizationInterleave;
|
2013-05-11 23:04:28 +00:00
|
|
|
|
2017-08-20 10:32:41 +00:00
|
|
|
if (IsVectorized.Value != 1)
|
|
|
|
// If the vectorization width and interleaving count are both 1 then
|
|
|
|
// consider the loop to have been already vectorized because there's
|
|
|
|
// nothing more that we can do.
|
|
|
|
IsVectorized.Value = Width.Value == 1 && Interleave.Value == 1;
|
2015-08-11 00:52:54 +00:00
|
|
|
DEBUG(if (DisableInterleaving && Interleave.Value == 1) dbgs()
|
|
|
|
<< "LV: Interleaving disabled by the pass manager\n");
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Mark the loop L as already vectorized by setting the IsVectorized hint.
|
|
|
|
void setAlreadyVectorized() {
|
2017-08-20 10:32:41 +00:00
|
|
|
IsVectorized.Value = 1;
|
|
|
|
Hint Hints[] = {IsVectorized};
|
2015-08-11 00:52:54 +00:00
|
|
|
writeHintsToMetadata(Hints);
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
bool allowVectorization(Function *F, Loop *L, bool AlwaysVectorize) const {
|
|
|
|
if (getForce() == LoopVectorizeHints::FK_Disabled) {
|
|
|
|
DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
|
2016-09-29 16:23:12 +00:00
|
|
|
emitRemarkWithHints();
|
2015-08-11 00:52:54 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
if (!AlwaysVectorize && getForce() != LoopVectorizeHints::FK_Enabled) {
|
|
|
|
DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
|
2016-09-29 16:23:12 +00:00
|
|
|
emitRemarkWithHints();
|
2015-08-11 00:52:54 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2017-08-20 10:32:41 +00:00
|
|
|
if (getIsVectorized() == 1) {
|
2015-08-11 00:52:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
|
|
|
|
// FIXME: Add interleave.disable metadata. This will allow
|
|
|
|
// vectorize.disable to be used without disabling the pass and errors
|
|
|
|
// to differentiate between disabled vectorization and a width of 1.
|
2017-09-19 23:00:55 +00:00
|
|
|
ORE.emit([&]() {
|
|
|
|
return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
|
2016-09-30 00:29:25 +00:00
|
|
|
"AllDisabled", L->getStartLoc(),
|
|
|
|
L->getHeader())
|
|
|
|
<< "loop not vectorized: vectorization and interleaving are "
|
2017-08-20 10:32:41 +00:00
|
|
|
"explicitly disabled, or the loop has already been "
|
2017-09-19 23:00:55 +00:00
|
|
|
"vectorized";
|
|
|
|
});
|
2015-08-11 00:52:54 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
return true;
|
2015-02-01 16:56:04 +00:00
|
|
|
}
|
2013-04-19 21:03:36 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Emit an optimization remark that includes the hint information.
|
2016-09-29 16:23:12 +00:00
|
|
|
void emitRemarkWithHints() const {
|
|
|
|
using namespace ore;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE.emit([&]() {
|
|
|
|
if (Force.Value == LoopVectorizeHints::FK_Disabled)
|
|
|
|
return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
|
2016-09-29 16:23:12 +00:00
|
|
|
TheLoop->getStartLoc(),
|
|
|
|
TheLoop->getHeader())
|
2017-10-11 17:12:59 +00:00
|
|
|
<< "loop not vectorized: vectorization is explicitly disabled";
|
|
|
|
else {
|
|
|
|
OptimizationRemarkMissed R(LV_NAME, "MissedDetails",
|
|
|
|
TheLoop->getStartLoc(),
|
|
|
|
TheLoop->getHeader());
|
|
|
|
R << "loop not vectorized";
|
|
|
|
if (Force.Value == LoopVectorizeHints::FK_Enabled) {
|
|
|
|
R << " (Force=" << NV("Force", true);
|
|
|
|
if (Width.Value != 0)
|
|
|
|
R << ", Vector Width=" << NV("VectorWidth", Width.Value);
|
|
|
|
if (Interleave.Value != 0)
|
|
|
|
R << ", Interleave Count="
|
|
|
|
<< NV("InterleaveCount", Interleave.Value);
|
|
|
|
R << ")";
|
|
|
|
}
|
|
|
|
return R;
|
2015-08-11 00:52:54 +00:00
|
|
|
}
|
2017-10-11 17:12:59 +00:00
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
unsigned getWidth() const { return Width.Value; }
|
|
|
|
unsigned getInterleave() const { return Interleave.Value; }
|
2017-08-20 10:32:41 +00:00
|
|
|
unsigned getIsVectorized() const { return IsVectorized.Value; }
|
2015-08-11 00:52:54 +00:00
|
|
|
enum ForceKind getForce() const { return (ForceKind)Force.Value; }
|
2016-06-29 22:04:10 +00:00
|
|
|
|
|
|
|
/// \brief If hints are provided that force vectorization, use the AlwaysPrint
|
|
|
|
/// pass name to force the frontend to print the diagnostic.
|
2015-08-27 01:02:04 +00:00
|
|
|
const char *vectorizeAnalysisPassName() const {
|
|
|
|
if (getWidth() == 1)
|
|
|
|
return LV_NAME;
|
|
|
|
if (getForce() == LoopVectorizeHints::FK_Disabled)
|
|
|
|
return LV_NAME;
|
|
|
|
if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth() == 0)
|
|
|
|
return LV_NAME;
|
2016-09-27 22:19:23 +00:00
|
|
|
return OptimizationRemarkAnalysis::AlwaysPrint;
|
2015-08-11 01:09:15 +00:00
|
|
|
}
|
|
|
|
|
2015-08-27 18:56:49 +00:00
|
|
|
bool allowReordering() const {
|
|
|
|
// When hints that enable vectorization are provided, we allow the vectorizer to change
|
|
|
|
// the order of operations that is given by the scalar loop. This is not
|
|
|
|
// enabled by default because it can be unsafe or inefficient. For example,
|
|
|
|
// reordering floating-point operations will change the way round-off
|
|
|
|
// error accumulates in the loop.
|
|
|
|
return getForce() == LoopVectorizeHints::FK_Enabled || getWidth() > 1;
|
|
|
|
}
|
|
|
|
|
2016-04-14 20:42:18 +00:00
|
|
|
bool isPotentiallyUnsafe() const {
|
|
|
|
// Avoid FP vectorization if the target is unsure about proper support.
|
|
|
|
// This may be related to the SIMD unit in the target not handling
|
|
|
|
// IEEE 754 FP ops properly, or bad single-to-double promotions.
|
|
|
|
// Otherwise, a sequence of vectorized loops, even without reduction,
|
|
|
|
// could lead to different end results on the destination vectors.
|
|
|
|
return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
|
|
|
|
}
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }
|
2016-04-14 20:42:18 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
private:
|
|
|
|
/// Find hints specified in the loop metadata and update local values.
|
|
|
|
void getHintsFromMetadata() {
|
|
|
|
MDNode *LoopID = TheLoop->getLoopID();
|
|
|
|
if (!LoopID)
|
|
|
|
return;
|
2013-06-24 12:09:15 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// First operand should refer to the loop id itself.
|
|
|
|
assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
|
|
|
|
assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
|
2014-01-10 18:20:32 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
|
|
|
|
const MDString *S = nullptr;
|
|
|
|
SmallVector<Metadata *, 4> Args;
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// The expected hint is either a MDString or a MDNode with the first
|
|
|
|
// operand a MDString.
|
|
|
|
if (const MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i))) {
|
|
|
|
if (!MD || MD->getNumOperands() == 0)
|
|
|
|
continue;
|
|
|
|
S = dyn_cast<MDString>(MD->getOperand(0));
|
|
|
|
for (unsigned i = 1, ie = MD->getNumOperands(); i < ie; ++i)
|
|
|
|
Args.push_back(MD->getOperand(i));
|
|
|
|
} else {
|
|
|
|
S = dyn_cast<MDString>(LoopID->getOperand(i));
|
|
|
|
assert(Args.size() == 0 && "too many arguments for MDString");
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
if (!S)
|
|
|
|
continue;
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// Check if the hint starts with the loop metadata prefix.
|
|
|
|
StringRef Name = S->getString();
|
|
|
|
if (Args.size() == 1)
|
|
|
|
setHint(Name, Args[0]);
|
|
|
|
}
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Checks string hint with one operand and set value if valid.
|
|
|
|
void setHint(StringRef Name, Metadata *Arg) {
|
|
|
|
if (!Name.startswith(Prefix()))
|
|
|
|
return;
|
|
|
|
Name = Name.substr(Prefix().size(), StringRef::npos);
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
|
2016-05-05 00:54:54 +00:00
|
|
|
if (!C)
|
|
|
|
return;
|
2015-08-11 00:52:54 +00:00
|
|
|
unsigned Val = C->getZExtValue();
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2017-08-20 10:32:41 +00:00
|
|
|
Hint *Hints[] = {&Width, &Interleave, &Force, &IsVectorized};
|
2015-08-11 00:52:54 +00:00
|
|
|
for (auto H : Hints) {
|
|
|
|
if (Name == H->Name) {
|
|
|
|
if (H->validate(Val))
|
|
|
|
H->Value = Val;
|
|
|
|
else
|
|
|
|
DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-01-10 18:20:32 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Create a new hint from name / value pair.
|
|
|
|
MDNode *createHintMetadata(StringRef Name, unsigned V) const {
|
|
|
|
LLVMContext &Context = TheLoop->getHeader()->getContext();
|
|
|
|
Metadata *MDs[] = {MDString::get(Context, Name),
|
|
|
|
ConstantAsMetadata::get(
|
|
|
|
ConstantInt::get(Type::getInt32Ty(Context), V))};
|
|
|
|
return MDNode::get(Context, MDs);
|
2014-06-25 17:50:15 +00:00
|
|
|
}
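  // As a small illustration, createHintMetadata("llvm.loop.isvectorized", 1)
  // is expected to return a node of roughly the form
  //   !{!"llvm.loop.isvectorized", i32 1}
  // which writeHintsToMetadata() then splices into the loop's self-referential
  // !llvm.loop node.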
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Matches metadata with hint name.
|
|
|
|
bool matchesHintMetadataName(MDNode *Node, ArrayRef<Hint> HintTypes) {
|
2016-05-05 00:54:54 +00:00
|
|
|
MDString *Name = dyn_cast<MDString>(Node->getOperand(0));
|
2015-08-11 00:52:54 +00:00
|
|
|
if (!Name)
|
|
|
|
return false;
|
2015-02-01 16:56:02 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
for (auto H : HintTypes)
|
|
|
|
if (Name->getString().endswith(H.Name))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Sets current hints into loop metadata, keeping other values intact.
|
|
|
|
void writeHintsToMetadata(ArrayRef<Hint> HintTypes) {
|
2017-10-12 23:30:03 +00:00
|
|
|
if (HintTypes.empty())
|
2015-08-11 00:52:54 +00:00
|
|
|
return;
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// Reserve the first element to LoopID (see below).
|
|
|
|
SmallVector<Metadata *, 4> MDs(1);
|
|
|
|
// If the loop already has metadata, then ignore the existing operands.
|
|
|
|
MDNode *LoopID = TheLoop->getLoopID();
|
|
|
|
if (LoopID) {
|
|
|
|
for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
|
|
|
|
MDNode *Node = cast<MDNode>(LoopID->getOperand(i));
|
|
|
|
// If node in update list, ignore old value.
|
|
|
|
if (!matchesHintMetadataName(Node, HintTypes))
|
|
|
|
MDs.push_back(Node);
|
|
|
|
}
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// Now, add the missing hints.
|
|
|
|
for (auto H : HintTypes)
|
|
|
|
MDs.push_back(createHintMetadata(Twine(Prefix(), H.Name).str(), H.Value));
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// Replace current metadata node with new one.
|
|
|
|
LLVMContext &Context = TheLoop->getHeader()->getContext();
|
|
|
|
MDNode *NewLoopID = MDNode::get(Context, MDs);
|
|
|
|
// Set operand 0 to refer to the loop id itself.
|
|
|
|
NewLoopID->replaceOperandWith(0, NewLoopID);
|
2015-02-19 19:15:04 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
TheLoop->setLoopID(NewLoopID);
|
|
|
|
}
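  // Illustrative before/after (assumed input): given an existing loop ID
  //   !0 = distinct !{!0, !{!"llvm.loop.vectorize.width", i32 4}}
  // writing the IsVectorized hint produces a fresh self-referential node such as
  //   !1 = distinct !{!1, !{!"llvm.loop.vectorize.width", i32 4},
  //                   !{!"llvm.loop.isvectorized", i32 1}}
  // Operands of the old node are carried over only when they do not match one
  // of the hints being rewritten.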
|
2013-05-15 01:44:30 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// The loop these hints belong to.
|
|
|
|
const Loop *TheLoop;
|
2016-07-20 21:44:26 +00:00
|
|
|
|
|
|
|
/// Interface to emit optimization remarks.
|
|
|
|
OptimizationRemarkEmitter &ORE;
|
2015-08-11 00:52:54 +00:00
|
|
|
};
|
2015-08-10 19:51:46 +00:00
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
static void emitMissedWarning(Function *F, Loop *L,
|
2016-07-20 04:03:43 +00:00
|
|
|
const LoopVectorizeHints &LH,
|
|
|
|
OptimizationRemarkEmitter *ORE) {
|
2016-09-29 16:23:12 +00:00
|
|
|
LH.emitRemarkWithHints();
|
2015-03-09 06:14:18 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
if (LH.getForce() == LoopVectorizeHints::FK_Enabled) {
|
|
|
|
if (LH.getWidth() != 1)
|
2017-02-02 05:41:51 +00:00
|
|
|
ORE->emit(DiagnosticInfoOptimizationFailure(
|
|
|
|
DEBUG_TYPE, "FailedRequestedVectorization",
|
|
|
|
L->getStartLoc(), L->getHeader())
|
|
|
|
<< "loop not vectorized: "
|
|
|
|
<< "failed explicitly specified loop vectorization");
|
2015-08-11 00:52:54 +00:00
|
|
|
else if (LH.getInterleave() != 1)
|
2017-02-02 05:41:51 +00:00
|
|
|
ORE->emit(DiagnosticInfoOptimizationFailure(
|
|
|
|
DEBUG_TYPE, "FailedRequestedInterleaving", L->getStartLoc(),
|
|
|
|
L->getHeader())
|
|
|
|
<< "loop not interleaved: "
|
|
|
|
<< "failed explicitly specified loop interleaving");
|
2015-08-11 00:52:54 +00:00
|
|
|
}
|
|
|
|
}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2018-01-07 16:02:58 +00:00
|
|
|
namespace llvm {
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
|
|
|
|
/// to what vectorization factor.
|
|
|
|
/// This class does not look at the profitability of vectorization, only the
|
|
|
|
/// legality. This class has two main kinds of checks:
|
|
|
|
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
|
|
|
|
/// will change the order of memory accesses in a way that will change the
|
|
|
|
/// correctness of the program.
|
|
|
|
/// * Scalars checks - The code in canVectorizeInstrs and canVectorizeMemory
|
|
|
|
/// checks for a number of different conditions, such as the availability of a
|
|
|
|
/// single induction variable, that all types are supported and vectorizable,
|
|
|
|
/// etc. This code reflects the capabilities of InnerLoopVectorizer.
|
|
|
|
/// This class is also used by InnerLoopVectorizer for identifying
|
|
|
|
/// the induction variables and the different reduction variables.
|
|
|
|
class LoopVectorizationLegality {
|
2013-01-07 10:44:06 +00:00
|
|
|
public:
|
2016-07-09 22:56:50 +00:00
|
|
|
LoopVectorizationLegality(
|
|
|
|
Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
|
|
|
|
TargetLibraryInfo *TLI, AliasAnalysis *AA, Function *F,
|
|
|
|
const TargetTransformInfo *TTI,
|
|
|
|
std::function<const LoopAccessInfo &(Loop &)> *GetLAA, LoopInfo *LI,
|
2016-07-20 21:44:26 +00:00
|
|
|
OptimizationRemarkEmitter *ORE, LoopVectorizationRequirements *R,
|
2018-02-04 15:42:24 +00:00
|
|
|
LoopVectorizeHints *H, DemandedBits *DB, AssumptionCache *AC)
|
2017-10-12 23:30:03 +00:00
|
|
|
: TheLoop(L), PSE(PSE), TLI(TLI), TTI(TTI), DT(DT), GetLAA(GetLAA),
|
2018-02-04 15:42:24 +00:00
|
|
|
ORE(ORE), InterleaveInfo(PSE, L, DT, LI), Requirements(R), Hints(H),
|
|
|
|
DB(DB), AC(AC) {}
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// ReductionList contains the reduction descriptors for all
|
|
|
|
/// of the reductions that were found in the loop.
|
2017-10-12 23:30:03 +00:00
|
|
|
using ReductionList = DenseMap<PHINode *, RecurrenceDescriptor>;
|
2013-02-05 15:08:02 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// InductionList saves induction variables and maps them to the
|
|
|
|
/// induction descriptor.
|
2017-10-12 23:30:03 +00:00
|
|
|
using InductionList = MapVector<PHINode *, InductionDescriptor>;
|
2014-08-02 00:14:03 +00:00
|
|
|
|
2016-02-19 17:56:08 +00:00
|
|
|
/// RecurrenceSet contains the phi nodes that are recurrences other than
|
|
|
|
/// inductions and reductions.
|
2017-10-12 23:30:03 +00:00
|
|
|
using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;
|
2016-02-19 17:56:08 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns true if it is legal to vectorize this loop.
|
|
|
|
/// This does not mean that it is profitable to vectorize this
|
|
|
|
/// loop, only that it is legal to do so.
|
|
|
|
bool canVectorize();
|
2014-10-14 22:59:49 +00:00
|
|
|
|
2017-02-14 22:14:01 +00:00
|
|
|
/// Returns the primary induction variable.
|
|
|
|
PHINode *getPrimaryInduction() { return PrimaryInduction; }
|
2013-01-07 10:44:06 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns the reduction variables found in the loop.
|
|
|
|
ReductionList *getReductionVars() { return &Reductions; }
|
Small refactor on VectorizerHint for deduplication
Previously, the hint mechanism relied on clean up passes to remove redundant
metadata, which still showed up if running opt at low levels of optimization.
That also has shown that multiple nodes of the same type, but with different
values could still coexist, even if temporary, and cause confusion if the
next pass got the wrong value.
This patch makes sure that, if metadata already exists in a loop, the hint
mechanism will never append a new node, but always replace the existing one.
It also enhances the algorithm to cope with more metadata types in the future
by just adding a new type, not a lot of code.
Re-applying again due to MSVC 2013 being minimum requirement, and this patch
having C++11 that MSVC 2012 didn't support.
Fixes PR20655.
llvm-svn: 216870
2014-09-01 10:00:17 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns the induction variables found in the loop.
|
|
|
|
InductionList *getInductionVars() { return &Inductions; }
|
|
|
|
|
2016-02-19 17:56:08 +00:00
|
|
|
/// Return the first-order recurrences found in the loop.
|
|
|
|
RecurrenceSet *getFirstOrderRecurrences() { return &FirstOrderRecurrences; }
|
|
|
|
|
2017-06-30 21:05:06 +00:00
|
|
|
/// Return the set of instructions to sink to handle first-order recurrences.
|
|
|
|
DenseMap<Instruction *, Instruction *> &getSinkAfter() { return SinkAfter; }
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns the widest induction type.
|
|
|
|
Type *getWidestInductionType() { return WidestIndTy; }
|
|
|
|
|
[LV] Support efficient vectorization of an induction with redundant casts
D30041 extended SCEVPredicateRewriter to improve handling of Phi nodes whose
update chain involves casts; PSCEV can now build an AddRecurrence for some
forms of such phi nodes, under the proper runtime overflow test. This means
that we can identify such phi nodes as an induction, and the loop-vectorizer
can now vectorize such inductions, however inefficiently. The vectorizer
doesn't know that it can ignore the casts, and so it vectorizes them.
This patch records the casts in the InductionDescriptor, so that they could
be marked to be ignored for cost calculation (we use VecValuesToIgnore for
that) and ignored for vectorization/widening/scalarization (i.e. treated as
TriviallyDead).
In addition to marking all these casts to be ignored, we also need to make
sure that each cast is mapped to the right vector value in the vector loop body
(be it a widened, vectorized, or scalarized induction). So whenever an
induction phi is mapped to a vector value (during vectorization/widening/
scalarization), we also map the respective cast instruction (if exists) to that
vector value. (If the phi-update sequence of an induction involves more than one
cast, then the above mapping to vector value is relevant only for the last cast
of the sequence as we allow only the "last cast" to be used outside the
induction update chain itself).
This is the last step in addressing PR30654.
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
/// Returns True if V is a Phi node of an induction variable in this loop.
|
|
|
|
bool isInductionPhi(const Value *V);
|
|
|
|
|
|
|
|
/// Returns True if V is a cast that is part of an induction def-use chain,
|
|
|
|
/// and had been proven to be redundant under a runtime guard (in other
|
|
|
|
/// words, the cast has the same SCEV expression as the induction phi).
|
|
|
|
bool isCastedInductionVariable(const Value *V);
|
|
|
|
|
|
|
|
/// Returns True if V can be considered as an induction variable in this
|
|
|
|
/// loop. V can be the induction phi, or some redundant cast in the def-use
|
|
|
|
/// chain of the induction phi.
|
2015-08-11 00:52:54 +00:00
|
|
|
bool isInductionVariable(const Value *V);
|
|
|
|
|
2015-11-19 14:19:06 +00:00
|
|
|
/// Returns True if PN is a reduction variable in this loop.
|
|
|
|
bool isReductionVariable(PHINode *PN) { return Reductions.count(PN); }
|
|
|
|
|
2016-02-19 17:56:08 +00:00
|
|
|
/// Returns True if Phi is a first-order recurrence in this loop.
|
|
|
|
bool isFirstOrderRecurrence(const PHINode *Phi);
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Return true if the block BB needs to be predicated in order for the loop
|
|
|
|
/// to be vectorized.
|
|
|
|
bool blockNeedsPredication(BasicBlock *BB);
|
|
|
|
|
2016-05-24 16:51:26 +00:00
|
|
|
/// Check if this pointer is consecutive when vectorizing. This happens
|
2015-08-11 00:52:54 +00:00
|
|
|
/// when the last index of the GEP is the induction variable, or when the
|
|
|
|
/// pointer itself is an induction variable.
|
|
|
|
/// This check allows us to vectorize A[idx] into a wide load/store.
|
|
|
|
/// Returns:
|
|
|
|
/// 0 - Stride is unknown or non-consecutive.
|
|
|
|
/// 1 - Address is consecutive.
|
|
|
|
/// -1 - Address is consecutive, and decreasing.
|
2017-12-16 02:55:24 +00:00
|
|
|
/// NOTE: This method must only be used before modifying the original scalar
|
|
|
|
/// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
|
2015-08-11 00:52:54 +00:00
|
|
|
int isConsecutivePtr(Value *Ptr);
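  // Illustrative return values, assuming 'i' is this loop's induction variable
  // and A is a plain array:
  //   &A[i]     ->  1  (consecutive, increasing)
  //   &A[N - i] -> -1  (consecutive, decreasing)
  //   &A[2 * i] ->  0  (strided by 2, not consecutive)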
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns true if the value V is uniform within the loop.
|
|
|
|
bool isUniform(Value *V);
|
2013-05-28 20:00:34 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns the information that we collected about runtime memory check.
|
|
|
|
const RuntimePointerChecking *getRuntimePointerChecking() const {
|
|
|
|
return LAI->getRuntimePointerChecking();
|
|
|
|
}
|
2013-08-28 18:33:10 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
const LoopAccessInfo *getLAI() const { return LAI; }
|
2013-05-28 20:00:34 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// \brief Check if \p Instr belongs to any interleaved access group.
|
|
|
|
bool isAccessInterleaved(Instruction *Instr) {
|
|
|
|
return InterleaveInfo.isInterleaved(Instr);
|
2013-05-28 20:00:34 +00:00
|
|
|
}
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// \brief Get the interleaved access group that \p Instr belongs to.
|
|
|
|
const InterleaveGroup *getInterleavedAccessGroup(Instruction *Instr) {
|
|
|
|
return InterleaveInfo.getInterleaveGroup(Instr);
|
|
|
|
}
|
2015-08-11 00:35:44 +00:00
|
|
|
|
2016-04-27 18:21:36 +00:00
|
|
|
/// \brief Returns true if an interleaved group requires a scalar iteration
|
|
|
|
/// to handle accesses with gaps.
|
|
|
|
bool requiresScalarEpilogue() const {
|
|
|
|
return InterleaveInfo.requiresScalarEpilogue();
|
|
|
|
}
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
|
2015-08-11 00:35:44 +00:00
|
|
|
|
2017-09-14 07:40:02 +00:00
|
|
|
uint64_t getMaxSafeRegisterWidth() const {
|
|
|
|
return LAI->getDepChecker().getMaxSafeRegisterWidth();
|
|
|
|
}
|
|
|
|
|
2016-06-16 22:57:55 +00:00
|
|
|
bool hasStride(Value *V) { return LAI->hasStride(V); }
|
2015-08-11 00:35:44 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Returns true if vector representation of the instruction \p I
|
|
|
|
/// requires mask.
|
2016-05-05 00:54:54 +00:00
|
|
|
bool isMaskRequired(const Instruction *I) { return (MaskedOp.count(I) != 0); }
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
unsigned getNumStores() const { return LAI->getNumStores(); }
|
|
|
|
unsigned getNumLoads() const { return LAI->getNumLoads(); }
|
2016-09-08 19:11:07 +00:00
|
|
|
|
2017-05-09 10:43:25 +00:00
|
|
|
// Returns true if the NoNaN attribute is set on the function.
|
|
|
|
bool hasFunNoNaNAttr() const { return HasFunNoNaNAttr; }
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
private:
|
|
|
|
/// Check if a single basic block loop is vectorizable.
|
|
|
|
/// At this point we know that this is a loop with a constant trip count
|
|
|
|
/// and we only need to check individual instructions.
|
|
|
|
bool canVectorizeInstrs();
|
2015-08-11 00:35:44 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// When we vectorize loops we may change the order in which
|
|
|
|
/// we read and write from memory. This method checks if it is
|
|
|
|
/// legal to vectorize the code, considering only memory constraints.
|
|
|
|
/// Returns true if the loop is vectorizable.
|
|
|
|
bool canVectorizeMemory();
|
2014-07-31 21:22:22 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Return true if we can vectorize this loop using the IF-conversion
|
|
|
|
/// transformation.
|
|
|
|
bool canVectorizeWithIfConvert();
|
|
|
|
|
|
|
|
/// Return true if all of the instructions in the block can be speculatively
|
|
|
|
/// executed. \p SafePtrs is a list of addresses that are known to be legal
|
|
|
|
/// and we know that we can read from them without segfault.
|
|
|
|
bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
|
|
|
|
|
2016-05-05 15:14:01 +00:00
|
|
|
/// Updates the vectorization state by adding \p Phi to the inductions list.
|
|
|
|
/// This can set \p Phi as the main induction of the loop if \p Phi is a
|
|
|
|
/// better choice for the main induction than the existing one.
|
2016-06-17 20:41:14 +00:00
|
|
|
void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
|
2016-06-15 00:35:26 +00:00
|
|
|
SmallPtrSetImpl<Value *> &AllowedExit);
|
2016-05-05 15:14:01 +00:00
|
|
|
|
2016-09-29 16:49:42 +00:00
|
|
|
/// Create an analysis remark that explains why vectorization failed
|
|
|
|
///
|
|
|
|
/// \p RemarkName is the identifier for the remark. If \p I is passed it is
|
|
|
|
/// an instruction that prevents vectorization. Otherwise the loop is used
|
|
|
|
/// for the location of the remark. \return the remark object that can be
|
|
|
|
/// streamed to.
|
|
|
|
OptimizationRemarkAnalysis
|
|
|
|
createMissedAnalysis(StringRef RemarkName, Instruction *I = nullptr) const {
|
2016-09-29 17:05:35 +00:00
|
|
|
return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
|
|
|
|
RemarkName, TheLoop, I);
|
2016-09-29 16:49:42 +00:00
|
|
|
}
|
|
|
|
|
2016-06-15 22:58:27 +00:00
|
|
|
/// \brief If an access has a symbolic strides, this maps the pointer value to
|
|
|
|
/// the stride symbol.
|
2016-06-16 22:57:55 +00:00
|
|
|
const ValueToValueMap *getSymbolicStrides() {
|
|
|
|
// FIXME: Currently, the set of symbolic strides is sometimes queried before
|
|
|
|
// it's collected. This happens from canVectorizeWithIfConvert, when the
|
|
|
|
// pointer is checked to reference consecutive elements suitable for a
|
|
|
|
// masked access.
|
|
|
|
return LAI ? &LAI->getSymbolicStrides() : nullptr;
|
|
|
|
}
|
2016-06-15 15:49:46 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// The loop that we evaluate.
|
|
|
|
Loop *TheLoop;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
Re-commit r255115, with the PredicatedScalarEvolution class moved to
ScalarEvolution.h, in order to avoid cyclic dependencies between the Transform
and Analysis modules:
[LV][LAA] Add a layer over SCEV to apply run-time checked knowledge on SCEV expressions
Summary:
This change creates a layer over ScalarEvolution for LAA and LV, and centralizes the
usage of SCEV predicates. The SCEVPredicatedLayer takes the statically deduced knowledge
by ScalarEvolution and applies the knowledge from the SCEV predicates. The end goal is
that both LAA and LV should use this interface everywhere.
This also solves a problem involving the result of SCEV expression rewriting when
the predicate changes. Suppose we have the expression (sext {a,+,b}) and two predicates
P1: {a,+,b} has nsw
P2: b = 1.
Applying P1 and then P2 gives us {a,+,1}, while applying P2 and then P1 gives us
sext({a,+,1}) (the AddRec expression was changed by P2 so P1 no longer applies).
The SCEVPredicatedLayer maintains the order of transformations by feeding back
the results of previous transformations into new transformations, and therefore
avoiding this issue.
The SCEVPredicatedLayer maintains a cache to remember previous
SCEV rewriting results. This also has the benefit of reducing the overall number
of expression rewrites.
Reviewers: mzolotukhin, anemet
Subscribers: jmolloy, sanjoy, llvm-commits
Differential Revision: http://reviews.llvm.org/D14296
llvm-svn: 255122
2015-12-09 16:06:28 +00:00
|
|
|
/// A wrapper around ScalarEvolution used to add runtime SCEV checks.
|
|
|
|
/// Applies dynamic knowledge to simplify SCEV expressions in the context
|
|
|
|
/// of existing SCEV assumptions. The analysis will also add a minimal set
|
|
|
|
/// of new predicates if this is required to enable vectorization and
|
|
|
|
/// unrolling.
|
|
|
|
PredicatedScalarEvolution &PSE;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Target Library Info.
|
|
|
|
TargetLibraryInfo *TLI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Target Transform Info
|
|
|
|
const TargetTransformInfo *TTI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Dominator Tree.
|
|
|
|
DominatorTree *DT;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// LoopAccess analysis.
|
2016-07-09 22:56:50 +00:00
|
|
|
std::function<const LoopAccessInfo &(Loop &)> *GetLAA;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// And the loop-accesses info corresponding to this loop. This pointer is
|
|
|
|
// null until canVectorizeMemory sets it up.
|
2017-10-12 23:30:03 +00:00
|
|
|
const LoopAccessInfo *LAI = nullptr;
|
|
|
|
|
2016-07-20 21:44:26 +00:00
|
|
|
/// Interface to emit optimization remarks.
|
|
|
|
OptimizationRemarkEmitter *ORE;
|
2013-05-29 03:13:41 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// The interleave access information contains groups of interleaved accesses
|
|
|
|
/// with the same stride and close to each other.
|
|
|
|
InterleavedAccessInfo InterleaveInfo;
|
2013-05-29 03:13:41 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
// --- vectorization state --- //
|
2013-05-28 20:00:34 +00:00
|
|
|
|
2017-02-14 22:14:01 +00:00
|
|
|
/// Holds the primary induction variable. This is the counter of the
|
2015-08-11 00:52:54 +00:00
|
|
|
/// loop.
|
2017-10-12 23:30:03 +00:00
|
|
|
PHINode *PrimaryInduction = nullptr;
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Holds the reduction variables.
|
|
|
|
ReductionList Reductions;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Holds all of the induction variables that we found in the loop.
|
|
|
|
/// Notice that inductions don't need to start at zero and that induction
|
|
|
|
/// variables can be pointers.
|
|
|
|
InductionList Inductions;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
|
|
|
/// Holds all the casts that participate in the update chain of the induction
|
|
|
|
/// variables, and that have been proven to be redundant (possibly under a
|
|
|
|
/// runtime guard). These casts can be ignored when creating the vectorized
|
|
|
|
/// loop body.
|
|
|
|
SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;
|
|
|
|
|
2016-02-19 17:56:08 +00:00
|
|
|
/// Holds the phi nodes that are first-order recurrences.
|
|
|
|
RecurrenceSet FirstOrderRecurrences;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2017-06-30 21:05:06 +00:00
|
|
|
/// Holds instructions that need to sink past other instructions to handle
|
|
|
|
/// first-order recurrences.
|
|
|
|
DenseMap<Instruction *, Instruction *> SinkAfter;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Holds the widest induction type encountered.
|
2017-10-12 23:30:03 +00:00
|
|
|
Type *WidestIndTy = nullptr;
|
2013-05-28 20:00:34 +00:00
|
|
|
|
2016-06-15 00:35:26 +00:00
|
|
|
/// Allowed outside users. This holds the induction and reduction
|
2015-08-11 00:52:54 +00:00
|
|
|
/// vars which can be accessed from outside the loop.
|
2016-05-05 00:54:54 +00:00
|
|
|
SmallPtrSet<Value *, 4> AllowedExit;
|
2016-08-02 14:29:41 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Can we assume the absence of NaNs.
|
2017-10-12 23:30:03 +00:00
|
|
|
bool HasFunNoNaNAttr = false;
|
2013-05-28 20:00:34 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Vectorization requirements that will go through late-evaluation.
|
|
|
|
LoopVectorizationRequirements *Requirements;
|
|
|
|
|
2015-08-11 01:09:15 +00:00
|
|
|
/// Used to emit an analysis of any legality issues.
|
2016-04-14 20:42:18 +00:00
|
|
|
LoopVectorizeHints *Hints;
|
2015-08-11 01:09:15 +00:00
|
|
|
|
2018-02-04 15:42:24 +00:00
|
|
|
/// The demanded bits analysis is used to compute the minimum type size in
|
|
|
|
/// which a reduction can be computed.
|
|
|
|
DemandedBits *DB;
|
|
|
|
|
|
|
|
/// The assumption cache analysis is used to compute the minimum type size in
|
|
|
|
/// which a reduction can be computed.
|
|
|
|
AssumptionCache *AC;
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// While vectorizing these instructions we have to generate a
|
|
|
|
/// call to the appropriate masked intrinsic.
|
[SCEV][LV] Add SCEV Predicates and use them to re-implement stride versioning
Summary:
SCEV Predicates represent conditions that typically cannot be derived from
static analysis, but can be used to reduce SCEV expressions to forms which are
usable for different optimizers.
ScalarEvolution now has the rewriteUsingPredicate method which can simplify a
SCEV expression using a SCEVPredicateSet. The normal workflow of a pass using
SCEVPredicates would be to hold a SCEVPredicateSet and every time assumptions
need to be made a new SCEV Predicate would be created and added to the set.
Each time after calling getSCEV, the user will call the rewriteUsingPredicate
method.
We add two types of predicates
SCEVPredicateSet - implements a set of predicates
SCEVEqualPredicate - tests for equality between two SCEV expressions
We use the SCEVEqualPredicate to re-implement stride versioning. Every time we
version a stride, we will add a SCEVEqualPredicate to the context.
Instead of adding specific stride checks, LoopVectorize now adds a more
generic SCEV check.
We only need to add support for this in the LoopVectorizer since this is the
only pass that will do stride versioning.
Reviewers: mzolotukhin, anemet, hfinkel, sanjoy
Subscribers: sanjoy, hfinkel, rengolin, jmolloy, llvm-commits
Differential Revision: http://reviews.llvm.org/D13595
llvm-svn: 251800
2015-11-02 14:41:02 +00:00
|
|
|
SmallPtrSet<const Instruction *, 8> MaskedOp;
|
2015-08-11 00:52:54 +00:00
|
|
|
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(Loop *L, PredicatedScalarEvolution &PSE,
                             LoopInfo *LI, LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints)
      : TheLoop(L), PSE(PSE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
        AC(AC), ORE(ORE), TheFunction(F), Hints(Hints) {}

  /// \return An upper bound for the vectorization factor, or None if
  /// vectorization should be avoided up front.
  Optional<unsigned> computeMaxVF(bool OptForSize);

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every power of two up to MaxVF. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor selectVectorizationFactor(unsigned MaxVF);

  /// Setup cost-based decisions for user vectorization factor.
  void selectUserVectorizationFactor(unsigned UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
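
  // Illustrative example (not part of the original source): in a loop that
  // loads i8 values, widens them to i32 for arithmetic, and is controlled by
  // an i64 induction variable, the returned pair would typically be (8, 32);
  // the i64 loop index remains scalar and is therefore ignored.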

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(bool OptForSize, unsigned VF,
                                 unsigned LoopCost);
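
  // Illustrative note (not part of the original source): an interleave count
  // IC > 1 replicates the vector loop body IC times, so with VF = 4 and
  // IC = 2 each iteration of the vector loop covers 8 iterations of the
  // original scalar loop.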

  /// A memory access instruction may be vectorized in more than one way.
  /// The form of the instruction after vectorization depends on its cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. This decision map is used for building
  /// the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(unsigned VF);

  /// \brief A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    unsigned LoopInvariantRegs;

    /// Holds the maximum number of concurrent live intervals in the loop.
    unsigned MaxLocalUsers;
  };

  /// \return Information about the register usage of the loop for the
  /// given vectorization factors.
  SmallVector<RegisterUsage, 8> calculateRegisterUsage(ArrayRef<unsigned> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }

  /// \returns True if it is more profitable to scalarize instruction \p I for
  /// vectorization factor \p VF.
  bool isProfitableToScalarize(Instruction *I, unsigned VF) const {
    assert(VF > 1 && "Profitable to scalarize relevant only for VF > 1.");
    auto Scalars = InstsToScalarize.find(VF);
    assert(Scalars != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return Scalars->second.count(I);
  }

  /// Returns true if \p I is known to be uniform after vectorization.
  bool isUniformAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Uniforms.count(VF) && "VF not yet analyzed for uniformity");
    auto UniformsPerVF = Uniforms.find(VF);
    return UniformsPerVF->second.count(I);
  }

  /// Returns true if \p I is known to be scalar after vectorization.
  bool isScalarAfterVectorization(Instruction *I, unsigned VF) const {
    if (VF == 1)
      return true;
    assert(Scalars.count(VF) && "Scalar values are not calculated for VF");
    auto ScalarsPerVF = Scalars.find(VF);
    return ScalarsPerVF->second.count(I);
  }

  /// \returns True if instruction \p I can be truncated to a smaller bitwidth
  /// for vectorization factor \p VF.
  bool canTruncateToMinimalBitwidth(Instruction *I, unsigned VF) const {
    return VF > 1 && MinBWs.count(I) && !isProfitableToScalarize(I, VF) &&
           !isScalarAfterVectorization(I, VF);
  }

  /// Decision that was taken during cost calculation for memory instruction.
  enum InstWidening {
    CM_Unknown,
    CM_Widen,         // For consecutive accesses with stride +1.
    CM_Widen_Reverse, // For consecutive accesses with stride -1.
    CM_Interleave,
    CM_GatherScatter,
    CM_Scalarize
  };
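
  // Illustrative mapping (not part of the original source): a unit-stride
  // access A[i] is typically recorded as CM_Widen, A[n - i] as
  // CM_Widen_Reverse, a pair such as A[2*i] / A[2*i+1] as CM_Interleave, and
  // an indirect access A[B[i]] as CM_GatherScatter when the target supports
  // it, falling back to CM_Scalarize otherwise.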

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// instruction \p I and vector width \p VF.
  void setWideningDecision(Instruction *I, unsigned VF, InstWidening W,
                           unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
  }

  /// Save vectorization decision \p W and \p Cost taken by the cost model for
  /// interleaving group \p Grp and vector width \p VF.
  void setWideningDecision(const InterleaveGroup *Grp, unsigned VF,
                           InstWidening W, unsigned Cost) {
    assert(VF >= 2 && "Expected VF >=2");
    // Broadcast this decision to all instructions inside the group.
    // But the cost will be assigned to one instruction only.
    for (unsigned i = 0; i < Grp->getFactor(); ++i) {
      if (auto *I = Grp->getMember(i)) {
        if (Grp->getInsertPos() == I)
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
        else
          WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
      }
    }
  }
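
  // Illustrative note (not part of the original source): for an interleave
  // group covering A[2*i] and A[2*i+1], every member receives the same
  // widening decision, but the group's cost is recorded only on the member at
  // the group's insert position; the remaining members are recorded with cost
  // 0 so the group is not charged twice.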

  /// Return the cost model decision for the given instruction \p I and vector
  /// width \p VF. Return CM_Unknown if this instruction did not pass
  /// through the cost modeling.
  InstWidening getWideningDecision(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    auto Itr = WideningDecisions.find(InstOnVF);
    if (Itr == WideningDecisions.end())
      return CM_Unknown;
    return Itr->second.first;
  }

  /// Return the vectorization cost for the given instruction \p I and vector
  /// width \p VF.
  unsigned getWideningCost(Instruction *I, unsigned VF) {
    assert(VF >= 2 && "Expected VF >=2");
    std::pair<Instruction *, unsigned> InstOnVF = std::make_pair(I, VF);
    assert(WideningDecisions.count(InstOnVF) && "The cost is not calculated");
    return WideningDecisions[InstOnVF].second;
  }

  /// Return True if instruction \p I is an optimizable truncate whose operand
  /// is an induction variable. Such a truncate will be removed by adding a new
  /// induction variable with the destination type.
  bool isOptimizableIVTruncate(Instruction *I, unsigned VF) {
    // If the instruction is not a truncate, return false.
    auto *Trunc = dyn_cast<TruncInst>(I);
    if (!Trunc)
      return false;

    // Get the source and destination types of the truncate.
    Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
    Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);

    // If the truncate is free for the given types, return false. Replacing a
    // free truncate with an induction variable would add an induction variable
    // update instruction to each iteration of the loop. We exclude from this
    // check the primary induction variable since it will need an update
    // instruction regardless.
    Value *Op = Trunc->getOperand(0);
    if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
      return false;

    // If the truncated value is not an induction variable, return false.
    return Legal->isInductionPhi(Op);
  }
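
  // Illustrative example (not part of the original source): for
  //   %t = trunc i64 %iv to i32
  // where %iv is a loop induction variable, the trunc can be removed by
  // introducing a parallel i32 induction variable with the destination type,
  // provided the truncate is not already free for the target.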

  /// Collects the instructions to scalarize for each predicated instruction in
  /// the loop.
  void collectInstsToScalarize(unsigned VF);

  /// Collect Uniform and Scalar values for the given \p VF.
  /// The sets depend on CM decision for Load/Store instructions
  /// that may be vectorized as interleave, gather-scatter or scalarized.
  void collectUniformsAndScalars(unsigned VF) {
    // Do the analysis once.
    if (VF == 1 || Uniforms.count(VF))
      return;
    setCostBasedWideningDecision(VF);
    collectLoopUniforms(VF);
    collectLoopScalars(VF);
  }

  /// Returns true if the target machine supports masked store operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
  }

  /// Returns true if the target machine supports masked load operation
  /// for the given \p DataType and kind of access to \p Ptr.
  bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
    return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
  }

  /// Returns true if the target machine supports masked scatter operation
  /// for the given \p DataType.
  bool isLegalMaskedScatter(Type *DataType) {
    return TTI.isLegalMaskedScatter(DataType);
  }

  /// Returns true if the target machine supports masked gather operation
  /// for the given \p DataType.
  bool isLegalMaskedGather(Type *DataType) {
    return TTI.isLegalMaskedGather(DataType);
  }

  /// Returns true if the target machine can represent \p V as a masked gather
  /// or scatter operation.
  bool isLegalGatherOrScatter(Value *V) {
    bool LI = isa<LoadInst>(V);
    bool SI = isa<StoreInst>(V);
    if (!LI && !SI)
      return false;
    auto *Ty = getMemInstValueType(V);
    return (LI && isLegalMaskedGather(Ty)) || (SI && isLegalMaskedScatter(Ty));
  }
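
  // Illustrative note (not part of the original source): the masked load/store
  // helpers above apply to consecutive accesses executed under a condition
  // (e.g. 'if (c[i]) A[i] = x;'), whereas isLegalGatherOrScatter covers
  // arbitrary addresses such as a load of A[B[i]]; in both cases legality is
  // delegated to TargetTransformInfo.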

  /// Returns true if \p I is an instruction that will be scalarized with
  /// predication. Such instructions include conditional stores and
  /// instructions that may divide by zero.
  bool isScalarWithPredication(Instruction *I);
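
  // Illustrative example (not part of the original source): in
  //   if (b[i] != 0) q[i] = a[i] / b[i];
  // the division may trap for lanes where b[i] is zero, so it is executed
  // lane-by-lane under a predicate instead of as a single wide division.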

  /// Returns true if \p I is a memory instruction with consecutive memory
  /// access that can be widened.
  bool memoryInstructionCanBeWidened(Instruction *I, unsigned VF = 1);

private:
  unsigned NumPredStores = 0;

  /// \return An upper bound for the vectorization factor, larger than zero.
  /// One is returned if vectorization should best be avoided due to cost.
  unsigned computeFeasibleMaxVF(bool OptForSize, unsigned ConstTripCount);

  /// The vectorization cost is a combination of the cost itself and a boolean
  /// indicating whether any of the contributing operations will actually
  /// operate on vector values after type legalization in the backend. If this
  /// latter value is false, then all operations will be scalarized (i.e. no
  /// vectorization has actually taken place).
  using VectorizationCostTy = std::pair<unsigned, bool>;

  /// Returns the expected execution cost. The unit of the cost does
  /// not matter because we use the 'cost' units to compare different
  /// vector widths. The cost that is returned is *not* normalized by
  /// the factor width.
  VectorizationCostTy expectedCost(unsigned VF);

  /// Returns the execution time cost of an instruction for a given vector
  /// width. Vector width of one means scalar.
  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);

  /// The cost-computation logic from getInstructionCost which provides
  /// the vector type as an output parameter.
  unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);

  /// Calculate vectorization cost of memory instruction \p I.
  unsigned getMemoryInstructionCost(Instruction *I, unsigned VF);

  /// The cost computation for scalarized memory instruction.
  unsigned getMemInstScalarizationCost(Instruction *I, unsigned VF);

  /// The cost computation for interleaving group of memory instructions.
  unsigned getInterleaveGroupCost(Instruction *I, unsigned VF);

  /// The cost computation for Gather/Scatter instruction.
  unsigned getGatherScatterCost(Instruction *I, unsigned VF);

  /// The cost computation for widening instruction \p I with consecutive
  /// memory access.
  unsigned getConsecutiveMemOpCost(Instruction *I, unsigned VF);

  /// The cost calculation for Load instruction \p I with uniform pointer -
  /// scalar load + broadcast.
  unsigned getUniformMemOpCost(Instruction *I, unsigned VF);

  /// Returns whether the instruction is a load or store and will be emitted
  /// as a vector operation.
  bool isConsecutiveLoadOrStore(Instruction *I);

  /// Returns true if an artificially high cost for emulated masked memrefs
  /// should be used.
  bool useEmulatedMaskMemRefHack(Instruction *I);

  /// Create an analysis remark that explains why vectorization failed
  ///
  /// \p RemarkName is the identifier for the remark. \return the remark object
  /// that can be streamed to.
  OptimizationRemarkAnalysis createMissedAnalysis(StringRef RemarkName) {
    return ::createMissedAnalysis(Hints->vectorizeAnalysisPassName(),
                                  RemarkName, TheLoop);
  }

  /// Map of scalar integer values to the smallest bitwidth they can be legally
  /// represented as. The vector equivalents of these values should be
  /// truncated to this type.
  MapVector<Instruction *, uint64_t> MinBWs;
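
  // Illustrative example (not part of the original source): i8 arithmetic that
  // C promoted to i32 may only demand the low 8 bits; recording a minimal
  // bitwidth of 8 here lets the vector code use <VF x i8> instead of
  // <VF x i32>, fitting four times as many elements into each register.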

  /// A type representing the costs for instructions if they were to be
  /// scalarized rather than vectorized. The entries are Instruction-Cost
  /// pairs.
  using ScalarCostsTy = DenseMap<Instruction *, unsigned>;

  /// A set containing all BasicBlocks that are known to be present after
  /// vectorization as predicated blocks.
  SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;

  /// A map holding scalar costs for different vectorization factors. The
  /// presence of a cost for an instruction in the mapping indicates that the
  /// instruction will be scalarized when vectorizing with the associated
  /// vectorization factor. The entries are VF-ScalarCostTy pairs.
  DenseMap<unsigned, ScalarCostsTy> InstsToScalarize;

  /// Holds the instructions known to be uniform after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Uniforms;

  /// Holds the instructions known to be scalar after vectorization.
  /// The data is collected per VF.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> Scalars;

  /// Holds the instructions (address computations) that are forced to be
  /// scalarized.
  DenseMap<unsigned, SmallPtrSet<Instruction *, 4>> ForcedScalars;

  /// Returns the expected difference in cost from scalarizing the expression
  /// feeding a predicated instruction \p PredInst. The instructions to
  /// scalarize and their scalar costs are collected in \p ScalarCosts. A
  /// non-negative return value implies the expression will be scalarized.
  /// Currently, only single-use chains are considered for scalarization.
  int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
                              unsigned VF);
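
  // Illustrative note (not part of the original source): if a predicated
  // division must be scalarized anyway, single-use instructions feeding it
  // (e.g. the loads and adds computing its operands) can often be scalarized
  // with it; the returned discount is roughly the vector cost of that chain
  // minus its scalar cost, so a non-negative value means scalarizing the
  // whole chain is at least as cheap.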

  /// Collect the instructions that are uniform after vectorization. An
  /// instruction is uniform if we represent it with a single scalar value in
  /// the vectorized loop corresponding to each vector iteration. Examples of
  /// uniform instructions include pointer operands of consecutive or
  /// interleaved memory accesses. Note that although uniformity implies an
  /// instruction will be scalar, the reverse is not true. In general, a
  /// scalarized instruction will be represented by VF scalar values in the
  /// vectorized loop, each corresponding to an iteration of the original
  /// scalar loop.
  void collectLoopUniforms(unsigned VF);

  /// Collect the instructions that are scalar after vectorization. An
  /// instruction is scalar if it is known to be uniform or will be scalarized
  /// during vectorization. Non-uniform scalarized instructions will be
  /// represented by VF values in the vectorized loop, each corresponding to an
  /// iteration of the original scalar loop.
  void collectLoopScalars(unsigned VF);

  /// Keeps cost model vectorization decision and cost for instructions.
  /// Right now it is used for memory instructions only.
  using DecisionList = DenseMap<std::pair<Instruction *, unsigned>,
                                std::pair<InstWidening, unsigned>>;

  DecisionList WideningDecisions;
|
|
|
|
|
2016-10-05 20:23:46 +00:00
|
|
|
public:
|
2015-08-11 00:52:54 +00:00
|
|
|
/// The loop that we evaluate.
|
|
|
|
Loop *TheLoop;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
/// Predicated scalar evolution analysis.
|
|
|
|
PredicatedScalarEvolution &PSE;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Loop Info analysis.
|
|
|
|
LoopInfo *LI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Vectorization legality.
|
|
|
|
LoopVectorizationLegality *Legal;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Vector target information.
|
|
|
|
const TargetTransformInfo &TTI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
/// Target Library Info.
|
|
|
|
const TargetLibraryInfo *TLI;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
/// Demanded bits analysis.
|
[LoopVectorize] Shrink integer operations into the smallest type possible
C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
type (e.g. i32) whenever arithmetic is performed on them.
For targets with native i8 or i16 operations, usually InstCombine can shrink
the arithmetic type down again. However InstCombine refuses to create illegal
types, so for targets without i8 or i16 registers, the lengthening and
shrinking remains.
Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
their scalar equivalents do not, so during vectorization it is important to
remove these lengthens and truncates when deciding the profitability of
vectorization.
The algorithm this uses starts at truncs and icmps, trawling their use-def
chains until they terminate or instructions outside the loop are found (or
unsafe instructions like inttoptr casts are found). If the use-def chains
starting from different root instructions (truncs/icmps) meet, they are
unioned. The demanded bits of each node in the graph are ORed together to form
an overall mask of the demanded bits in the entire graph. The minimum bitwidth
that graph can be truncated to is the bitwidth minus the number of leading
zeroes in the overall mask.
The intention is that this algorithm should "first do no harm", so it will
never insert extra cast instructions. This is why the use-def graphs are
unioned, so that subgraphs with different minimum bitwidths do not need casts
inserted between them.
This algorithm works hard to reduce compile time impact. DemandedBits are only
queried if there are extends of illegal types and if a truncate to an illegal
type is seen. In the general case, this results in a simple linear scan of the
instructions in the loop.
No non-noise compile time impact was seen on a clang bootstrap build.
llvm-svn: 250032
2015-10-12 12:34:45 +00:00
|
|
|
DemandedBits *DB;
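A minimal sketch of the bitwidth rule described in the commit message above (the helper name and the use of llvm::APInt are assumptions; only the subtraction mirrors the description):
  // Sketch only: given the OR of all demanded-bit masks in a use-def graph,
  // the narrowest width the graph can be truncated to is the mask width
  // minus its leading zeros.
  //   unsigned minBitwidthForGraph(const APInt &CombinedDemandedBits) {
  //     return CombinedDemandedBits.getBitWidth() -
  //            CombinedDemandedBits.countLeadingZeros();
  //   }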
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-12-19 08:22:17 +00:00
|
|
|
/// Assumption cache.
|
|
|
|
AssumptionCache *AC;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-07-20 21:44:26 +00:00
|
|
|
/// Interface to emit optimization remarks.
|
|
|
|
OptimizationRemarkEmitter *ORE;
|
|
|
|
|
2015-08-11 00:52:54 +00:00
|
|
|
const Function *TheFunction;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
/// Loop Vectorize Hint.
|
2015-08-11 00:52:54 +00:00
|
|
|
const LoopVectorizeHints *Hints;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
/// Values to ignore in the cost model.
|
|
|
|
SmallPtrSet<const Value *, 16> ValuesToIgnore;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
/// Values to ignore in the cost model when VF > 1.
|
|
|
|
SmallPtrSet<const Value *, 16> VecValuesToIgnore;
|
2015-08-11 00:52:54 +00:00
|
|
|
};
|
2014-07-16 00:36:00 +00:00
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
} // end namespace llvm
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2015-08-10 19:51:46 +00:00
|
|
|
/// \brief This holds vectorization requirements that must be verified late in
|
|
|
|
/// the process. The requirements are set by legality and the cost model. Once
|
|
|
|
/// vectorization has been determined to be possible and profitable the
|
|
|
|
/// requirements can be verified by looking for metadata or compiler options.
|
|
|
|
/// For example, some loops require FP commutativity which is only allowed if
|
|
|
|
/// vectorization is explicitly specified or if the fast-math compiler option
|
|
|
|
/// has been provided.
|
|
|
|
/// Late evaluation of these requirements allows helpful diagnostics to be
|
|
|
|
/// composed that tell the user what needs to be done to vectorize the loop. For
|
|
|
|
/// example, by specifying #pragma clang loop vectorize or -ffast-math. Late
|
|
|
|
/// evaluation should be used only when diagnostics can be generated that can be
|
|
|
|
/// followed by a non-expert user.
|
|
|
|
class LoopVectorizationRequirements {
|
|
|
|
public:
|
2017-10-12 23:30:03 +00:00
|
|
|
LoopVectorizationRequirements(OptimizationRemarkEmitter &ORE) : ORE(ORE) {}
|
2015-08-10 19:51:46 +00:00
|
|
|
|
|
|
|
void addUnsafeAlgebraInst(Instruction *I) {
|
|
|
|
// First unsafe algebra instruction.
|
|
|
|
if (!UnsafeAlgebraInst)
|
|
|
|
UnsafeAlgebraInst = I;
|
|
|
|
}
|
|
|
|
|
2015-08-10 23:01:55 +00:00
|
|
|
void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
|
|
|
|
|
|
|
|
bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints) {
|
2016-09-29 18:04:47 +00:00
|
|
|
const char *PassName = Hints.vectorizeAnalysisPassName();
|
2015-08-21 23:03:24 +00:00
|
|
|
bool Failed = false;
|
2015-08-27 18:56:49 +00:00
|
|
|
if (UnsafeAlgebraInst && !Hints.allowReordering()) {
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE.emit([&]() {
|
|
|
|
return OptimizationRemarkAnalysisFPCommute(
|
|
|
|
PassName, "CantReorderFPOps",
|
|
|
|
UnsafeAlgebraInst->getDebugLoc(),
|
|
|
|
UnsafeAlgebraInst->getParent())
|
|
|
|
<< "loop not vectorized: cannot prove it is safe to reorder "
|
|
|
|
"floating-point operations";
|
|
|
|
});
|
2015-08-21 23:03:24 +00:00
|
|
|
Failed = true;
|
2015-08-10 19:51:46 +00:00
|
|
|
}
|
2015-08-10 23:01:55 +00:00
|
|
|
|
2015-08-27 18:56:49 +00:00
|
|
|
// Test if runtime memcheck thresholds are exceeded.
|
|
|
|
bool PragmaThresholdReached =
|
|
|
|
NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
|
|
|
|
bool ThresholdReached =
|
|
|
|
NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
|
|
|
|
if ((ThresholdReached && !Hints.allowReordering()) ||
|
|
|
|
PragmaThresholdReached) {
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE.emit([&]() {
|
|
|
|
return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
|
2016-09-29 18:04:47 +00:00
|
|
|
L->getStartLoc(),
|
|
|
|
L->getHeader())
|
|
|
|
<< "loop not vectorized: cannot prove it is safe to reorder "
|
2017-10-11 17:12:59 +00:00
|
|
|
"memory operations";
|
|
|
|
});
|
2015-08-10 23:01:55 +00:00
|
|
|
DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
|
2015-08-21 23:03:24 +00:00
|
|
|
Failed = true;
|
2015-08-10 23:01:55 +00:00
|
|
|
}
|
|
|
|
|
2015-08-21 23:03:24 +00:00
|
|
|
return Failed;
|
2015-08-10 19:51:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2017-10-12 23:30:03 +00:00
|
|
|
unsigned NumRuntimePointerChecks = 0;
|
|
|
|
Instruction *UnsafeAlgebraInst = nullptr;
|
2016-07-20 23:50:32 +00:00
|
|
|
|
|
|
|
/// Interface to emit optimization remarks.
|
|
|
|
OptimizationRemarkEmitter &ORE;
|
2015-08-10 19:51:46 +00:00
|
|
|
};
|
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
2018-03-02 12:24:25 +00:00
|
|
|
static void addAcyclicInnerLoop(Loop &L, LoopInfo &LI,
|
|
|
|
SmallVectorImpl<Loop *> &V) {
|
2016-08-12 22:47:13 +00:00
|
|
|
if (L.empty()) {
|
2018-03-02 12:24:25 +00:00
|
|
|
LoopBlocksRPO RPOT(&L);
|
|
|
|
RPOT.perform(&LI);
|
|
|
|
if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
|
2016-08-12 22:47:13 +00:00
|
|
|
V.push_back(&L);
|
|
|
|
return;
|
|
|
|
}
|
2014-03-18 22:00:32 +00:00
|
|
|
for (Loop *InnerL : L)
|
2018-03-02 12:24:25 +00:00
|
|
|
addAcyclicInnerLoop(*InnerL, LI, V);
|
2014-01-25 10:01:55 +00:00
|
|
|
}
|
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
namespace {
|
|
|
|
|
2012-12-10 21:39:02 +00:00
|
|
|
/// The LoopVectorize Pass.
|
2014-01-25 10:01:55 +00:00
|
|
|
struct LoopVectorize : public FunctionPass {
|
2012-12-12 01:11:46 +00:00
|
|
|
/// Pass identification, replacement for typeid
|
|
|
|
static char ID;
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
LoopVectorizePass Impl;
|
|
|
|
|
2013-12-05 21:20:02 +00:00
|
|
|
explicit LoopVectorize(bool NoUnrolling = false, bool AlwaysVectorize = true)
|
2016-07-09 22:56:50 +00:00
|
|
|
: FunctionPass(ID) {
|
|
|
|
Impl.DisableUnrolling = NoUnrolling;
|
|
|
|
Impl.AlwaysVectorize = AlwaysVectorize;
|
2012-10-17 18:25:06 +00:00
|
|
|
initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
2014-03-05 09:10:37 +00:00
|
|
|
bool runOnFunction(Function &F) override {
|
2016-04-22 22:06:11 +00:00
|
|
|
if (skipFunction(F))
|
|
|
|
return false;
|
|
|
|
|
2016-07-09 22:56:50 +00:00
|
|
|
auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
|
|
|
|
auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
|
|
|
|
auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
|
|
|
|
auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
|
|
|
|
auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
|
2015-01-15 10:41:28 +00:00
|
|
|
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
|
2016-07-09 22:56:50 +00:00
|
|
|
auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
|
|
|
|
auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
|
2016-12-19 08:22:17 +00:00
|
|
|
auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
|
2016-07-09 22:56:50 +00:00
|
|
|
auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
|
|
|
|
auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
|
2016-07-20 04:03:43 +00:00
|
|
|
auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
|
2015-03-09 06:14:18 +00:00
|
|
|
|
2016-07-09 22:56:50 +00:00
|
|
|
std::function<const LoopAccessInfo &(Loop &)> GetLAA =
|
|
|
|
[&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
|
2014-04-29 08:55:11 +00:00
|
|
|
|
2016-12-19 08:22:17 +00:00
|
|
|
return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
|
|
|
|
GetLAA, *ORE);
|
2012-10-17 18:25:06 +00:00
|
|
|
}
|
|
|
|
|
2014-03-05 09:10:37 +00:00
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2016-12-19 08:22:17 +00:00
|
|
|
AU.addRequired<AssumptionCacheTracker>();
|
2015-07-14 23:40:50 +00:00
|
|
|
AU.addRequired<BlockFrequencyInfoWrapperPass>();
|
2014-01-13 13:07:17 +00:00
|
|
|
AU.addRequired<DominatorTreeWrapperPass>();
|
2015-01-17 14:16:18 +00:00
|
|
|
AU.addRequired<LoopInfoWrapperPass>();
|
[PM] Port ScalarEvolution to the new pass manager.
This change makes ScalarEvolution a stand-alone object and just produces
one from a pass as needed. Making this work well requires making the
object movable, using references instead of overwritten pointers in
a number of places, and other refactorings.
I've also wired it up to the new pass manager and added a RUN line to
a test to exercise it under the new pass manager. This includes basic
printing support much like with other analyses.
But there is a big and somewhat scary change here. Prior to this patch
ScalarEvolution was never *actually* invalidated!!! Re-running the pass
just re-wired up the various other analyses and didn't remove any of the
existing entries in the SCEV caches or clear out anything at all. This
might seem OK, as everything in SCEV that can do so uses ValueHandles to track
updates to the values that serve as SCEV keys. However, this still means
that as we ran SCEV over each function in the module, we kept
accumulating more and more SCEVs into the cache. At the end, we would
have a SCEV cache with every value that we ever needed a SCEV for in the
entire module!!! Yowzers. The releaseMemory routine would dump all of
this, but that isn't really called during normal runs of the pipeline as
far as I can see.
To make matters worse, there *is* actually a key that we don't update
with value handles -- there is a map keyed off of Loop*s. Because
LoopInfo *does* release its memory from run to run, it is entirely
possible to run SCEV over one function, then over another function, and
then lookup a Loop* from the second function but find an entry inserted
for the first function! Ouch.
To make matters still worse, there are plenty of updates that *don't*
trip a value handle. It seems incredibly unlikely that today GVN or
another pass that invalidates SCEV can update values in *just* such
a way that a subsequent run of SCEV will incorrectly find lookups in
a cache, but it is theoretically possible and would be a nightmare to
debug.
With this refactoring, I've fixed all this by actually destroying and
recreating the ScalarEvolution object from run to run. Technically, this
could increase the amount of malloc traffic we see, but then again it is
also technically correct. ;] I don't actually think we're suffering from
tons of malloc traffic from SCEV because if we were, the fact that we
never clear the memory would seem more likely to have come up as an
actual problem before now. So, I've made the simple fix here. If in fact
there are serious issues with too much allocation and deallocation,
I can work on a clever fix that preserves the allocations (while
clearing the data) between each run, but I'd prefer to do that kind of
optimization with a test case / benchmark that shows why we need such
cleverness (and that can test that we actually make it faster). It's
possible that this will make some things faster by making the SCEV
caches have higher locality (due to being significantly smaller) so
until there is a clear benchmark, I think the simple change is best.
Differential Revision: http://reviews.llvm.org/D12063
llvm-svn: 245193
2015-08-17 02:08:17 +00:00
|
|
|
AU.addRequired<ScalarEvolutionWrapperPass>();
|
[PM] Change the core design of the TTI analysis to use a polymorphic
type erased interface and a single analysis pass rather than an
extremely complex analysis group.
The end result is that the TTI analysis can contain a type erased
implementation that supports the polymorphic TTI interface. We can build
one from a target-specific implementation or from a dummy one in the IR.
I've also factored all of the code into "mix-in"-able base classes,
including CRTP base classes to facilitate calling back up to the most
specialized form when delegating horizontally across the surface. These
aren't as clean as I would like and I'm planning to work on cleaning
some of this up, but I wanted to start by putting it into the right form.
There are a number of reasons for this change, and this particular
design. The first and foremost reason is that an analysis group is
complete overkill, and the chaining delegation strategy was so opaque,
confusing, and high overhead that TTI was suffering greatly for it.
Several of the TTI functions had failed to be implemented in all places
because of the chaining-based delegation making there be no checking of
this. A few other functions were implemented with incorrect delegation.
The message to me was very clear working on this -- the delegation and
analysis group structure was too confusing to be useful here.
The other reason of course is that this is *much* more natural fit for
the new pass manager. This will lay the ground work for a type-erased
per-function info object that can look up the correct subtarget and even
cache it.
Yet another benefit is that this will significantly simplify the
interaction of the pass managers and the TargetMachine. See the future
work below.
The downside of this change is that it is very, very verbose. I'm going
to work to improve that, but it is somewhat an implementation necessity
in C++ to do type erasure. =/ I discussed this design really extensively
with Eric and Hal prior to going down this path, and afterward showed
them the result. No one was really thrilled with it, but there doesn't
seem to be a substantially better alternative. Using a base class and
virtual method dispatch would make the code much shorter, but as
discussed in the update to the programmer's manual and elsewhere,
a polymorphic interface feels like the more principled approach even if
this is perhaps the least compelling example of it. ;]
Ultimately, there is still a lot more to be done here, but this was the
huge chunk that I couldn't really split things out of because this was
the interface change to TTI. I've tried to minimize all the other parts
of this. The follow up work should include at least:
1) Improving the TargetMachine interface by having it directly return
a TTI object. Because we have a non-pass object with value semantics
and an internal type erasure mechanism, we can narrow the interface
of the TargetMachine to *just* do what we need: build and return
a TTI object that we can then insert into the pass pipeline.
2) Make the TTI object be fully specialized for a particular function.
This will include splitting off a minimal form of it which is
sufficient for the inliner and the old pass manager.
3) Add a new pass manager analysis which produces TTI objects from the
target machine for each function. This may actually be done as part
of #2 in order to use the new analysis to implement #2.
4) Work on narrowing the API between TTI and the targets so that it is
easier to understand and less verbose to type erase.
5) Work on narrowing the API between TTI and its clients so that it is
easier to understand and less verbose to forward.
6) Try to improve the CRTP-based delegation. I feel like this code is
just a bit messy and exacerbating the complexity of implementing
the TTI in each target.
Many thanks to Eric and Hal for their help here. I ended up blocked on
this somewhat more abruptly than I expected, and so I appreciate getting
it sorted out very quickly.
Differential Revision: http://reviews.llvm.org/D7293
llvm-svn: 227669
2015-01-31 03:43:40 +00:00
|
|
|
AU.addRequired<TargetTransformInfoWrapperPass>();
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loopholes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-09 17:55:00 +00:00
|
|
|
AU.addRequired<AAResultsWrapperPass>();
|
2016-07-08 20:55:26 +00:00
|
|
|
AU.addRequired<LoopAccessLegacyAnalysis>();
|
2016-04-18 23:55:01 +00:00
|
|
|
AU.addRequired<DemandedBitsWrapperPass>();
|
2016-07-20 04:03:43 +00:00
|
|
|
AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
|
2015-01-17 14:16:18 +00:00
|
|
|
AU.addPreserved<LoopInfoWrapperPass>();
|
2014-01-13 13:07:17 +00:00
|
|
|
AU.addPreserved<DominatorTreeWrapperPass>();
|
2015-09-09 17:55:00 +00:00
|
|
|
AU.addPreserved<BasicAAWrapperPass>();
|
|
|
|
AU.addPreserved<GlobalsAAWrapperPass>();
|
2012-10-17 18:25:06 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2013-01-07 10:44:06 +00:00
|
|
|
} // end anonymous namespace
|
2012-12-10 21:39:02 +00:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
|
2017-03-14 13:07:04 +00:00
|
|
|
// LoopVectorizationCostModel and LoopVectorizationPlanner.
|
2012-12-10 21:39:02 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-12-03 21:33:08 +00:00
|
|
|
Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
|
2012-11-29 19:25:41 +00:00
|
|
|
// We need to place the broadcast of invariant variables outside the loop.
|
2012-12-10 19:25:06 +00:00
|
|
|
Instruction *Instr = dyn_cast<Instruction>(V);
|
2016-05-12 18:44:51 +00:00
|
|
|
bool NewInstr = (Instr && Instr->getParent() == LoopVectorBody);
|
2012-12-10 19:25:06 +00:00
|
|
|
bool Invariant = OrigLoop->isLoopInvariant(V) && !NewInstr;
|
2012-11-29 19:25:41 +00:00
|
|
|
|
|
|
|
// Place the code for broadcasting invariant variables in the new preheader.
|
2013-09-30 15:40:17 +00:00
|
|
|
IRBuilder<>::InsertPointGuard Guard(Builder);
|
2012-11-29 19:25:41 +00:00
|
|
|
if (Invariant)
|
|
|
|
Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
|
|
|
|
|
2012-10-17 18:25:06 +00:00
|
|
|
// Broadcast the scalar into all locations in the vector.
|
2013-01-01 19:55:16 +00:00
|
|
|
Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
|
2012-11-29 19:25:41 +00:00
|
|
|
|
2012-10-17 18:25:06 +00:00
|
|
|
return Shuf;
|
|
|
|
}
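In concrete terms (an assumed example, not from this file): for VF = 4 and a loop-invariant value %x, the splat is emitted once in the vector loop preheader rather than per iteration.
// e.g. (assumed): VF = 4, V = %x  ==>  broadcast = <%x, %x, %x, %x>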
|
|
|
|
|
2017-02-24 18:20:12 +00:00
|
|
|
void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
|
2017-02-17 16:09:07 +00:00
|
|
|
const InductionDescriptor &II, Value *Step, Instruction *EntryVal) {
|
2018-03-20 09:04:39 +00:00
|
|
|
assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
|
|
|
|
"Expected either an induction phi-node or a truncate of it!");
|
2016-06-01 17:16:46 +00:00
|
|
|
Value *Start = II.getStartValue();
|
|
|
|
|
|
|
|
// Construct the initial value of the vector IV in the vector loop preheader
|
|
|
|
auto CurrIP = Builder.saveIP();
|
|
|
|
Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
|
2016-09-01 18:14:27 +00:00
|
|
|
if (isa<TruncInst>(EntryVal)) {
|
2017-02-24 18:20:12 +00:00
|
|
|
assert(Start->getType()->isIntegerTy() &&
|
|
|
|
"Truncation requires an integer type");
|
2016-09-01 18:14:27 +00:00
|
|
|
auto *TruncType = cast<IntegerType>(EntryVal->getType());
|
2017-02-17 16:09:07 +00:00
|
|
|
Step = Builder.CreateTrunc(Step, TruncType);
|
2016-06-01 17:16:46 +00:00
|
|
|
Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
|
|
|
|
}
|
|
|
|
Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
|
2017-02-24 18:20:12 +00:00
|
|
|
Value *SteppedStart =
|
|
|
|
getStepVector(SplatStart, 0, Step, II.getInductionOpcode());
|
|
|
|
|
|
|
|
// We create vector phi nodes for both integer and floating-point induction
|
|
|
|
// variables. Here, we determine the kind of arithmetic we will perform.
|
|
|
|
Instruction::BinaryOps AddOp;
|
|
|
|
Instruction::BinaryOps MulOp;
|
|
|
|
if (Step->getType()->isIntegerTy()) {
|
|
|
|
AddOp = Instruction::Add;
|
|
|
|
MulOp = Instruction::Mul;
|
|
|
|
} else {
|
|
|
|
AddOp = II.getInductionOpcode();
|
|
|
|
MulOp = Instruction::FMul;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Multiply the vectorization factor by the step using integer or
|
|
|
|
// floating-point arithmetic as appropriate.
|
|
|
|
Value *ConstVF = getSignedIntOrFpConstant(Step->getType(), VF);
|
|
|
|
Value *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, Step, ConstVF));
|
2017-02-17 16:09:07 +00:00
|
|
|
|
|
|
|
// Create a vector splat to use in the induction update.
|
|
|
|
//
|
|
|
|
// FIXME: If the step is non-constant, we create the vector splat with
|
|
|
|
// IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
|
|
|
|
// handle a constant vector splat.
|
|
|
|
Value *SplatVF = isa<Constant>(Mul)
|
|
|
|
? ConstantVector::getSplat(VF, cast<Constant>(Mul))
|
|
|
|
: Builder.CreateVectorSplat(VF, Mul);
|
2016-06-01 17:16:46 +00:00
|
|
|
Builder.restoreIP(CurrIP);
|
|
|
|
|
|
|
|
// We may need to add the step a number of times, depending on the unroll
|
|
|
|
// factor. The last of those goes into the PHI.
|
|
|
|
PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
|
|
|
|
&*LoopVectorBody->getFirstInsertionPt());
|
2016-07-21 21:20:15 +00:00
|
|
|
Instruction *LastInduction = VecInd;
|
2016-06-01 17:16:46 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(EntryVal, Part, LastInduction);
|
2018-01-15 10:56:07 +00:00
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
if (isa<TruncInst>(EntryVal))
|
|
|
|
addMetadata(LastInduction, EntryVal);
|
2018-03-20 09:04:39 +00:00
|
|
|
recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, Part);
|
2018-01-15 10:56:07 +00:00
|
|
|
|
2017-02-24 18:20:12 +00:00
|
|
|
LastInduction = cast<Instruction>(addFastMathFlag(
|
|
|
|
Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")));
|
2016-06-01 17:16:46 +00:00
|
|
|
}
|
|
|
|
|
2016-07-21 21:20:15 +00:00
|
|
|
// Move the last step to the end of the latch block. This ensures consistent
|
|
|
|
// placement of all induction updates.
|
|
|
|
auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
|
|
|
|
auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
|
|
|
|
auto *ICmp = cast<Instruction>(Br->getCondition());
|
|
|
|
LastInduction->moveBefore(ICmp);
|
|
|
|
LastInduction->setName("vec.ind.next");
|
|
|
|
|
2016-06-01 17:16:46 +00:00
|
|
|
VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
|
2016-07-21 21:20:15 +00:00
|
|
|
VecInd->addIncoming(LastInduction, LoopVectorLatch);
|
2016-06-01 17:16:46 +00:00
|
|
|
}
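A worked example of the values this builds (the numbers are assumptions, for illustration only): with an integer induction starting at 0, Step = 1, VF = 4 and UF = 2:
// vec.ind (phi, part 0)  = <0, 1, 2, 3>
// step.add (part 1)      = <4, 5, 6, 7>
// vec.ind.next (latch)   = <8, 9, 10, 11>   (advances by UF * VF * Step)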
|
|
|
|
|
2016-12-07 15:03:32 +00:00
|
|
|
bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
|
2017-02-08 19:25:23 +00:00
|
|
|
return Cost->isScalarAfterVectorization(I, VF) ||
|
2016-12-07 15:03:32 +00:00
|
|
|
Cost->isProfitableToScalarize(I, VF);
|
|
|
|
}
|
|
|
|
|
2016-08-02 15:25:16 +00:00
|
|
|
bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
|
2016-12-07 15:03:32 +00:00
|
|
|
if (shouldScalarizeInstruction(IV))
|
2016-08-02 15:25:16 +00:00
|
|
|
return true;
|
|
|
|
auto isScalarInst = [&](User *U) -> bool {
|
|
|
|
auto *I = cast<Instruction>(U);
|
2016-12-07 15:03:32 +00:00
|
|
|
return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
|
2016-08-02 15:25:16 +00:00
|
|
|
};
|
2017-10-12 23:30:03 +00:00
|
|
|
return llvm::any_of(IV->users(), isScalarInst);
|
2016-08-02 15:25:16 +00:00
|
|
|
}
|
|
|
|
|
[LV] Support efficient vectorization of an induction with redundant casts
D30041 extended SCEVPredicateRewriter to improve handling of Phi nodes whose
update chain involves casts; PSCEV can now build an AddRecurrence for some
forms of such phi nodes, under the proper runtime overflow test. This means
that we can identify such phi nodes as an induction, and the loop-vectorizer
can now vectorize such inductions, however inefficiently. The vectorizer
doesn't know that it can ignore the casts, and so it vectorizes them.
This patch records the casts in the InductionDescriptor, so that they could
be marked to be ignored for cost calculation (we use VecValuesToIgnore for
that) and ignored for vectorization/widening/scalarization (i.e. treated as
TriviallyDead).
In addition to marking all these casts to be ignored, we also need to make
sure that each cast is mapped to the right vector value in the vector loop body
(be it a widened, vectorized, or scalarized induction). So whenever an
induction phi is mapped to a vector value (during vectorization/widening/
scalarization), we also map the respective cast instruction (if exists) to that
vector value. (If the phi-update sequence of an induction involves more than one
cast, then the above mapping to vector value is relevant only for the last cast
of the sequence as we allow only the "last cast" to be used outside the
induction update chain itself).
This is the last step in addressing PR30654.
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
|
2018-03-20 09:04:39 +00:00
|
|
|
const InductionDescriptor &ID, const Instruction *EntryVal,
|
|
|
|
Value *VectorLoopVal, unsigned Part, unsigned Lane) {
|
|
|
|
assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
|
|
|
|
"Expected either an induction phi-node or a truncate of it!");
|
|
|
|
|
|
|
|
// This induction variable is not the phi from the original loop but the
|
|
|
|
// newly-created IV based on the proof that the casted Phi is equal to the
|
|
|
|
// uncasted Phi in the vectorized loop (possibly under a runtime guard). It
|
|
|
|
// re-uses the same InductionDescriptor that the original IV uses, but we don't
|
|
|
|
// have to do any recording in this case - that is done when the original IV is
|
|
|
|
// processed.
|
|
|
|
if (isa<TruncInst>(EntryVal))
|
|
|
|
return;
|
|
|
|
|
2017-12-14 07:56:31 +00:00
|
|
|
const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
|
|
|
|
if (Casts.empty())
|
|
|
|
return;
|
|
|
|
// Only the first Cast instruction in the Casts vector is of interest.
|
|
|
|
// The rest of the Casts (if they exist) have no uses outside the
|
|
|
|
// induction update chain itself.
|
|
|
|
Instruction *CastInst = *Casts.begin();
|
|
|
|
if (Lane < UINT_MAX)
|
|
|
|
VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
|
|
|
|
else
|
|
|
|
VectorLoopValueMap.setVectorValue(CastInst, Part, VectorLoopVal);
|
|
|
|
}
|
|
|
|
|
2017-02-24 18:20:12 +00:00
|
|
|
void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, TruncInst *Trunc) {
|
|
|
|
assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
|
|
|
|
"Primary induction variable must have an integer type");
|
2016-07-05 15:41:28 +00:00
|
|
|
|
|
|
|
auto II = Legal->getInductionVars()->find(IV);
|
|
|
|
assert(II != Legal->getInductionVars()->end() && "IV is not an induction");
|
|
|
|
|
|
|
|
auto ID = II->second;
|
|
|
|
assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
|
|
|
|
|
2016-08-02 15:25:16 +00:00
|
|
|
// The scalar value to broadcast. This will be derived from the canonical
|
|
|
|
// induction variable.
|
|
|
|
Value *ScalarIV = nullptr;
|
|
|
|
|
|
|
|
// The value from the original loop to which we are mapping the new induction
|
|
|
|
// variable.
|
|
|
|
Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
|
|
|
|
|
|
|
|
// True if we have vectorized the induction variable.
|
|
|
|
auto VectorizedIV = false;
|
|
|
|
|
|
|
|
// Determine if we want a scalar version of the induction variable. This is
|
|
|
|
// true if the induction variable itself is not widened, or if it has at
|
|
|
|
// least one user in the loop that is not widened.
|
|
|
|
auto NeedsScalarIV = VF > 1 && needsScalarInduction(EntryVal);
|
|
|
|
|
2017-02-17 16:09:07 +00:00
|
|
|
// Generate code for the induction step. Note that induction steps are
|
|
|
|
// required to be loop-invariant
|
|
|
|
assert(PSE.getSE()->isLoopInvariant(ID.getStep(), OrigLoop) &&
|
|
|
|
"Induction step should be loop invariant");
|
|
|
|
auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
|
2017-02-24 18:20:12 +00:00
|
|
|
Value *Step = nullptr;
|
|
|
|
if (PSE.getSE()->isSCEVable(IV->getType())) {
|
|
|
|
SCEVExpander Exp(*PSE.getSE(), DL, "induction");
|
|
|
|
Step = Exp.expandCodeFor(ID.getStep(), ID.getStep()->getType(),
|
|
|
|
LoopVectorPreHeader->getTerminator());
|
|
|
|
} else {
|
|
|
|
Step = cast<SCEVUnknown>(ID.getStep())->getValue();
|
|
|
|
}
|
2016-07-05 15:41:28 +00:00
|
|
|
|
|
|
|
// Try to create a new independent vector induction variable. If we can't
|
|
|
|
// create the phi node, we will splat the scalar induction variable in each
|
|
|
|
// loop iteration.
|
2017-02-17 16:09:07 +00:00
|
|
|
if (VF > 1 && !shouldScalarizeInstruction(EntryVal)) {
|
2017-02-24 18:20:12 +00:00
|
|
|
createVectorIntOrFpInductionPHI(ID, Step, EntryVal);
|
2016-08-02 15:25:16 +00:00
|
|
|
VectorizedIV = true;
|
|
|
|
}
|
2016-07-05 15:41:28 +00:00
|
|
|
|
2016-08-02 15:25:16 +00:00
|
|
|
// If we haven't yet vectorized the induction variable, or if we will create
|
|
|
|
// a scalar one, we need to define the scalar induction variable and step
|
|
|
|
// values. If we were given a truncation type, truncate the canonical
|
2017-02-17 16:09:07 +00:00
|
|
|
// induction variable and step. Otherwise, derive these values from the
|
|
|
|
// induction descriptor.
|
2016-08-02 15:25:16 +00:00
|
|
|
if (!VectorizedIV || NeedsScalarIV) {
|
2017-03-27 20:07:38 +00:00
|
|
|
ScalarIV = Induction;
|
|
|
|
if (IV != OldInduction) {
|
|
|
|
ScalarIV = IV->getType()->isIntegerTy()
|
|
|
|
? Builder.CreateSExtOrTrunc(Induction, IV->getType())
|
|
|
|
: Builder.CreateCast(Instruction::SIToFP, Induction,
|
|
|
|
IV->getType());
|
|
|
|
ScalarIV = ID.transform(Builder, ScalarIV, PSE.getSE(), DL);
|
|
|
|
ScalarIV->setName("offset.idx");
|
|
|
|
}
|
2016-09-01 18:14:27 +00:00
|
|
|
if (Trunc) {
|
|
|
|
auto *TruncType = cast<IntegerType>(Trunc->getType());
|
2017-02-17 16:09:07 +00:00
|
|
|
assert(Step->getType()->isIntegerTy() &&
|
|
|
|
"Truncation requires an integer step");
|
2017-03-27 20:07:38 +00:00
|
|
|
ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
|
2017-02-17 16:09:07 +00:00
|
|
|
Step = Builder.CreateTrunc(Step, TruncType);
|
2016-07-05 15:41:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-02 15:25:16 +00:00
|
|
|
// If we haven't yet vectorized the induction variable, splat the scalar
|
|
|
|
// induction variable, and build the necessary step vectors.
|
2018-01-15 10:56:07 +00:00
|
|
|
// TODO: Don't do it unless the vectorized IV is really required.
|
2016-08-02 15:25:16 +00:00
|
|
|
if (!VectorizedIV) {
|
|
|
|
Value *Broadcasted = getBroadcastInstrs(ScalarIV);
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
Value *EntryPart =
|
2017-02-24 18:20:12 +00:00
|
|
|
getStepVector(Broadcasted, VF * Part, Step, ID.getInductionOpcode());
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(EntryVal, Part, EntryPart);
|
|
|
|
if (Trunc)
|
|
|
|
addMetadata(EntryPart, Trunc);
|
2018-03-20 09:04:39 +00:00
|
|
|
recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, Part);
|
2017-06-27 08:41:19 +00:00
|
|
|
}
|
2016-08-02 15:25:16 +00:00
|
|
|
}
|
2016-07-14 14:36:06 +00:00
|
|
|
|
|
|
|
// If an induction variable is only used for counting loop iterations or
|
|
|
|
// calculating addresses, it doesn't need to be widened. Create scalar steps
|
|
|
|
// that can be used by instructions we will later scalarize. Note that the
|
|
|
|
// addition of the scalar steps will not increase the number of instructions
|
|
|
|
// in the loop in the common case prior to InstCombine. We will be trading
|
|
|
|
// one vector extract for each scalar step.
|
2016-08-02 15:25:16 +00:00
|
|
|
if (NeedsScalarIV)
|
2017-02-24 18:20:12 +00:00
|
|
|
buildScalarSteps(ScalarIV, Step, EntryVal, ID);
|
2016-07-05 15:41:28 +00:00
|
|
|
}
|
|
|
|
|
2016-07-24 07:24:54 +00:00
|
|
|
Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
|
|
|
|
Instruction::BinaryOps BinOp) {
|
|
|
|
// Create and check the types.
|
2012-10-17 18:25:06 +00:00
|
|
|
assert(Val->getType()->isVectorTy() && "Must be a vector");
|
2016-07-24 07:24:54 +00:00
|
|
|
int VLen = Val->getType()->getVectorNumElements();
|
|
|
|
|
|
|
|
Type *STy = Val->getType()->getScalarType();
|
|
|
|
assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
|
|
|
|
"Induction Step must be an integer or FP");
|
|
|
|
assert(Step->getType() == STy && "Step has wrong type");
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
SmallVector<Constant *, 8> Indices;
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2016-07-24 07:24:54 +00:00
|
|
|
if (STy->isIntegerTy()) {
|
|
|
|
// Create a vector of consecutive numbers from zero to VF.
|
|
|
|
for (int i = 0; i < VLen; ++i)
|
|
|
|
Indices.push_back(ConstantInt::get(STy, StartIdx + i));
|
|
|
|
|
|
|
|
// Add the consecutive indices to the vector value.
|
|
|
|
Constant *Cv = ConstantVector::get(Indices);
|
|
|
|
assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
|
|
|
|
Step = Builder.CreateVectorSplat(VLen, Step);
|
|
|
|
assert(Step->getType() == Val->getType() && "Invalid step vec");
|
|
|
|
// FIXME: The newly created binary instructions should contain nsw/nuw flags,
|
|
|
|
// which can be found from the original scalar operations.
|
|
|
|
Step = Builder.CreateMul(Cv, Step);
|
|
|
|
return Builder.CreateAdd(Val, Step, "induction");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Floating point induction.
|
|
|
|
assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
|
|
|
|
"Binary Opcode should be specified for FP induction");
|
2012-10-17 18:25:06 +00:00
|
|
|
// Create a vector of consecutive numbers from zero to VF.
|
2015-01-30 05:02:21 +00:00
|
|
|
for (int i = 0; i < VLen; ++i)
|
2016-07-24 07:24:54 +00:00
|
|
|
Indices.push_back(ConstantFP::get(STy, (double)(StartIdx + i)));
|
2012-10-17 18:25:06 +00:00
|
|
|
|
|
|
|
// Add the consecutive indices to the vector value.
|
|
|
|
Constant *Cv = ConstantVector::get(Indices);
|
2016-07-24 07:24:54 +00:00
|
|
|
|
2015-01-30 05:02:21 +00:00
|
|
|
Step = Builder.CreateVectorSplat(VLen, Step);
|
2016-07-24 07:24:54 +00:00
|
|
|
|
|
|
|
// Floating point operations had to be 'fast' to enable the induction.
|
|
|
|
FastMathFlags Flags;
|
[IR] redefine 'UnsafeAlgebra' / 'reassoc' fast-math-flags and add 'trans' fast-math-flag
As discussed on llvm-dev:
http://lists.llvm.org/pipermail/llvm-dev/2016-November/107104.html
and again more recently:
http://lists.llvm.org/pipermail/llvm-dev/2017-October/118118.html
...this is a step in cleaning up our fast-math-flags implementation in IR to better match
the capabilities of both clang's user-visible flags and the backend's flags for SDNode.
As proposed in the above threads, we're replacing the 'UnsafeAlgebra' bit (which had the
'umbrella' meaning that all flags are set) with a new bit that only applies to algebraic
reassociation - 'AllowReassoc'.
We're also adding a bit to allow approximations for library functions called 'ApproxFunc'
(this was initially proposed as 'libm' or similar).
...and we're out of bits. 7 bits ought to be enough for anyone, right? :) FWIW, I did
look at getting this out of SubclassOptionalData via SubclassData (spacious 16-bits),
but that's apparently already used for other purposes. Also, I don't think we can just
add a field to FPMathOperator because Operator is not intended to be instantiated.
We'll defer movement of FMF to another day.
We keep the 'fast' keyword. I thought about removing that, but seeing IR like this:
%f.fast = fadd reassoc nnan ninf nsz arcp contract afn float %op1, %op2
...made me think we want to keep the shortcut synonym.
Finally, this change is binary incompatible with existing IR as seen in the
compatibility tests. This statement:
"Newer releases can ignore features from older releases, but they cannot miscompile
them. For example, if nsw is ever replaced with something else, dropping it would be
a valid way to upgrade the IR."
( http://llvm.org/docs/DeveloperPolicy.html#ir-backwards-compatibility )
...provides the flexibility we want to make this change without requiring a new IR
version. Ie, we're not loosening the FP strictness of existing IR. At worst, we will
fail to optimize some previously 'fast' code because it's no longer recognized as
'fast'. This should get fixed as we audit/squash all of the uses of 'isFast()'.
Note: an inter-dependent clang commit to use the new API name should closely follow
commit.
Differential Revision: https://reviews.llvm.org/D39304
llvm-svn: 317488
2017-11-06 16:27:15 +00:00
|
|
|
Flags.setFast();
|
2016-07-24 07:24:54 +00:00
|
|
|
|
|
|
|
Value *MulOp = Builder.CreateFMul(Cv, Step);
|
|
|
|
if (isa<Instruction>(MulOp))
|
|
|
|
// Have to check: MulOp may be a constant.
|
|
|
|
cast<Instruction>(MulOp)->setFastMathFlags(Flags);
|
|
|
|
|
|
|
|
Value *BOp = Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
|
|
|
|
if (isa<Instruction>(BOp))
|
|
|
|
cast<Instruction>(BOp)->setFastMathFlags(Flags);
|
|
|
|
return BOp;
|
2012-10-17 18:25:06 +00:00
|
|
|
}
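For a concrete feel of the integer path, a standalone sketch (not vectorizer code; the numbers are assumptions) that reproduces the per-lane arithmetic Val + (StartIdx + Lane) * Step:

#include <cstdio>

int main() {
  const int VLen = 4, StartIdx = 4, Step = 2, ValLane = 10; // Val splatted from 10
  for (int Lane = 0; Lane < VLen; ++Lane)
    std::printf("%d ", ValLane + (StartIdx + Lane) * Step);
  std::printf("\n"); // prints: 18 20 22 24
  return 0;
}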
|
|
|
|
|
2016-07-14 14:36:06 +00:00
|
|
|
void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
|
2018-03-20 09:04:39 +00:00
|
|
|
Instruction *EntryVal,
|
2017-02-24 18:20:12 +00:00
|
|
|
const InductionDescriptor &ID) {
|
2016-07-14 14:36:06 +00:00
|
|
|
// We shouldn't have to build scalar steps if we aren't vectorizing.
|
2016-07-06 14:26:59 +00:00
|
|
|
assert(VF > 1 && "VF should be greater than one");
|
|
|
|
|
|
|
|
// Get the value type and ensure it and the step have the same integer type.
|
2016-07-14 14:36:06 +00:00
|
|
|
Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
|
2017-02-24 18:20:12 +00:00
|
|
|
assert(ScalarIVTy == Step->getType() &&
|
|
|
|
"Val and Step should have the same type");
|
|
|
|
|
|
|
|
// We build scalar steps for both integer and floating-point induction
|
|
|
|
// variables. Here, we determine the kind of arithmetic we will perform.
|
|
|
|
Instruction::BinaryOps AddOp;
|
|
|
|
Instruction::BinaryOps MulOp;
|
|
|
|
if (ScalarIVTy->isIntegerTy()) {
|
|
|
|
AddOp = Instruction::Add;
|
|
|
|
MulOp = Instruction::Mul;
|
|
|
|
} else {
|
|
|
|
AddOp = ID.getInductionOpcode();
|
|
|
|
MulOp = Instruction::FMul;
|
|
|
|
}
|
2016-07-06 14:26:59 +00:00
|
|
|
|
2016-09-21 16:50:24 +00:00
|
|
|
// Determine the number of scalars we need to generate for each unroll
|
2016-09-30 15:13:52 +00:00
|
|
|
// iteration. If EntryVal is uniform, we only need to generate the first
|
|
|
|
// lane. Otherwise, we generate all VF values.
|
2016-09-21 16:50:24 +00:00
|
|
|
unsigned Lanes =
|
2017-08-27 12:55:46 +00:00
|
|
|
Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF) ? 1
|
|
|
|
: VF;
|
[LV] Unify vector and scalar maps
This patch unifies the data structures we use for mapping instructions from the
original loop to their corresponding instructions in the new loop. Previously,
we maintained two distinct maps for this purpose: WidenMap and ScalarIVMap.
WidenMap maintained the vector values each instruction from the old loop was
represented with, and ScalarIVMap maintained the scalar values each scalarized
induction variable was represented with. With this patch, all values created
for the new loop are maintained in VectorLoopValueMap.
The change allows for several simplifications. Previously, when an instruction
was scalarized, we had to insert the scalar values into vectors in order to
maintain the mapping in WidenMap. Then, if a user of the scalarized value was
also scalar, we had to extract the scalar values from the temporary vector we
created. We now avoid these unnecessary scalar-to-vector-to-scalar conversions.
If a scalarized value is used by a scalar instruction, the scalar value is used
directly. However, if the scalarized value is needed by a vector instruction,
we generate the needed insertelement instructions on-demand.
A common idiom in several locations in the code (including the scalarization
code) is to first get the vector values an instruction from the original loop
maps to, and then extract a particular scalar value. This patch adds
getScalarValue for this purpose alongside getVectorValue as an interface into
VectorLoopValueMap. These functions work together to return the requested
values if they're available or to produce them if they're not.
The mapping has also been made less permissive. Entries can be added to
VectorLoopValueMap with the new initVector and initScalar functions.
getVectorValue has been modified to return a constant reference to the mapped
entries.
There's no real functional change with this patch; however, in some cases we
will generate slightly different code. For example, instead of an insertelement
sequence following the definition of an instruction, it will now precede the
first use of that instruction. This can be seen in the test case changes.
Differential Revision: https://reviews.llvm.org/D23169
llvm-svn: 279649
2016-08-24 18:23:17 +00:00
|
|
|
// Compute the scalar steps and save the results in VectorLoopValueMap.
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2016-09-21 16:50:24 +00:00
|
|
|
for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
|
2017-02-24 18:20:12 +00:00
|
|
|
auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
|
|
|
|
auto *Mul = addFastMathFlag(Builder.CreateBinOp(MulOp, StartIdx, Step));
|
|
|
|
auto *Add = addFastMathFlag(Builder.CreateBinOp(AddOp, ScalarIV, Mul));
|
2017-08-27 12:55:46 +00:00
|
|
|
VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Add);
|
2018-03-20 09:04:39 +00:00
|
|
|
recordVectorLoopValueForInductionCast(ID, EntryVal, Add, Part, Lane);
|
2016-07-14 14:36:06 +00:00
|
|
|
}
|
2016-08-24 18:23:17 +00:00
|
|
|
}
|
2016-07-06 14:26:59 +00:00
|
|
|
}
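For reference, the index arithmetic performed by the loop above amounts to Scalar(Part, Lane) = ScalarIV + (VF * Part + Lane) * Step. The following standalone C++ sketch uses illustrative values only; note the real loop iterates Lanes, which may be 1 when only lane zero is required, while the sketch assumes all VF lanes are needed.

#include <cstdio>

int main() {
  const unsigned UF = 2, VF = 4;     // example unroll and vectorization factors
  const int ScalarIV = 10, Step = 3; // example start value and step
  for (unsigned Part = 0; Part < UF; ++Part)
    for (unsigned Lane = 0; Lane < VF; ++Lane) {
      unsigned StartIdx = VF * Part + Lane; // offset of this lane in the widened iteration
      int Scalar = ScalarIV + static_cast<int>(StartIdx) * Step; // value stored per (Part, Lane)
      std::printf("Part %u Lane %u -> %d\n", Part, Lane, Scalar);
    }
  return 0;
}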
|
|
|
|
|
2012-12-26 19:08:17 +00:00
|
|
|
int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
|
2016-09-18 13:56:08 +00:00
|
|
|
const ValueToValueMap &Strides = getSymbolicStrides() ? *getSymbolicStrides() :
|
|
|
|
ValueToValueMap();
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2016-09-18 13:56:08 +00:00
|
|
|
int Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, true, false);
|
|
|
|
if (Stride == 1 || Stride == -1)
|
|
|
|
return Stride;
|
2012-12-26 19:08:17 +00:00
|
|
|
return 0;
|
2012-10-17 18:25:06 +00:00
|
|
|
}
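As a conceptual illustration (not code from this file), the loops below show the access patterns behind the possible return values: +1 for forward-consecutive pointers, -1 for reverse-consecutive pointers, and 0 for anything else.

void strideExamples(int *A, int N) {
  for (int I = 0; I < N; ++I)
    A[I] = 0;   // consecutive, increasing: stride 1
  for (int I = N - 1; I >= 0; --I)
    A[I] = 0;   // consecutive, decreasing: stride -1
  for (int I = 0; I < N; I += 2)
    A[I] = 0;   // non-unit stride: treated as non-consecutive (0)
}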
|
|
|
|
|
2015-02-19 19:14:34 +00:00
|
|
|
bool LoopVectorizationLegality::isUniform(Value *V) {
|
2015-02-19 19:15:04 +00:00
|
|
|
return LAI->isUniform(V);
|
2015-02-19 19:14:34 +00:00
|
|
|
}
|
2015-02-01 16:56:04 +00:00
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *InnerLoopVectorizer::getOrCreateVectorValue(Value *V, unsigned Part) {
|
2012-11-29 19:25:41 +00:00
|
|
|
assert(V != Induction && "The new induction variable should not be used.");
|
2012-10-19 23:05:40 +00:00
|
|
|
assert(!V->getType()->isVectorTy() && "Can't widen a vector");
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
assert(!V->getType()->isVoidTy() && "Type does not produce a value");
|
2012-10-18 17:31:49 +00:00
|
|
|
|
2014-01-10 18:20:32 +00:00
|
|
|
// If we have a stride that is replaced by one, do it here.
|
|
|
|
if (Legal->hasStride(V))
|
|
|
|
V = ConstantInt::get(V->getType(), 1);
|
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
// If we have a vector mapped to this value, return it.
|
|
|
|
if (VectorLoopValueMap.hasVectorValue(V, Part))
|
|
|
|
return VectorLoopValueMap.getVectorValue(V, Part);
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
|
|
|
|
// If the value has not been vectorized, check if it has been scalarized
|
|
|
|
// instead. If it has been scalarized, and we actually need the value in
|
|
|
|
// vector form, we will construct the vector values on demand.
|
2017-06-27 08:41:19 +00:00
|
|
|
if (VectorLoopValueMap.hasAnyScalarValue(V)) {
|
2017-08-27 12:55:46 +00:00
|
|
|
Value *ScalarValue = VectorLoopValueMap.getScalarValue(V, {Part, 0});
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
|
2016-09-21 16:50:24 +00:00
|
|
|
// If we've scalarized a value, that value should be an instruction.
|
|
|
|
auto *I = cast<Instruction>(V);
|
|
|
|
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
// If we aren't vectorizing, we can just copy the scalar map values over to
|
|
|
|
// the vector map.
|
|
|
|
if (VF == 1) {
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(V, Part, ScalarValue);
|
|
|
|
return ScalarValue;
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
}
|
|
|
|
|
2017-06-28 17:59:33 +00:00
|
|
|
// Get the last scalar instruction we generated for V and Part. If the value
|
|
|
|
// is known to be uniform after vectorization, this corresponds to lane zero
|
|
|
|
// of the Part unroll iteration. Otherwise, the last instruction is the one
|
|
|
|
// we created for the last vector lane of the Part unroll iteration.
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned LastLane = Cost->isUniformAfterVectorization(I, VF) ? 0 : VF - 1;
|
2017-08-27 12:55:46 +00:00
|
|
|
auto *LastInst = cast<Instruction>(
|
|
|
|
VectorLoopValueMap.getScalarValue(V, {Part, LastLane}));
|
2016-08-29 20:14:04 +00:00
|
|
|
|
|
|
|
// Set the insert point after the last scalarized instruction. This ensures
|
|
|
|
// the insertelement sequence will directly follow the scalar definitions.
|
|
|
|
auto OldIP = Builder.saveIP();
|
|
|
|
auto NewIP = std::next(BasicBlock::iterator(LastInst));
|
|
|
|
Builder.SetInsertPoint(&*NewIP);
|
|
|
|
|
2016-09-21 16:50:24 +00:00
|
|
|
// However, if we are vectorizing, we need to construct the vector values.
|
|
|
|
// If the value is known to be uniform after vectorization, we can just
|
|
|
|
// broadcast the scalar value corresponding to lane zero for each unroll
|
|
|
|
// iteration. Otherwise, we construct the vector values using insertelement
|
|
|
|
// instructions. Since the resulting vectors are stored in
|
|
|
|
// VectorLoopValueMap, we will only generate the insertelements once.
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *VectorValue = nullptr;
|
|
|
|
if (Cost->isUniformAfterVectorization(I, VF)) {
|
|
|
|
VectorValue = getBroadcastInstrs(ScalarValue);
|
2017-08-27 12:55:46 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(V, Part, VectorValue);
|
2017-06-27 08:41:19 +00:00
|
|
|
} else {
|
2017-08-27 12:55:46 +00:00
|
|
|
// Initialize packing with insertelements to start from undef.
|
|
|
|
Value *Undef = UndefValue::get(VectorType::get(V->getType(), VF));
|
|
|
|
VectorLoopValueMap.setVectorValue(V, Part, Undef);
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Lane = 0; Lane < VF; ++Lane)
|
2017-08-27 12:55:46 +00:00
|
|
|
packScalarIntoVectorValue(V, {Part, Lane});
|
|
|
|
VectorValue = VectorLoopValueMap.getVectorValue(V, Part);
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
}
|
2016-08-29 20:14:04 +00:00
|
|
|
Builder.restoreIP(OldIP);
|
2017-06-27 08:41:19 +00:00
|
|
|
return VectorValue;
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
}
|
2013-01-03 00:52:27 +00:00
|
|
|
|
|
|
|
// If this scalar is unknown, assume that it is a constant or that it is
|
|
|
|
// loop invariant. Broadcast V and save the value for future uses.
|
2012-10-18 17:31:49 +00:00
|
|
|
Value *B = getBroadcastInstrs(V);
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(V, Part, B);
|
|
|
|
return B;
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
}
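The decision logic above can be summarized by a small standalone model in plain C++ (illustrative names, std::vector standing in for IR vector values): uniform values are broadcast from lane zero, while non-uniform scalarized values are packed lane by lane, mirroring the insertelement chain the real code emits.

#include <vector>

// Illustrative model only: std::vector<int> stands in for an IR vector value.
std::vector<int> buildVectorOnDemand(const std::vector<int> &ScalarLanes,
                                     bool IsUniform, unsigned VF) {
  if (IsUniform)
    return std::vector<int>(VF, ScalarLanes[0]); // broadcast lane zero
  std::vector<int> Packed;
  Packed.reserve(VF);
  for (unsigned Lane = 0; Lane < VF; ++Lane) // pack lane by lane, like the
    Packed.push_back(ScalarLanes[Lane]);     // insertelement chain above
  return Packed;
}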
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
Value *
|
|
|
|
InnerLoopVectorizer::getOrCreateScalarValue(Value *V,
|
|
|
|
const VPIteration &Instance) {
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
// If the value is not an instruction contained in the loop, it should
|
|
|
|
// already be scalar.
|
|
|
|
if (OrigLoop->isLoopInvariant(V))
|
|
|
|
return V;
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
assert(Instance.Lane > 0
|
|
|
|
? !Cost->isUniformAfterVectorization(cast<Instruction>(V), VF)
|
|
|
|
: true && "Uniform values only have lane zero");
|
2016-09-21 16:50:24 +00:00
|
|
|
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
// If the value from the original loop has not been vectorized, it is
|
|
|
|
// represented by UF x VF scalar values in the new loop. Return the requested
|
|
|
|
// scalar value.
|
2017-08-27 12:55:46 +00:00
|
|
|
if (VectorLoopValueMap.hasScalarValue(V, Instance))
|
|
|
|
return VectorLoopValueMap.getScalarValue(V, Instance);
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
|
|
|
|
// If the value has not been scalarized, get its entry in VectorLoopValueMap
|
|
|
|
// for the given unroll part. If this entry is not a vector type (i.e., the
|
|
|
|
// vectorization factor is one), there is no need to generate an
|
|
|
|
// extractelement instruction.
|
2017-08-27 12:55:46 +00:00
|
|
|
auto *U = getOrCreateVectorValue(V, Instance.Part);
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
if (!U->getType()->isVectorTy()) {
|
|
|
|
assert(VF == 1 && "Value not scalarized has non-vector type");
|
|
|
|
return U;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, the value from the original loop has been vectorized and is
|
|
|
|
// represented by UF vector values. Extract and return the requested scalar
|
|
|
|
// value from the appropriate vector lane.
|
2017-08-27 12:55:46 +00:00
|
|
|
return Builder.CreateExtractElement(U, Builder.getInt32(Instance.Lane));
|
|
|
|
}
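A simplified standalone model of the lookup above (plain C++, not the LLVM implementation): when VF == 1 the per-part value is already scalar, otherwise the requested lane is read out of the part's vector, which models the extractelement.

#include <vector>

// Illustrative model only: the part's "vector" is a std::vector<int>.
int getScalarFromPart(const std::vector<int> &PartVector, unsigned Lane) {
  if (PartVector.size() == 1) // VF == 1: the value was never widened
    return PartVector[0];
  return PartVector[Lane];    // models extracting the requested lane
}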
|
|
|
|
|
|
|
|
void InnerLoopVectorizer::packScalarIntoVectorValue(
|
|
|
|
Value *V, const VPIteration &Instance) {
|
|
|
|
assert(V != Induction && "The new induction variable should not be used.");
|
|
|
|
assert(!V->getType()->isVectorTy() && "Can't pack a vector");
|
|
|
|
assert(!V->getType()->isVoidTy() && "Type does not produce a value");
|
|
|
|
|
|
|
|
Value *ScalarInst = VectorLoopValueMap.getScalarValue(V, Instance);
|
|
|
|
Value *VectorValue = VectorLoopValueMap.getVectorValue(V, Instance.Part);
|
|
|
|
VectorValue = Builder.CreateInsertElement(VectorValue, ScalarInst,
|
|
|
|
Builder.getInt32(Instance.Lane));
|
|
|
|
VectorLoopValueMap.resetVectorValue(V, Instance.Part, VectorValue);
|
2012-10-17 18:25:06 +00:00
|
|
|
}
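For illustration, a hedged sketch of the packing pattern this helper implements, written against era-appropriate LLVM IRBuilder calls (CreateInsertElement, UndefValue::get, VectorType::get); the helper name and surrounding parameters are assumptions, not code from this file.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Assumed helper for illustration only: build one packed vector per unroll
// part from UF x VF scalar values.
static void packAllLanes(IRBuilder<> &Builder, ArrayRef<Value *> Scalars,
                         unsigned UF, unsigned VF, Type *ScalarTy,
                         SmallVectorImpl<Value *> &PartVectors) {
  for (unsigned Part = 0; Part < UF; ++Part) {
    // Start from undef and fill in each lane, as the code above does per lane.
    Value *Vec = UndefValue::get(VectorType::get(ScalarTy, VF));
    for (unsigned Lane = 0; Lane < VF; ++Lane)
      Vec = Builder.CreateInsertElement(Vec, Scalars[Part * VF + Lane],
                                        Builder.getInt32(Lane));
    PartVectors.push_back(Vec);
  }
}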
|
|
|
|
|
2012-12-26 19:08:17 +00:00
|
|
|
Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
|
|
|
|
assert(Vec->getType()->isVectorTy() && "Invalid type");
|
2016-05-05 00:54:54 +00:00
|
|
|
SmallVector<Constant *, 8> ShuffleMask;
|
2012-12-26 19:08:17 +00:00
|
|
|
for (unsigned i = 0; i < VF; ++i)
|
|
|
|
ShuffleMask.push_back(Builder.getInt32(VF - i - 1));
|
|
|
|
|
|
|
|
return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
|
|
|
|
ConstantVector::get(ShuffleMask),
|
|
|
|
"reverse");
|
|
|
|
}
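As a concrete instance of the mask built above, VF = 4 yields the shuffle mask <3, 2, 1, 0>. A small standalone sketch of the same index arithmetic:

#include <cstdio>
#include <vector>

int main() {
  const unsigned VF = 4;
  std::vector<unsigned> ShuffleMask;
  for (unsigned I = 0; I < VF; ++I)
    ShuffleMask.push_back(VF - I - 1); // same arithmetic as reverseVector
  for (unsigned Idx : ShuffleMask)
    std::printf("%u ", Idx);           // prints: 3 2 1 0
  std::printf("\n");
  return 0;
}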
|
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
// Try to vectorize the interleave group that \p Instr belongs to.
|
|
|
|
//
|
|
|
|
// E.g. Translate following interleaved load group (factor = 3):
|
|
|
|
// for (i = 0; i < N; i+=3) {
|
|
|
|
// R = Pic[i]; // Member of index 0
|
|
|
|
// G = Pic[i+1]; // Member of index 1
|
|
|
|
// B = Pic[i+2]; // Member of index 2
|
|
|
|
// ... // do something to R, G, B
|
|
|
|
// }
|
|
|
|
// To:
|
|
|
|
// %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B
|
|
|
|
// %R.vec = shuffle %wide.vec, undef, <0, 3, 6, 9> ; R elements
|
|
|
|
// %G.vec = shuffle %wide.vec, undef, <1, 4, 7, 10> ; G elements
|
|
|
|
// %B.vec = shuffle %wide.vec, undef, <2, 5, 8, 11> ; B elements
|
|
|
|
//
|
|
|
|
// Or translate following interleaved store group (factor = 3):
|
|
|
|
// for (i = 0; i < N; i+=3) {
|
|
|
|
// ... do something to R, G, B
|
|
|
|
// Pic[i] = R; // Member of index 0
|
|
|
|
// Pic[i+1] = G; // Member of index 1
|
|
|
|
// Pic[i+2] = B; // Member of index 2
|
|
|
|
// }
|
|
|
|
// To:
|
|
|
|
// %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
|
|
|
|
// %B_U.vec = shuffle %B.vec, undef, <0, 1, 2, 3, u, u, u, u>
|
|
|
|
// %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
|
|
|
|
// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements
|
|
|
|
// store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B
|
|
|
|
void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr) {
|
|
|
|
const InterleaveGroup *Group = Legal->getInterleavedAccessGroup(Instr);
|
|
|
|
assert(Group && "Fail to get an interleaved access group.");
|
|
|
|
|
|
|
|
// Skip if current instruction is not the insert position.
|
|
|
|
if (Instr != Group->getInsertPos())
|
|
|
|
return;
|
|
|
|
|
[LoopVectorizer] Use two step casting for float to pointer types.
Summary:
LoopVectorizer is creating casts between vec<ptr> and vec<float> types
on ARM when compiling OpenCV. Since it is illegal to directly cast a
floating-point type to a pointer type even if the types have the same size,
this causes a crash. Fix the crash by using a two-step cast: bitcast the
value to an integer and then cast the integer to a pointer/float.
Fixes PR33804.
Reviewers: mkuper, Ayal, dlj, rengolin, srhines
Reviewed By: rengolin
Subscribers: aemerson, kristof.beyls, mkazantsev, Meinersbur, rengolin, mzolotukhin, llvm-commits
Differential Revision: https://reviews.llvm.org/D35498
llvm-svn: 312331
2017-09-01 15:36:00 +00:00
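The two-step idea in the commit message can be illustrated with a scalar analogy in plain C++ (a conceptual sketch only, not the vectorizer change): reinterpret the float's bits as an integer first, then convert the integer to a pointer, instead of converting float to pointer directly.

#include <cstdint>
#include <cstring>

void *floatBitsToPtr(float F) {
  static_assert(sizeof(float) == sizeof(uint32_t), "sketch assumes 32-bit float");
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(Bits));             // step 1: bitcast float -> integer
  return reinterpret_cast<void *>(uintptr_t{Bits}); // step 2: integer -> pointer
}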
|
|
|
const DataLayout &DL = Instr->getModule()->getDataLayout();
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(Instr);
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
// Prepare for the vector type of the interleaved load/store.
|
2017-02-08 19:25:23 +00:00
|
|
|
Type *ScalarTy = getMemInstValueType(Instr);
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
unsigned InterleaveFactor = Group->getFactor();
|
|
|
|
Type *VecTy = VectorType::get(ScalarTy, InterleaveFactor * VF);
|
2017-02-08 19:25:23 +00:00
|
|
|
Type *PtrTy = VecTy->getPointerTo(getMemInstAddressSpace(Instr));
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
// Prepare for the new pointers.
|
|
|
|
setDebugLocFromInst(Builder, Ptr);
|
|
|
|
SmallVector<Value *, 2> NewPtrs;
|
|
|
|
unsigned Index = Group->getIndex(Instr);
|
2016-09-02 16:19:22 +00:00
|
|
|
|
|
|
|
// If the group is reverse, adjust the index to refer to the last vector lane
|
|
|
|
// instead of the first. We adjust the index from the first vector lane,
|
|
|
|
// rather than directly getting the pointer for lane VF - 1, because the
|
|
|
|
// pointer operand of the interleaved access is supposed to be uniform. For
|
|
|
|
// uniform instructions, we're only required to generate a value for the
|
|
|
|
// first vector lane in each unroll iteration.
|
|
|
|
if (Group->isReverse())
|
|
|
|
Index += (VF - 1) * Group->getFactor();
|
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; Part++) {
|
2017-08-27 12:55:46 +00:00
|
|
|
Value *NewPtr = getOrCreateScalarValue(Ptr, {Part, 0});
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
// Note that the current instruction could be at any member index. We need to adjust the address
|
|
|
|
// to the member of index 0.
|
|
|
|
//
|
|
|
|
// E.g. a = A[i+1]; // Member of index 1 (Current instruction)
|
|
|
|
// b = A[i]; // Member of index 0
|
|
|
|
// Current pointer is pointed to A[i+1], adjust it to A[i].
|
|
|
|
//
|
|
|
|
// E.g. A[i+1] = a; // Member of index 1
|
|
|
|
// A[i] = b; // Member of index 0
|
|
|
|
// A[i+2] = c; // Member of index 2 (Current instruction)
|
|
|
|
// Current pointer is pointed to A[i+2], adjust it to A[i].
|
|
|
|
NewPtr = Builder.CreateGEP(NewPtr, Builder.getInt32(-Index));
|
|
|
|
|
|
|
|
// Cast to the vector pointer type.
|
|
|
|
NewPtrs.push_back(Builder.CreateBitCast(NewPtr, PtrTy));
|
|
|
|
}
|
|
|
|
|
|
|
|
setDebugLocFromInst(Builder, Instr);
|
|
|
|
Value *UndefVec = UndefValue::get(VecTy);
|
|
|
|
|
|
|
|
// Vectorize the interleaved load group.
|
2017-02-08 19:25:23 +00:00
|
|
|
if (isa<LoadInst>(Instr)) {
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
// For each unroll part, create a wide load for the group.
|
|
|
|
SmallVector<Value *, 2> NewLoads;
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; Part++) {
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
auto *NewLoad = Builder.CreateAlignedLoad(
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
2015-06-08 06:39:56 +00:00
|
|
|
NewPtrs[Part], Group->getAlignment(), "wide.vec");
|
2017-12-06 22:42:24 +00:00
|
|
|
Group->addMetadata(NewLoad);
|
[LV] Unify vector and scalar maps
2016-08-24 18:23:17 +00:00
|
|
|
NewLoads.push_back(NewLoad);
|
|
|
|
}
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-08-24 18:23:17 +00:00
|
|
|
// For each member in the group, shuffle out the appropriate data from the
|
|
|
|
// wide loads.
|
|
|
|
for (unsigned I = 0; I < InterleaveFactor; ++I) {
|
|
|
|
Instruction *Member = Group->getMember(I);
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-08-24 18:23:17 +00:00
|
|
|
// Skip the gaps in the group.
|
|
|
|
if (!Member)
|
|
|
|
continue;
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2017-02-01 17:45:46 +00:00
|
|
|
Constant *StrideMask = createStrideMask(Builder, I, InterleaveFactor, VF);
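Assuming createStrideMask(Builder, Start, Stride, VF) selects every Stride-th lane starting at Start (which is what the surrounding shuffles rely on), the index list it encodes can be reproduced with a few lines of standalone C++; strideMaskIndices is an illustrative name, not an LLVM helper:

  #include <vector>

  // Indices assumed to be encoded by a stride mask: {Start, Start + Stride,
  // ..., Start + (VF - 1) * Stride}. For Start = 1, Stride = 2, VF = 4 this
  // yields {1, 3, 5, 7}, i.e. the odd lanes of an 8-wide interleaved load.
  std::vector<unsigned> strideMaskIndices(unsigned Start, unsigned Stride,
                                          unsigned VF) {
    std::vector<unsigned> Indices;
    for (unsigned I = 0; I < VF; ++I)
      Indices.push_back(Start + I * Stride);
    return Indices;
  }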
|
2016-08-24 18:23:17 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; Part++) {
|
2015-06-08 06:39:56 +00:00
|
|
|
Value *StridedVec = Builder.CreateShuffleVector(
|
2016-08-24 18:23:17 +00:00
|
|
|
NewLoads[Part], UndefVec, StrideMask, "strided.vec");
|
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
// If this member has a different type, cast the result type.
|
|
|
|
if (Member->getType() != ScalarTy) {
|
|
|
|
VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
|
[LoopVectorizer] Use two step casting for float to pointer types.
Summary:
LoopVectorizer is creating casts between vec<ptr> and vec<float> types
on ARM when compiling OpenCV. Since it is illegal to directly cast a
floating-point type to a pointer type, even if the types have the same size,
this causes a crash. Fix the crash using a two-step cast: bitcast
to integer, then integer to pointer/float.
Fixes PR33804.
Reviewers: mkuper, Ayal, dlj, rengolin, srhines
Reviewed By: rengolin
Subscribers: aemerson, kristof.beyls, mkazantsev, Meinersbur, rengolin, mzolotukhin, llvm-commits
Differential Revision: https://reviews.llvm.org/D35498
llvm-svn: 312331
2017-09-01 15:36:00 +00:00
|
|
|
StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
|
2015-06-08 06:39:56 +00:00
|
|
|
}
|
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
if (Group->isReverse())
|
|
|
|
StridedVec = reverseVector(StridedVec);
|
|
|
|
|
|
|
|
VectorLoopValueMap.setVectorValue(Member, Part, StridedVec);
|
2015-06-08 06:39:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The sub vector type for the current instruction.
|
|
|
|
VectorType *SubVT = VectorType::get(ScalarTy, VF);
|
|
|
|
|
|
|
|
// Vectorize the interleaved store group.
|
|
|
|
for (unsigned Part = 0; Part < UF; Part++) {
|
|
|
|
// Collect the stored vector from each member.
|
|
|
|
SmallVector<Value *, 4> StoredVecs;
|
|
|
|
for (unsigned i = 0; i < InterleaveFactor; i++) {
|
|
|
|
// An interleaved store group doesn't allow a gap, so each index has a member.
|
|
|
|
Instruction *Member = Group->getMember(i);
|
|
|
|
assert(Member && "Fail to get a member from an interleaved store group");
|
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *StoredVec = getOrCreateVectorValue(
|
|
|
|
cast<StoreInst>(Member)->getValueOperand(), Part);
|
2015-06-08 06:39:56 +00:00
|
|
|
if (Group->isReverse())
|
|
|
|
StoredVec = reverseVector(StoredVec);
|
|
|
|
|
2017-09-01 15:36:00 +00:00
|
|
|
// If this member has a different type, cast it to a unified type.
|
|
|
|
|
2015-06-08 06:39:56 +00:00
|
|
|
if (StoredVec->getType() != SubVT)
|
2017-09-01 15:36:00 +00:00
|
|
|
StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
|
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
StoredVecs.push_back(StoredVec);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Concatenate all vectors into a wide vector.
|
2017-02-01 17:45:46 +00:00
|
|
|
Value *WideVec = concatenateVectors(Builder, StoredVecs);
|
2015-06-08 06:39:56 +00:00
|
|
|
|
|
|
|
// Interleave the elements in the wide vector.
|
2017-02-01 17:45:46 +00:00
|
|
|
Constant *IMask = createInterleaveMask(Builder, VF, InterleaveFactor);
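Assuming createInterleaveMask(Builder, VF, Factor) picks lane I of each concatenated member vector in turn, the index pattern it encodes can be reproduced as below; interleaveMaskIndices is an illustrative name only:

  #include <vector>

  // Indices assumed to be encoded by an interleave mask: for each lane, take
  // that lane from every concatenated member vector. For VF = 4 and Factor = 2
  // this yields {0, 4, 1, 5, 2, 6, 3, 7}.
  std::vector<unsigned> interleaveMaskIndices(unsigned VF, unsigned Factor) {
    std::vector<unsigned> Indices;
    for (unsigned Lane = 0; Lane < VF; ++Lane)
      for (unsigned Member = 0; Member < Factor; ++Member)
        Indices.push_back(Member * VF + Lane);
    return Indices;
  }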
|
2015-06-08 06:39:56 +00:00
|
|
|
Value *IVec = Builder.CreateShuffleVector(WideVec, UndefVec, IMask,
|
|
|
|
"interleaved.vec");
|
|
|
|
|
|
|
|
Instruction *NewStoreInstr =
|
|
|
|
Builder.CreateAlignedStore(IVec, NewPtrs[Part], Group->getAlignment());
|
2017-12-06 22:42:24 +00:00
|
|
|
|
|
|
|
Group->addMetadata(NewStoreInstr);
|
2015-06-08 06:39:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
|
|
|
|
VectorParts *BlockInMask) {
|
2013-01-25 21:47:42 +00:00
|
|
|
// Attempt to issue a wide load.
|
|
|
|
LoadInst *LI = dyn_cast<LoadInst>(Instr);
|
|
|
|
StoreInst *SI = dyn_cast<StoreInst>(Instr);
|
|
|
|
|
|
|
|
assert((LI || SI) && "Invalid Load/Store instruction");
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
LoopVectorizationCostModel::InstWidening Decision =
|
|
|
|
Cost->getWideningDecision(Instr, VF);
|
|
|
|
assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
|
|
|
|
"CM decision should be taken at this point");
|
|
|
|
if (Decision == LoopVectorizationCostModel::CM_Interleave)
|
2015-06-08 06:39:56 +00:00
|
|
|
return vectorizeInterleaveGroup(Instr);
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
Type *ScalarDataTy = getMemInstValueType(Instr);
|
2013-01-25 21:47:42 +00:00
|
|
|
Type *DataTy = VectorType::get(ScalarDataTy, VF);
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(Instr);
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned Alignment = getMemInstAlignment(Instr);
|
2013-11-15 23:09:33 +00:00
|
|
|
// An alignment of 0 means target abi alignment. We need to use the scalar's
|
|
|
|
// target abi alignment in such a case.
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = Instr->getModule()->getDataLayout();
|
2013-11-15 23:09:33 +00:00
|
|
|
if (!Alignment)
|
2015-03-10 02:37:25 +00:00
|
|
|
Alignment = DL.getABITypeAlignment(ScalarDataTy);
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned AddressSpace = getMemInstAddressSpace(Instr);
|
2014-01-28 01:01:53 +00:00
|
|
|
|
2016-09-08 19:11:07 +00:00
|
|
|
// Determine if the pointer operand of the access is either consecutive or
|
|
|
|
// reverse consecutive.
|
2017-12-16 02:55:24 +00:00
|
|
|
bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse);
|
|
|
|
bool ConsecutiveStride =
|
|
|
|
Reverse || (Decision == LoopVectorizationCostModel::CM_Widen);
|
2016-09-08 19:11:07 +00:00
|
|
|
bool CreateGatherScatter =
|
2017-02-08 19:25:23 +00:00
|
|
|
(Decision == LoopVectorizationCostModel::CM_GatherScatter);
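A compact summary of the dispatch performed here, written as standalone C++; the enum mirrors the cost-model decisions tested in the surrounding code, but the function itself is purely illustrative:

  // Illustrative dispatch table only; the real handling is the code around it.
  enum class Widening { Interleave, Widen, WidenReverse, GatherScatter, Scalarize };

  const char *lowering(Widening D) {
    switch (D) {
    case Widening::Interleave:    return "vectorizeInterleaveGroup";
    case Widening::Widen:         return "consecutive wide load/store";
    case Widening::WidenReverse:  return "consecutive but reversed wide load/store";
    case Widening::GatherScatter: return "masked gather/scatter";
    case Widening::Scalarize:     return "handled elsewhere; asserted against here";
    }
    return "unknown";
  }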
|
2013-01-25 21:47:42 +00:00
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
// Either Ptr feeds a vector load/store, or a vector GEP should feed a vector
|
|
|
|
// gather/scatter. Otherwise Decision should have been to Scalarize.
|
|
|
|
assert((ConsecutiveStride || CreateGatherScatter) &&
|
|
|
|
"The instruction should be scalarized");
|
2013-01-25 21:47:42 +00:00
|
|
|
|
|
|
|
// Handle consecutive loads/stores.
|
2017-06-27 08:41:19 +00:00
|
|
|
if (ConsecutiveStride)
|
2017-08-27 12:55:46 +00:00
|
|
|
Ptr = getOrCreateScalarValue(Ptr, {0, 0});
|
2013-01-25 21:47:42 +00:00
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VectorParts Mask;
|
|
|
|
bool isMaskRequired = BlockInMask;
|
|
|
|
if (isMaskRequired)
|
|
|
|
Mask = *BlockInMask;
|
|
|
|
|
2013-01-25 21:47:42 +00:00
|
|
|
// Handle Stores:
|
|
|
|
if (SI) {
|
2013-06-28 16:26:54 +00:00
|
|
|
setDebugLocFromInst(Builder, SI);
|
2015-10-27 19:02:52 +00:00
|
|
|
|
2013-01-25 21:47:42 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2016-02-17 19:23:04 +00:00
|
|
|
Instruction *NewSI = nullptr;
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *StoredVal = getOrCreateVectorValue(SI->getValueOperand(), Part);
|
2016-02-17 19:23:04 +00:00
|
|
|
if (CreateGatherScatter) {
|
2017-11-20 12:01:47 +00:00
|
|
|
Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
|
|
|
|
NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
|
|
|
|
MaskPart);
|
2016-02-17 19:23:04 +00:00
|
|
|
} else {
|
|
|
|
// Calculate the pointer for the specific unroll-part.
|
|
|
|
Value *PartPtr =
|
2016-05-05 00:54:54 +00:00
|
|
|
Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
|
2013-01-25 21:47:42 +00:00
|
|
|
|
2016-02-17 19:23:04 +00:00
|
|
|
if (Reverse) {
|
|
|
|
// If we store to reverse consecutive memory locations, then we need
|
|
|
|
// to reverse the order of elements in the stored value.
|
2017-06-27 08:41:19 +00:00
|
|
|
StoredVal = reverseVector(StoredVal);
|
|
|
|
// We don't want to update the value in the map as it might be used in
|
|
|
|
// another expression. So don't call resetVectorValue(StoredVal).
|
|
|
|
|
2016-02-17 19:23:04 +00:00
|
|
|
// If the address is consecutive but reversed, then the
|
|
|
|
// wide store needs to start at the last vector element.
|
2016-05-05 00:54:54 +00:00
|
|
|
PartPtr =
|
|
|
|
Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
|
|
|
|
PartPtr =
|
|
|
|
Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
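The two GEPs above place the wide access for a reversed group at offset 1 - (Part + 1) * VF from the scalar pointer; a tiny standalone helper (reversePartStart is an illustrative name) makes the arithmetic explicit:

  // start(Part) = -Part * VF + (1 - VF) = 1 - (Part + 1) * VF.
  // For VF = 4: Part 0 covers elements [-3 .. 0], Part 1 covers [-7 .. -4];
  // reversing the stored value afterwards restores the descending order of the
  // original scalar iterations.
  int reversePartStart(int Part, int VF) { return 1 - (Part + 1) * VF; }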
|
2017-11-20 12:01:47 +00:00
|
|
|
if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
|
2017-07-31 13:21:42 +00:00
|
|
|
Mask[Part] = reverseVector(Mask[Part]);
|
2016-02-17 19:23:04 +00:00
|
|
|
}
|
2013-01-25 21:47:42 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
Value *VecPtr =
|
|
|
|
Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
|
2014-12-16 11:50:42 +00:00
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
if (isMaskRequired)
|
2017-06-27 08:41:19 +00:00
|
|
|
NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
|
2016-02-17 19:23:04 +00:00
|
|
|
Mask[Part]);
|
2016-05-05 00:54:54 +00:00
|
|
|
else
|
2017-06-27 08:41:19 +00:00
|
|
|
NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
|
2016-02-17 19:23:04 +00:00
|
|
|
}
|
2016-03-17 20:32:37 +00:00
|
|
|
addMetadata(NewSI, SI);
|
2013-01-25 21:47:42 +00:00
|
|
|
}
|
2013-06-27 00:45:41 +00:00
|
|
|
return;
|
2013-01-25 21:47:42 +00:00
|
|
|
}
|
|
|
|
|
2013-06-28 00:38:54 +00:00
|
|
|
// Handle loads.
|
|
|
|
assert(LI && "Must have a load instruction");
|
2013-06-28 16:26:54 +00:00
|
|
|
setDebugLocFromInst(Builder, LI);
|
2013-01-25 21:47:42 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *NewLI;
|
2016-02-17 19:23:04 +00:00
|
|
|
if (CreateGatherScatter) {
|
2017-11-20 12:01:47 +00:00
|
|
|
Value *MaskPart = isMaskRequired ? Mask[Part] : nullptr;
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *VectorGep = getOrCreateVectorValue(Ptr, Part);
|
|
|
|
NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
|
2017-05-19 10:40:18 +00:00
|
|
|
nullptr, "wide.masked.gather");
|
2017-06-27 08:41:19 +00:00
|
|
|
addMetadata(NewLI, LI);
|
2016-02-17 19:23:04 +00:00
|
|
|
} else {
|
|
|
|
// Calculate the pointer for the specific unroll-part.
|
|
|
|
Value *PartPtr =
|
2016-05-05 00:54:54 +00:00
|
|
|
Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(Part * VF));
|
2013-01-25 21:47:42 +00:00
|
|
|
|
2016-02-17 19:23:04 +00:00
|
|
|
if (Reverse) {
|
|
|
|
// If the address is consecutive but reversed, then the
|
|
|
|
// wide load needs to start at the last vector element.
|
|
|
|
PartPtr = Builder.CreateGEP(nullptr, Ptr, Builder.getInt32(-Part * VF));
|
|
|
|
PartPtr = Builder.CreateGEP(nullptr, PartPtr, Builder.getInt32(1 - VF));
|
2017-11-20 12:01:47 +00:00
|
|
|
if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
|
2017-07-31 13:21:42 +00:00
|
|
|
Mask[Part] = reverseVector(Mask[Part]);
|
2016-02-17 19:23:04 +00:00
|
|
|
}
|
2013-01-25 21:47:42 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
Value *VecPtr =
|
|
|
|
Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
|
2017-11-20 12:01:47 +00:00
|
|
|
if (isMaskRequired)
|
2016-02-17 19:23:04 +00:00
|
|
|
NewLI = Builder.CreateMaskedLoad(VecPtr, Alignment, Mask[Part],
|
|
|
|
UndefValue::get(DataTy),
|
|
|
|
"wide.masked.load");
|
|
|
|
else
|
|
|
|
NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
|
2017-06-27 08:41:19 +00:00
|
|
|
|
|
|
|
// Add metadata to the load, but setVectorValue to the reverse shuffle.
|
|
|
|
addMetadata(NewLI, LI);
|
|
|
|
if (Reverse)
|
|
|
|
NewLI = reverseVector(NewLI);
|
2016-02-17 19:23:04 +00:00
|
|
|
}
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(Instr, Part, NewLI);
|
2013-01-25 21:47:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-19 00:32:30 +00:00
|
|
|
void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
|
2017-08-27 12:55:46 +00:00
|
|
|
const VPIteration &Instance,
|
2016-08-24 11:37:57 +00:00
|
|
|
bool IfPredicateInstr) {
|
2012-10-17 18:25:06 +00:00
|
|
|
assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
|
|
|
|
|
2013-06-28 16:26:54 +00:00
|
|
|
setDebugLocFromInst(Builder, Instr);
|
2013-06-28 00:38:54 +00:00
|
|
|
|
2012-10-17 18:25:06 +00:00
|
|
|
// Does this instruction return a value?
|
|
|
|
bool IsVoidRetTy = Instr->getType()->isVoidTy();
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
Instruction *Cloned = Instr->clone();
|
|
|
|
if (!IsVoidRetTy)
|
|
|
|
Cloned->setName(Instr->getName() + ".cloned");
|
2016-12-19 08:22:17 +00:00
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// Replace the operands of the cloned instructions with their scalar
|
|
|
|
// equivalents in the new loop.
|
|
|
|
for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
|
|
|
|
auto *NewOp = getOrCreateScalarValue(Instr->getOperand(op), Instance);
|
|
|
|
Cloned->setOperand(op, NewOp);
|
|
|
|
}
|
|
|
|
addNewMetadata(Cloned, Instr);
|
2017-08-20 23:17:11 +00:00
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// Place the cloned scalar in the new loop.
|
|
|
|
Builder.Insert(Cloned);
|
2017-08-20 23:17:11 +00:00
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// Add the cloned scalar to the scalar map entry.
|
|
|
|
VectorLoopValueMap.setScalarValue(Instr, Instance, Cloned);
|
2017-08-20 23:17:11 +00:00
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// If we just cloned a new assumption, add it the assumption cache.
|
|
|
|
if (auto *II = dyn_cast<IntrinsicInst>(Cloned))
|
|
|
|
if (II->getIntrinsicID() == Intrinsic::assume)
|
|
|
|
AC->registerAssumption(II);
|
2017-08-20 23:17:11 +00:00
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// End if-block.
|
|
|
|
if (IfPredicateInstr)
|
|
|
|
PredicatedInstructions.push_back(Cloned);
|
2012-10-17 18:25:06 +00:00
|
|
|
}
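Conceptually, scalarizing an instruction for one (Part, Lane) instance just re-evaluates it on that lane's scalar operands; the standalone C++ below models that for an addition (the array shapes and names are illustrative, not LLVM types):

  #include <array>

  constexpr unsigned VF = 4, UF = 2;
  using Lanes = std::array<int, VF>;
  using Parts = std::array<Lanes, UF>;

  // Mirrors replacing each operand via getOrCreateScalarValue(Op, {Part, Lane})
  // and cloning the original instruction for that single instance.
  int scalarizedAdd(const Parts &B, const Parts &C,
                    unsigned Part, unsigned Lane) {
    return B[Part][Lane] + C[Part][Lane];
  }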
|
|
|
|
|
[SCEV][LV] Add SCEV Predicates and use them to re-implement stride versioning
Summary:
SCEV Predicates represent conditions that typically cannot be derived from
static analysis, but can be used to reduce SCEV expressions to forms which are
usable for different optimizers.
ScalarEvolution now has the rewriteUsingPredicate method which can simplify a
SCEV expression using a SCEVPredicateSet. The normal workflow of a pass using
SCEVPredicates would be to hold a SCEVPredicateSet; every time assumptions
need to be made, a new SCEV Predicate would be created and added to the set.
Each time after calling getSCEV, the user will call the rewriteUsingPredicate
method.
We add two types of predicates
SCEVPredicateSet - implements a set of predicates
SCEVEqualPredicate - tests for equality between two SCEV expressions
We use the SCEVEqualPredicate to re-implement stride versioning. Every time we
version a stride, we will add a SCEVEqualPredicate to the context.
Instead of adding specific stride checks, LoopVectorize now adds a more
generic SCEV check.
We only need to add support for this in the LoopVectorizer since this is the
only pass that will do stride versioning.
Reviewers: mzolotukhin, anemet, hfinkel, sanjoy
Subscribers: sanjoy, hfinkel, rengolin, jmolloy, llvm-commits
Differential Revision: http://reviews.llvm.org/D13595
llvm-svn: 251800
2015-11-02 14:41:02 +00:00
|
|
|
PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
|
|
|
|
Value *End, Value *Step,
|
2015-09-02 10:15:09 +00:00
|
|
|
Instruction *DL) {
|
|
|
|
BasicBlock *Header = L->getHeader();
|
|
|
|
BasicBlock *Latch = L->getLoopLatch();
|
|
|
|
// As we're just creating this loop, it's possible no latch exists
|
|
|
|
// yet. If so, use the header as this will be a single block loop.
|
|
|
|
if (!Latch)
|
|
|
|
Latch = Header;
|
2015-10-19 22:06:09 +00:00
|
|
|
|
|
|
|
IRBuilder<> Builder(&*Header->getFirstInsertionPt());
|
2016-11-07 21:59:40 +00:00
|
|
|
Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
|
|
|
|
setDebugLocFromInst(Builder, OldInst);
|
2015-09-02 10:15:09 +00:00
|
|
|
auto *Induction = Builder.CreatePHI(Start->getType(), 2, "index");
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(Latch->getTerminator());
|
2016-11-07 21:59:40 +00:00
|
|
|
setDebugLocFromInst(Builder, OldInst);
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2015-09-02 10:15:09 +00:00
|
|
|
// Create i+1 and fill the PHINode.
|
|
|
|
Value *Next = Builder.CreateAdd(Induction, Step, "index.next");
|
|
|
|
Induction->addIncoming(Start, L->getLoopPreheader());
|
|
|
|
Induction->addIncoming(Next, Latch);
|
|
|
|
// Create the compare.
|
|
|
|
Value *ICmp = Builder.CreateICmpEQ(Next, End);
|
|
|
|
Builder.CreateCondBr(ICmp, L->getExitBlock(), Header);
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2015-09-02 10:15:09 +00:00
|
|
|
// Now we have two terminators. Remove the old one from the block.
|
|
|
|
Latch->getTerminator()->eraseFromParent();
|
|
|
|
|
|
|
|
return Induction;
|
|
|
|
}
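A scalar analogue of the skeleton built by createInductionVariable, assuming (as the callers arrange) that End - Start is a non-zero multiple of Step; names are illustrative:

  // One canonical induction variable, stepped by Step and compared for
  // equality with End at the latch; the body runs at least once because the
  // minimum-iteration check has already been emitted before this loop.
  void inductionSkeleton(long Start, long End, long Step) {
    for (long Index = Start;;) {
      // ... vectorized loop body for this iteration ...
      Index += Step;            // "index.next"
      if (Index == End) break;  // CreateICmpEQ(Next, End) + CreateCondBr
    }
  }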
|
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
|
|
|
|
if (TripCount)
|
|
|
|
return TripCount;
|
|
|
|
|
|
|
|
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
|
|
|
|
// Find the loop boundaries.
|
Re-commit r255115, with the PredicatedScalarEvolution class moved to
ScalarEvolution.h, in order to avoid cyclic dependencies between the Transform
and Analysis modules:
[LV][LAA] Add a layer over SCEV to apply run-time checked knowledge on SCEV expressions
Summary:
This change creates a layer over ScalarEvolution for LAA and LV, and centralizes the
usage of SCEV predicates. The SCEVPredicatedLayer takes the statically deduced knowledge
by ScalarEvolution and applies the knowledge from the SCEV predicates. The end goal is
that both LAA and LV should use this interface everywhere.
This also solves a problem involving the result of SCEV expression rewriting when
the predicate changes. Suppose we have the expression (sext {a,+,b}) and two predicates
P1: {a,+,b} has nsw
P2: b = 1.
Applying P1 and then P2 gives us {a,+,1}, while applying P2 and then P1 gives us
sext({a,+,1}) (the AddRec expression was changed by P2 so P1 no longer applies).
The SCEVPredicatedLayer maintains the order of transformations by feeding back
the results of previous transformations into new transformations, thereby
avoiding this issue.
The SCEVPredicatedLayer maintains a cache to remember previous
SCEV rewriting results. This also has the benefit of reducing the overall number
of expression rewrites.
Reviewers: mzolotukhin, anemet
Subscribers: jmolloy, sanjoy, llvm-commits
Differential Revision: http://reviews.llvm.org/D14296
llvm-svn: 255122
2015-12-09 16:06:28 +00:00
|
|
|
ScalarEvolution *SE = PSE.getSE();
|
Re-commit [SCEV] Introduce a guarded backedge taken count and use it in LAA and LV
This re-commits r265535 which was reverted in r265541 because it
broke the windows bots. The problem was that we had a PointerIntPair
which took a pointer to a struct allocated with new. The problem
was that new doesn't provide sufficient alignment guarantees.
This pattern was already present before r265535 and it just happened
to work. To fix this, we now separate the PointerToIntPair from the
ExitNotTakenInfo struct into a pointer and a bool.
Original commit message:
Summary:
When the backedge-taken condition is computed from an icmp, SCEV can
deduce the backedge taken count only if one of the sides of the icmp
is an AddRecExpr. However, due to sign/zero extensions, we sometimes
end up with something that is not an AddRecExpr.
However, we can use SCEV predicates to produce a 'guarded' expression.
This change adds a method to SCEV to get this expression, and the
SCEV predicate associated with it.
In HowManyGreaterThans and HowManyLessThans we will now add a SCEV
predicate associated with the guarded backedge taken count when the
analyzed SCEV expression is not an AddRecExpr. Note that we only do
this as an alternative to returning a 'CouldNotCompute'.
We use this new feature in Loop Access Analysis and LoopVectorize to analyze
and transform more loops.
Reviewers: anemet, mzolotukhin, hfinkel, sanjoy
Subscribers: flyingforyou, mcrosier, atrick, mssimpso, sanjoy, mzolotukhin, llvm-commits
Differential Revision: http://reviews.llvm.org/D17201
llvm-svn: 265786
2016-04-08 14:29:09 +00:00
|
|
|
const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
|
2015-11-19 00:32:30 +00:00
|
|
|
assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
|
|
|
|
"Invalid loop count");
|
2015-09-02 10:15:16 +00:00
|
|
|
|
|
|
|
Type *IdxTy = Legal->getWidestInductionType();
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
// The exit count might have the type of i64 while the phi is i32. This can
|
|
|
|
// happen if we have an induction variable that is sign extended before the
|
|
|
|
// compare. The only way that we get a backedge taken count is that the
|
|
|
|
// induction variable was signed and as such will not overflow. In such a case
|
|
|
|
// truncation is legal.
|
2015-09-09 12:51:10 +00:00
|
|
|
if (BackedgeTakenCount->getType()->getPrimitiveSizeInBits() >
|
2015-09-02 10:15:16 +00:00
|
|
|
IdxTy->getPrimitiveSizeInBits())
|
2015-09-09 12:51:10 +00:00
|
|
|
BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
|
|
|
|
BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
// Get the total trip count from the count by adding 1.
|
2015-09-23 01:59:04 +00:00
|
|
|
const SCEV *ExitCount = SE->getAddExpr(
|
|
|
|
BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
|
2015-09-02 10:15:16 +00:00
|
|
|
|
|
|
|
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
|
|
|
|
|
|
|
|
// Expand the trip count and place the new instructions in the preheader.
|
|
|
|
// Notice that the pre-header does not change, only the loop body.
|
|
|
|
SCEVExpander Exp(*SE, DL, "induction");
|
|
|
|
|
|
|
|
// Count holds the overall loop count (N).
|
|
|
|
TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
|
|
|
|
L->getLoopPreheader()->getTerminator());
|
|
|
|
|
|
|
|
if (TripCount->getType()->isPointerTy())
|
|
|
|
TripCount =
|
2016-05-05 00:54:54 +00:00
|
|
|
CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
|
|
|
|
L->getLoopPreheader()->getTerminator());
|
2015-09-02 10:15:16 +00:00
|
|
|
|
|
|
|
return TripCount;
|
|
|
|
}
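The trip count is simply the (possibly truncated or zero-extended) backedge-taken count plus one; a one-line worked example with an illustrative name:

  // For a loop running i = 0 .. n-1 the backedge is taken n-1 times, so the
  // expanded trip count is (n-1) + 1 == n.
  unsigned long long tripCountFromBTC(unsigned long long BackedgeTakenCount) {
    return BackedgeTakenCount + 1;
  }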
|
|
|
|
|
|
|
|
Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
|
|
|
|
if (VectorTripCount)
|
|
|
|
return VectorTripCount;
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
Value *TC = getOrCreateTripCount(L);
|
|
|
|
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2016-04-27 18:21:36 +00:00
|
|
|
// Now we need to generate the expression for the part of the loop that the
|
|
|
|
// vectorized body will execute. This is equal to N - (N % Step) if scalar
|
|
|
|
// iterations are not required for correctness, or N - Step, otherwise. Step
|
|
|
|
// is equal to the vectorization factor (number of SIMD elements) times the
|
|
|
|
// unroll factor (number of SIMD instructions).
|
2015-09-02 10:15:16 +00:00
|
|
|
Constant *Step = ConstantInt::get(TC->getType(), VF * UF);
|
|
|
|
Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
|
2016-04-27 18:21:36 +00:00
|
|
|
|
|
|
|
// If there is a non-reversed interleaved group that may speculatively access
|
|
|
|
// memory out-of-bounds, we need to ensure that there will be at least one
|
|
|
|
// iteration of the scalar epilogue loop. Thus, if the step evenly divides
|
|
|
|
// the trip count, we set the remainder to be equal to the step. If the step
|
|
|
|
// does not evenly divide the trip count, no adjustment is necessary since
|
|
|
|
// there will already be scalar iterations. Note that the minimum iterations
|
|
|
|
// check ensures that N >= Step.
|
|
|
|
if (VF > 1 && Legal->requiresScalarEpilogue()) {
|
|
|
|
auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
|
|
|
|
R = Builder.CreateSelect(IsZero, Step, R);
|
|
|
|
}
|
|
|
|
|
2015-09-02 10:15:16 +00:00
|
|
|
VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
|
|
|
|
|
|
|
|
return VectorTripCount;
|
|
|
|
}
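The same computation in standalone scalar form, with a worked example; vectorTripCount is an illustrative name, not an LLVM helper:

  // Step = VF * UF. E.g. TC = 20, VF = 4, UF = 2: R = 4, vector trip count 16.
  // If TC = 16 and a scalar epilogue is required, R is bumped to 8, so 8
  // iterations are vectorized and 8 remain for the scalar remainder loop.
  unsigned vectorTripCount(unsigned TC, unsigned VF, unsigned UF,
                           bool RequiresScalarEpilogue) {
    unsigned Step = VF * UF;
    unsigned R = TC % Step;                          // "n.mod.vf"
    if (VF > 1 && RequiresScalarEpilogue && R == 0)
      R = Step;                                      // force >= 1 scalar iteration
    return TC - R;                                   // "n.vec"
  }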
|
|
|
|
|
2017-09-01 15:36:00 +00:00
|
|
|
Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
|
|
|
|
const DataLayout &DL) {
|
|
|
|
// Verify that V is a vector type with the same number of elements as DstVTy.
|
|
|
|
unsigned VF = DstVTy->getNumElements();
|
|
|
|
VectorType *SrcVecTy = cast<VectorType>(V->getType());
|
|
|
|
assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
|
|
|
|
Type *SrcElemTy = SrcVecTy->getElementType();
|
|
|
|
Type *DstElemTy = DstVTy->getElementType();
|
|
|
|
assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
|
|
|
|
"Vector elements must have same size");
|
|
|
|
|
|
|
|
// Do a direct cast if element types are castable.
|
|
|
|
if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
|
|
|
|
return Builder.CreateBitOrPointerCast(V, DstVTy);
|
|
|
|
}
|
|
|
|
// V cannot be directly cast to the desired vector type.
|
|
|
|
// May happen when V is a floating point vector but DstVTy is a vector of
|
|
|
|
// pointers or vice versa. Handle this with a two-step bitcast through an
|
|
|
|
// intermediate integer type, i.e. Ptr <-> Int <-> Float.
|
|
|
|
assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
|
|
|
|
"Only one type should be a pointer type");
|
|
|
|
assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
|
|
|
|
"Only one type should be a floating point type");
|
|
|
|
Type *IntTy =
|
|
|
|
IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
|
|
|
|
VectorType *VecIntTy = VectorType::get(IntTy, VF);
|
|
|
|
Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
|
|
|
|
return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
|
|
|
|
}
|
|
|
|
|
2015-09-02 10:15:22 +00:00
|
|
|
void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
|
|
|
|
BasicBlock *Bypass) {
|
|
|
|
Value *Count = getOrCreateTripCount(L);
|
|
|
|
BasicBlock *BB = L->getLoopPreheader();
|
|
|
|
IRBuilder<> Builder(BB->getTerminator());
|
|
|
|
|
2017-07-19 05:16:39 +00:00
|
|
|
// Generate code to check if the loop's trip count is less than VF * UF, or
|
|
|
|
// equal to it in case a scalar epilogue is required; this implies that the
|
|
|
|
// vector trip count is zero. This check also covers the case where adding one
|
|
|
|
// to the backedge-taken count overflowed leading to an incorrect trip count
|
|
|
|
// of zero. In this case we will also jump to the scalar loop.
|
|
|
|
auto P = Legal->requiresScalarEpilogue() ? ICmpInst::ICMP_ULE
|
|
|
|
: ICmpInst::ICMP_ULT;
|
|
|
|
Value *CheckMinIters = Builder.CreateICmp(
|
|
|
|
P, Count, ConstantInt::get(Count->getType(), VF * UF), "min.iters.check");
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
BasicBlock *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
|
[SCEV] Try to reuse existing value during SCEV expansion
Current SCEV expansion will expand SCEV as a sequence of operations
and doesn't reuse values that already exist. This will introduce
redundant computation which may not be cleaned up thoroughly by
following optimizations.
This patch introduces an ExprValueMap which is a map from SCEV to the
set of equal values with the same SCEV. When a SCEV is expanded, the
set of values is checked and reused whenever possible before generating
a sequence of operations.
The original commit triggered regressions in Polly tests. The regressions
exposed two problems which have been fixed in the current version.
1. Polly will generate a new function based on the old one. To generate an
instruction for the new function, it builds SCEV for the old instruction,
applies some transformation to the generated SCEV, then expands the transformed
SCEV and inserts the expanded value into the new function. Because SCEV expansion
may reuse values cached in ExprValueMap, a value from the old function may be
inserted into the new function, which is wrong.
In SCEVExpander::expand, there is a logic to check the cached value to
be used should dominate the insertion point. However, for the above
case, the check always passes. That is because the insertion point is
in a new function, which is unreachable from the old function. However
for unreachable node, DominatorTreeBase::dominates thinks it will be
dominated by any other node.
The fix is to simply add a check that the cached value to be used in
expansion should be in the same function as the insertion point instruction.
2. When the SCEV is of scConstant type, expanding it directly is cheaper than
reusing a normal value cached. Although in the cached value set in ExprValueMap,
there is a Constant type value, but it is not easy to find it out -- the cached
Value set is not sorted according to the potential cost. Existing reuse logic
in SCEVExpander::expand simply chooses the first legal element from the cached
value set.
The fix is that when the SCEV is of scConstant type, don't try the reuse
logic. simply expand it.
Differential Revision: http://reviews.llvm.org/D12090
llvm-svn: 259736
2016-02-04 01:27:38 +00:00
|
|
|
// Update dominator tree immediately if the generated block is a
|
|
|
|
// LoopBypassBlock because SCEV expansions to generate loop bypass
|
|
|
|
// checks may query it before the current function is finished.
|
|
|
|
DT->addNewBlock(NewBB, BB);
|
2015-09-02 10:15:22 +00:00
|
|
|
if (L->getParentLoop())
|
|
|
|
L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
|
|
|
|
ReplaceInstWithInst(BB->getTerminator(),
|
2017-07-19 05:16:39 +00:00
|
|
|
BranchInst::Create(Bypass, NewBB, CheckMinIters));
|
2015-09-02 10:15:22 +00:00
|
|
|
LoopBypassBlocks.push_back(BB);
|
|
|
|
}
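
// As a rough sketch (VF = 4 and UF = 2 are made-up values, %n stands for the
// trip count, and ult becomes ule when a scalar epilogue is required), the
// guard emitted above looks like:
//   %min.iters.check = icmp ult i64 %n, 8
//   br i1 %min.iters.check, label %scalar.ph, label %vector.ph
// i.e. the scalar loop is taken whenever fewer than VF * UF iterations remain.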
void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code to check the SCEV assumptions that we made.
  // We want the new basic block to start at the first instruction in a
  // sequence of instructions that form a check.
  SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
                   "scev.check");
  Value *SCEVCheck =
      Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());

  if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
    if (C->isZero())
      return;

  // Create a new block containing the stride check.
  BB->setName("vector.scevcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, SCEVCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;
}
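
// Sketch of one possible outcome (assuming the only predicate is a stride
// versioning check on a hypothetical %stride value): the expanded check in
// %vector.scevcheck is roughly
//   %stride.check = icmp ne i64 %stride, 1
//   br i1 %stride.check, label %scalar.ph, label %vector.ph
// so the vector loop is only reached when the assumed stride really is 1.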
void InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass) {
  BasicBlock *BB = L->getLoopPreheader();

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  Instruction *FirstCheckInst;
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
      Legal->getLAI()->addRuntimeChecks(BB->getTerminator());
  if (!MemRuntimeCheck)
    return;

  // Create a new block containing the memory check.
  BB->setName("vector.memcheck");
  auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
  // Update dominator tree immediately if the generated block is a
  // LoopBypassBlock because SCEV expansions to generate loop bypass
  // checks may query it before the current function is finished.
  DT->addNewBlock(NewBB, BB);
  if (L->getParentLoop())
    L->getParentLoop()->addBasicBlockToLoop(NewBB, *LI);
  ReplaceInstWithInst(BB->getTerminator(),
                      BranchInst::Create(Bypass, NewBB, MemRuntimeCheck));
  LoopBypassBlocks.push_back(BB);
  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = llvm::make_unique<LoopVersioning>(*Legal->getLAI(), OrigLoop, LI, DT,
                                           PSE.getSE());
  LVer->prepareNoAliasMetadata();
}
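
// Illustrative shape of %vector.memcheck for two hypothetical arrays A and B
// (the real checks come from LoopAccessInfo::addRuntimeChecks): the accessed
// ranges overlap iff each start is below the other end, roughly
//   %bound0   = icmp ult i8* %a.start, %b.end
//   %bound1   = icmp ult i8* %b.start, %a.end
//   %conflict = and i1 %bound0, %bound1
//   br i1 %conflict, label %scalar.ph, label %vector.ph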
BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
  /*
   In this function we generate a new loop. The new loop will contain
   the vectorized instructions while the old loop will continue to run the
   scalar remainder.

       [ ] <-- loop iteration number check.
      /   |
     /    v
    |    [ ]     <-- vector loop bypass (may consist of multiple blocks).
    |  /  |
    | /   v
    ||   [ ]     <-- vector pre header.
    |/    |
    |     v
    |    [ ] \
    |    [ ]_|   <-- vector loop.
    |     |
    |     v
    |   -[ ]   <--- middle-block.
    |  /  |
    | /   v
    -|- >[ ]     <--- new preheader.
     |    |
     |    v
     |   [ ] \
     |   [ ]_|   <-- old scalar loop to handle remainder.
      \   |
       \  v
        >[ ]     <-- exit block.
   ...
   */

  BasicBlock *OldBasicBlock = OrigLoop->getHeader();
  BasicBlock *VectorPH = OrigLoop->getLoopPreheader();
  BasicBlock *ExitBlock = OrigLoop->getExitBlock();
  assert(VectorPH && "Invalid loop structure");
  assert(ExitBlock && "Must have an exit block");

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try to obtain an induction variable from the original loop as hard
  // as possible. However if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();

  // Split the single block loop into the two loop structure described above.
  BasicBlock *VecBody =
      VectorPH->splitBasicBlock(VectorPH->getTerminator(), "vector.body");
  BasicBlock *MiddleBlock =
      VecBody->splitBasicBlock(VecBody->getTerminator(), "middle.block");
  BasicBlock *ScalarPH =
      MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), "scalar.ph");

  // Create and register the new vector loop.
  Loop *Lp = LI->AllocateLoop();
  Loop *ParentLoop = OrigLoop->getParentLoop();

  // Insert the new loop into the loop nest and register the new basic blocks
  // before calling any utilities such as SCEV that require valid LoopInfo.
  if (ParentLoop) {
    ParentLoop->addChildLoop(Lp);
    ParentLoop->addBasicBlockToLoop(ScalarPH, *LI);
    ParentLoop->addBasicBlockToLoop(MiddleBlock, *LI);
  } else {
    LI->addTopLevelLoop(Lp);
  }
  Lp->addBasicBlockToLoop(VecBody, *LI);

  // Find the loop boundaries.
  Value *Count = getOrCreateTripCount(Lp);

  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  // Now, compare the new count to zero. If it is zero skip the vector loop and
  // jump to the scalar loop. This check also covers the case where the
  // backedge-taken count is uint##_max: adding one to it will overflow leading
  // to an incorrect trip count of zero. In this (rare) case we will also jump
  // to the scalar loop.
  emitMinimumIterationCountCheck(Lp, ScalarPH);

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  emitSCEVChecks(Lp, ScalarPH);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  emitMemRuntimeChecks(Lp, ScalarPH);

  // Generate the induction variable.
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF * UF);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // We are going to resume the execution of the scalar loop.
  // Go over all of the induction variables that we found and fix the
  // PHIs that are left in the scalar version of the loop.
  // The starting values of PHI nodes depend on the counter of the last
  // iteration in the vectorized loop.
  // If we come from a bypass edge then we need to start from the original
  // start value.

  // This variable saves the new starting index for the scalar loop. It is used
  // to test if there are any tail iterations left once the vector loop has
  // completed.
  LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
  for (auto &InductionEntry : *List) {
    PHINode *OrigPhi = InductionEntry.first;
    InductionDescriptor II = InductionEntry.second;

    // Create phi nodes to merge from the backedge-taken check block.
    PHINode *BCResumeVal = PHINode::Create(
        OrigPhi->getType(), 3, "bc.resume.val", ScalarPH->getTerminator());
    Value *&EndValue = IVEndValues[OrigPhi];
    if (OrigPhi == OldInduction) {
      // We know what the end value is.
      EndValue = CountRoundDown;
    } else {
      IRBuilder<> B(Lp->getLoopPreheader()->getTerminator());
      Type *StepType = II.getStep()->getType();
      Instruction::CastOps CastOp =
          CastInst::getCastOpcode(CountRoundDown, true, StepType, true);
      Value *CRD = B.CreateCast(CastOp, CountRoundDown, StepType, "cast.crd");
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      EndValue = II.transform(B, CRD, PSE.getSE(), DL);
      EndValue->setName("ind.end");
    }

    // The new PHI merges the original incoming value, in case of a bypass,
    // or the value at the end of the vectorized loop.
    BCResumeVal->addIncoming(EndValue, MiddleBlock);

    // Fix the scalar body counter (PHI node).
    unsigned BlockIdx = OrigPhi->getBasicBlockIndex(ScalarPH);

    // The old induction's phi node in the scalar body needs the truncated
    // value.
    for (BasicBlock *BB : LoopBypassBlocks)
      BCResumeVal->addIncoming(II.getStartValue(), BB);
    OrigPhi->setIncomingValue(BlockIdx, BCResumeVal);
  }

  // Add a check in the middle block to see if we have completed
  // all of the iterations in the first vector loop.
  // If (N - N%VF) == N, then we *don't* need to run the remainder.
  Value *CmpN =
      CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, Count,
                      CountRoundDown, "cmp.n", MiddleBlock->getTerminator());
  ReplaceInstWithInst(MiddleBlock->getTerminator(),
                      BranchInst::Create(ExitBlock, ScalarPH, CmpN));

  // Get ready to start creating new instructions into the vectorized body.
  Builder.SetInsertPoint(&*VecBody->getFirstInsertionPt());

  // Save the state.
  LoopVectorPreHeader = Lp->getLoopPreheader();
  LoopScalarPreHeader = ScalarPH;
  LoopMiddleBlock = MiddleBlock;
  LoopExitBlock = ExitBlock;
  LoopVectorBody = VecBody;
  LoopScalarBody = OldBasicBlock;

  // Keep all loop hints from the original loop on the vector loop (we'll
  // replace the vectorizer-specific hints below).
  if (MDNode *LID = OrigLoop->getLoopID())
    Lp->setLoopID(LID);

  LoopVectorizeHints Hints(Lp, true, *ORE);
  Hints.setAlreadyVectorized();

  return LoopVectorPreHeader;
}
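
// A small worked example of the middle-block check above: with a trip count
// of 10 and VF * UF == 8, CountRoundDown is 8, so %cmp.n is false and the
// scalar loop runs the remaining two iterations; with a trip count of 16,
// %cmp.n is true and control goes straight to the exit block.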
// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the
  // penultimate value (the value that feeds into the phi from the loop latch).
  // We allow both, but they, obviously, have different values.

  assert(OrigLoop->getExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent
  // SCEVs, that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());
      Value *CountMinusOne = B.CreateSub(
          CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
      Value *CMO =
          !II.getStep()->getType()->isIntegerTy()
              ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
                             II.getStep()->getType())
              : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
      CMO->setName("cast.cmo");
      Value *Escape = II.transform(B, CMO, PSE.getSE(), DL);
      Escape->setName("ind.escape");
      MissingVals[UI] = Escape;
    }
  }

  for (auto &I : MissingVals) {
    PHINode *PHI = cast<PHINode>(I.first);
    // One corner case we have to handle is two IVs "chasing" each other,
    // that is %IV2 = phi [...], [ %IV1, %latch ]
    // In this case, if IV1 has an external use, we need to avoid adding both
    // "last value of IV1" and "penultimate value of IV2". So, verify that we
    // don't already have an incoming value for the middle block.
    if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
      PHI->addIncoming(I.second, MiddleBlock);
  }
}
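
// Worked example (hypothetical source loop): for
//   for (i = 0; i < n; ++i) { ... }   // with some use of i after the loop
// an LCSSA phi of the post-increment value gets EndValue (CountRoundDown for
// the primary IV) from the middle block, while a user of the phi itself gets
// the penultimate value Start + Step * (CountRoundDown - 1), named
// %ind.escape above.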
namespace {

struct CSEDenseMapInfo {
  static bool canHandle(const Instruction *I) {
    return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
           isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
  }

  static inline Instruction *getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline Instruction *getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(const Instruction *I) {
    assert(canHandle(I) && "Unknown instruction!");
    return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
                                                           I->value_op_end()));
  }

  static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(RHS);
  }
};

} // end anonymous namespace
/// \brief Perform CSE of induction variable instructions.
static void cse(BasicBlock *BB) {
  // Perform simple cse.
  SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *In = &*I++;

    if (!CSEDenseMapInfo::canHandle(In))
      continue;

    // Check if we can replace this instruction with any of the
    // visited instructions.
    if (Instruction *V = CSEMap.lookup(In)) {
      In->replaceAllUsesWith(V);
      In->eraseFromParent();
      continue;
    }

    CSEMap[In] = In;
  }
}
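
// For example, unrolling can leave two identical shuffles in the vector body
// (names are illustrative):
//   %s1 = shufflevector <4 x i32> %a, <4 x i32> %b,
//                       <4 x i32> <i32 0, i32 1, i32 4, i32 5>
//   %s2 = shufflevector <4 x i32> %a, <4 x i32> %b,
//                       <4 x i32> <i32 0, i32 1, i32 4, i32 5>
// Both hash to the same CSEDenseMapInfo key, so %s2 is replaced by %s1 and
// erased.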
/// \brief Estimate the overhead of scalarizing an instruction. This is a
/// convenience wrapper for the type-based getScalarizationOverhead API.
static unsigned getScalarizationOverhead(Instruction *I, unsigned VF,
                                         const TargetTransformInfo &TTI) {
  if (VF == 1)
    return 0;

  unsigned Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) ||
       !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(RetTy, true, false);

  if (CallInst *CI = dyn_cast<CallInst>(I)) {
    SmallVector<const Value *, 4> Operands(CI->arg_operands());
    Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
  } else if (!isa<StoreInst>(I) ||
             !TTI.supportsEfficientVectorElementLoadStore()) {
    SmallVector<const Value *, 4> Operands(I->operand_values());
    Cost += TTI.getOperandsScalarizationOverhead(Operands, VF);
  }

  return Cost;
}
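
// Rough worked example (assuming each insert/extract of a lane costs 1): for a
// binary operation producing <4 x i32>, the result accounts for 4 inserts and
// its two operands for 8 extracts, so the scalarization overhead reported here
// would be 12, on top of the 4 scalar operations themselves.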
// Estimate cost of a call instruction CI if it were vectorized with factor VF.
// Return the cost of the instruction, including scalarization overhead if it's
// needed. The flag NeedToScalarize shows if the call needs to be scalarized -
// i.e. either vector version isn't available, or is too expensive.
static unsigned getVectorCallCost(CallInst *CI, unsigned VF,
                                  const TargetTransformInfo &TTI,
                                  const TargetLibraryInfo *TLI,
                                  bool &NeedToScalarize) {
  Function *F = CI->getCalledFunction();
  StringRef FnName = CI->getCalledFunction()->getName();
  Type *ScalarRetTy = CI->getType();
  SmallVector<Type *, 4> Tys, ScalarTys;
  for (auto &ArgOp : CI->arg_operands())
    ScalarTys.push_back(ArgOp->getType());

  // Estimate cost of scalarized vector call. The source operands are assumed
  // to be vectors, so we need to extract individual elements from there,
  // execute VF scalar calls, and then gather the result into the vector return
  // value.
  unsigned ScalarCallCost = TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys);
  if (VF == 1)
    return ScalarCallCost;

  // Compute corresponding vector type for return value and arguments.
  Type *RetTy = ToVectorTy(ScalarRetTy, VF);
  for (Type *ScalarTy : ScalarTys)
    Tys.push_back(ToVectorTy(ScalarTy, VF));

  // Compute costs of unpacking argument values for the scalar calls and
  // packing the return values to a vector.
  unsigned ScalarizationCost = getScalarizationOverhead(CI, VF, TTI);

  unsigned Cost = ScalarCallCost * VF + ScalarizationCost;

  // If we can't emit a vector call for this function, then the currently found
  // cost is the cost we need to return.
  NeedToScalarize = true;
  if (!TLI || !TLI->isFunctionVectorizable(FnName, VF) || CI->isNoBuiltin())
    return Cost;

  // If the corresponding vector cost is cheaper, return its cost.
  unsigned VectorCallCost = TTI.getCallInstrCost(nullptr, RetTy, Tys);
  if (VectorCallCost < Cost) {
    NeedToScalarize = false;
    return VectorCallCost;
  }
  return Cost;
}
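
// Worked example with made-up numbers: if the scalar call costs 10, VF is 4
// and the scalarization overhead is 12, the scalarized estimate is
// 10 * 4 + 12 = 52; if TLI reports a vectorized variant whose call costs 30,
// that is cheaper, so NeedToScalarize is cleared and 30 is returned.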
// Estimate cost of an intrinsic call instruction CI if it were vectorized with
// factor VF. Return the cost of the instruction, including scalarization
// overhead if it's needed.
static unsigned getVectorIntrinsicCost(CallInst *CI, unsigned VF,
                                       const TargetTransformInfo &TTI,
                                       const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  assert(ID && "Expected intrinsic call!");

  FastMathFlags FMF;
  if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
    FMF = FPMO->getFastMathFlags();

  SmallVector<Value *, 4> Operands(CI->arg_operands());
  return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
}
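
// For instance (a sketch), a call to llvm.sqrt.f32 costed at VF == 4 is
// queried as the cost of the corresponding <4 x float> sqrt intrinsic, with
// any fast-math flags of the original call passed along.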
static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
}

static Type *largestIntegerVectorType(Type *T1, Type *T2) {
  auto *I1 = cast<IntegerType>(T1->getVectorElementType());
  auto *I2 = cast<IntegerType>(T2->getVectorElementType());
  return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
}
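
// Example: smallestIntegerVectorType(<4 x i32>, <4 x i16>) returns the
// <4 x i16> type, while largestIntegerVectorType returns the <4 x i32> type.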
void InnerLoopVectorizer::truncateToMinimalBitwidths() {
|
|
|
|
// For every instruction `I` in MinBWs, truncate the operands, create a
|
|
|
|
// truncated version of `I` and reextend its result. InstCombine runs
|
|
|
|
// later and will remove any ext/trunc pairs.
|
2016-05-18 11:57:58 +00:00
|
|
|
SmallPtrSet<Value *, 4> Erased;
|
2016-10-05 20:23:46 +00:00
|
|
|
for (const auto &KV : Cost->getMinimalBitwidths()) {
|
2016-12-16 16:52:35 +00:00
|
|
|
// If the value wasn't vectorized, we must maintain the original scalar
|
|
|
|
// type. The absence of the value from VectorLoopValueMap indicates that it
|
|
|
|
// wasn't vectorized.
|
2017-06-27 08:41:19 +00:00
|
|
|
if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
|
2016-12-16 16:52:35 +00:00
|
|
|
continue;
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
Value *I = getOrCreateVectorValue(KV.first, Part);
|
2016-07-12 19:35:15 +00:00
|
|
|
if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
|
[LoopVectorize] Shrink integer operations into the smallest type possible
C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
type (e.g. i32) whenever arithmetic is performed on them.
For targets with native i8 or i16 operations, usually InstCombine can shrink
the arithmetic type down again. However InstCombine refuses to create illegal
types, so for targets without i8 or i16 registers, the lengthening and
shrinking remains.
Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
their scalar equivalents do not, so during vectorization it is important to
remove these lengthens and truncates when deciding the profitability of
vectorization.
The algorithm this uses starts at truncs and icmps, trawling their use-def
chains until they terminate or instructions outside the loop are found (or
unsafe instructions like inttoptr casts are found). If the use-def chains
starting from different root instructions (truncs/icmps) meet, they are
unioned. The demanded bits of each node in the graph are ORed together to form
an overall mask of the demanded bits in the entire graph. The minimum bitwidth
that graph can be truncated to is the bitwidth minus the number of leading
zeroes in the overall mask.
The intention is that this algorithm should "first do no harm", so it will
never insert extra cast instructions. This is why the use-def graphs are
unioned, so that subgraphs with different minimum bitwidths do not need casts
inserted between them.
This algorithm works hard to reduce compile time impact. DemandedBits are only
queried if there are extends of illegal types and if a truncate to an illegal
type is seen. In the general case, this results in a simple linear scan of the
instructions in the loop.
No non-noise compile time impact was seen on a clang bootstrap build.
llvm-svn: 250032
2015-10-12 12:34:45 +00:00
|
|
|
continue;
|
|
|
|
Type *OriginalTy = I->getType();
|
2016-05-05 00:54:54 +00:00
|
|
|
Type *ScalarTruncatedTy =
|
|
|
|
IntegerType::get(OriginalTy->getContext(), KV.second);
|
[LoopVectorize] Shrink integer operations into the smallest type possible
C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
type (e.g. i32) whenever arithmetic is performed on them.
For targets with native i8 or i16 operations, usually InstCombine can shrink
the arithmetic type down again. However InstCombine refuses to create illegal
types, so for targets without i8 or i16 registers, the lengthening and
shrinking remains.
Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
their scalar equivalents do not, so during vectorization it is important to
remove these lengthens and truncates when deciding the profitability of
vectorization.
The algorithm this uses starts at truncs and icmps, trawling their use-def
chains until they terminate or instructions outside the loop are found (or
unsafe instructions like inttoptr casts are found). If the use-def chains
starting from different root instructions (truncs/icmps) meet, they are
unioned. The demanded bits of each node in the graph are ORed together to form
an overall mask of the demanded bits in the entire graph. The minimum bitwidth
that graph can be truncated to is the bitwidth minus the number of leading
zeroes in the overall mask.
The intention is that this algorithm should "first do no harm", so it will
never insert extra cast instructions. This is why the use-def graphs are
unioned, so that subgraphs with different minimum bitwidths do not need casts
inserted between them.
This algorithm works hard to reduce compile time impact. DemandedBits are only
queried if there are extends of illegal types and if a truncate to an illegal
type is seen. In the general case, this results in a simple linear scan of the
instructions in the loop.
No non-noise compile time impact was seen on a clang bootstrap build.
llvm-svn: 250032
2015-10-12 12:34:45 +00:00
|
|
|
Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
|
|
|
|
OriginalTy->getVectorNumElements());
|
|
|
|
if (TruncatedTy == OriginalTy)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
IRBuilder<> B(cast<Instruction>(I));
|
2016-05-05 00:54:54 +00:00
|
|
|
auto ShrinkOperand = [&](Value *V) -> Value * {
|
[LoopVectorize] Shrink integer operations into the smallest type possible
C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
type (e.g. i32) whenever arithmetic is performed on them.
For targets with native i8 or i16 operations, usually InstCombine can shrink
the arithmetic type down again. However InstCombine refuses to create illegal
types, so for targets without i8 or i16 registers, the lengthening and
shrinking remains.
Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
their scalar equivalents do not, so during vectorization it is important to
remove these lengthens and truncates when deciding the profitability of
vectorization.
The algorithm this uses starts at truncs and icmps, trawling their use-def
chains until they terminate or instructions outside the loop are found (or
unsafe instructions like inttoptr casts are found). If the use-def chains
starting from different root instructions (truncs/icmps) meet, they are
unioned. The demanded bits of each node in the graph are ORed together to form
an overall mask of the demanded bits in the entire graph. The minimum bitwidth
that graph can be truncated to is the bitwidth minus the number of leading
zeroes in the overall mask.
The intention is that this algorithm should "first do no harm", so it will
never insert extra cast instructions. This is why the use-def graphs are
unioned, so that subgraphs with different minimum bitwidths do not need casts
inserted between them.
This algorithm works hard to reduce compile time impact. DemandedBits are only
queried if there are extends of illegal types and if a truncate to an illegal
type is seen. In the general case, this results in a simple linear scan of the
instructions in the loop.
No non-noise compile time impact was seen on a clang bootstrap build.
llvm-svn: 250032
2015-10-12 12:34:45 +00:00
|
|
|
if (auto *ZI = dyn_cast<ZExtInst>(V))
|
|
|
|
if (ZI->getSrcTy() == TruncatedTy)
|
|
|
|
return ZI->getOperand(0);
|
|
|
|
return B.CreateZExtOrTrunc(V, TruncatedTy);
|
|
|
|
};
|
|
|
|
|
|
|
|
// The actual instruction modification depends on the instruction type,
|
|
|
|
// unfortunately.
|
|
|
|
Value *NewI = nullptr;
|
2016-07-12 19:35:15 +00:00
|
|
|
if (auto *BO = dyn_cast<BinaryOperator>(I)) {
|
2016-05-05 00:54:54 +00:00
|
|
|
NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
|
|
|
|
ShrinkOperand(BO->getOperand(1)));
|
[LoopVectorize] Don't preserve nsw/nuw flags on shrunken ops.
If we're shrinking a binary operation, it may be the case that the new
operation wraps where the old one didn't. If this happens, the behavior
should be well-defined. So, we can't always carry wrapping flags with us
when we shrink operations.
If we do, we get incorrect optimizations in cases like:
void foo(const unsigned char *from, unsigned char *to, int n) {
for (int i = 0; i < n; i++)
to[i] = from[i] - 128;
}
which gets optimized to:
void foo(const unsigned char *from, unsigned char *to, int n) {
for (int i = 0; i < n; i++)
to[i] = from[i] | 128;
}
Because:
- InstCombine turned `sub i32 %from.i, 128` into
`add nuw nsw i32 %from.i, 128`.
- LoopVectorize vectorized the add to be `add nuw nsw <16 x i8>` with a
vector full of `i8 128`s
- InstCombine took advantage of the fact that the newly-shrunken add
"couldn't wrap", and changed the `add` to an `or`.
InstCombine seems happy to figure out whether we can add nuw/nsw on its
own, so I just decided to drop the flags. There are already a number of
places in LoopVectorize where we rely on InstCombine to clean up.
llvm-svn: 305053
2017-06-09 03:56:15 +00:00
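A concrete instance of the miscompile described above: with from[i] == 200 the
original loop stores 200 - 128 == 72, while the rewritten loop stores
200 | 128 == 200. The two forms only agree for inputs below 128, which is why
the shrunken i8 add must not carry nuw/nsw.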
|
|
|
|
|
|
|
// Any wrapping introduced by shrinking this operation shouldn't be
|
|
|
|
// considered undefined behavior. So, we can't unconditionally copy
|
|
|
|
// arithmetic wrapping flags to NewI.
|
|
|
|
cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
|
2016-07-12 19:35:15 +00:00
|
|
|
} else if (auto *CI = dyn_cast<ICmpInst>(I)) {
|
2016-05-05 00:54:54 +00:00
|
|
|
NewI =
|
|
|
|
B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
|
|
|
|
ShrinkOperand(CI->getOperand(1)));
|
2016-07-12 19:35:15 +00:00
|
|
|
} else if (auto *SI = dyn_cast<SelectInst>(I)) {
|
|
|
|
NewI = B.CreateSelect(SI->getCondition(),
|
|
|
|
ShrinkOperand(SI->getTrueValue()),
|
|
|
|
ShrinkOperand(SI->getFalseValue()));
|
2016-07-12 19:35:15 +00:00
|
|
|
} else if (auto *CI = dyn_cast<CastInst>(I)) {
|
|
|
|
switch (CI->getOpcode()) {
|
2016-05-05 00:54:54 +00:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Unhandled cast!");
|
|
|
|
case Instruction::Trunc:
|
|
|
|
NewI = ShrinkOperand(CI->getOperand(0));
|
|
|
|
break;
|
|
|
|
case Instruction::SExt:
|
2016-05-05 00:54:54 +00:00
|
|
|
NewI = B.CreateSExtOrTrunc(
|
|
|
|
CI->getOperand(0),
|
|
|
|
smallestIntegerVectorType(OriginalTy, TruncatedTy));
|
|
|
|
break;
|
|
|
|
case Instruction::ZExt:
|
2016-05-05 00:54:54 +00:00
|
|
|
NewI = B.CreateZExtOrTrunc(
|
|
|
|
CI->getOperand(0),
|
|
|
|
smallestIntegerVectorType(OriginalTy, TruncatedTy));
|
|
|
|
break;
|
|
|
|
}
|
2016-07-12 19:35:15 +00:00
|
|
|
} else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
|
|
|
|
auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
|
2016-05-05 00:54:54 +00:00
|
|
|
auto *O0 = B.CreateZExtOrTrunc(
|
|
|
|
SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
|
|
|
|
auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
|
2016-05-05 00:54:54 +00:00
|
|
|
auto *O1 = B.CreateZExtOrTrunc(
|
|
|
|
SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
|
|
|
|
|
|
|
|
NewI = B.CreateShuffleVector(O0, O1, SI->getMask());
|
|
|
|
} else if (isa<LoadInst>(I)) {
|
|
|
|
// Don't do anything with the operands, just extend the result.
|
|
|
|
continue;
|
2016-02-15 15:38:17 +00:00
|
|
|
} else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
|
|
|
|
auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
|
|
|
|
auto *O0 = B.CreateZExtOrTrunc(
|
|
|
|
IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
|
|
|
|
auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
|
|
|
|
NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
|
|
|
|
} else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
|
|
|
|
auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
|
|
|
|
auto *O0 = B.CreateZExtOrTrunc(
|
|
|
|
EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
|
|
|
|
NewI = B.CreateExtractElement(O0, EE->getOperand(2));
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Unhandled instruction type!");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lastly, extend the result.
|
|
|
|
NewI->takeName(cast<Instruction>(I));
|
|
|
|
Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
|
|
|
|
I->replaceAllUsesWith(Res);
|
|
|
|
cast<Instruction>(I)->eraseFromParent();
|
2016-05-18 11:57:58 +00:00
|
|
|
Erased.insert(I);
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.resetVectorValue(KV.first, Part, Res);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll have created a bunch of ZExts that are now parentless. Clean up.
|
2016-10-05 20:23:46 +00:00
|
|
|
for (const auto &KV : Cost->getMinimalBitwidths()) {
|
2016-12-16 16:52:35 +00:00
|
|
|
// If the value wasn't vectorized, we must maintain the original scalar
|
|
|
|
// type. The absence of the value from VectorLoopValueMap indicates that it
|
|
|
|
// wasn't vectorized.
|
2017-06-27 08:41:19 +00:00
|
|
|
if (!VectorLoopValueMap.hasAnyVectorValue(KV.first))
|
2016-12-16 16:52:35 +00:00
|
|
|
continue;
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
Value *I = getOrCreateVectorValue(KV.first, Part);
|
|
|
|
ZExtInst *Inst = dyn_cast<ZExtInst>(I);
|
|
|
|
if (Inst && Inst->use_empty()) {
|
|
|
|
Value *NewI = Inst->getOperand(0);
|
|
|
|
Inst->eraseFromParent();
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.resetVectorValue(KV.first, Part, NewI);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-11 11:36:33 +00:00
|
|
|
void InnerLoopVectorizer::fixVectorizedLoop() {
|
|
|
|
// Insert truncates and extends for any truncated instructions as hints to
|
|
|
|
// InstCombine.
|
|
|
|
if (VF > 1)
|
|
|
|
truncateToMinimalBitwidths();
|
2016-02-01 16:07:01 +00:00
|
|
|
|
|
|
|
// At this point every instruction in the original loop is widened to a
|
2017-03-14 13:50:47 +00:00
|
|
|
// vector form. Now we need to fix the recurrences in the loop. These PHI
|
2016-02-01 16:07:01 +00:00
|
|
|
// nodes are currently empty because we did not want to introduce cycles.
|
|
|
|
// This is the second stage of vectorizing recurrences.
|
2017-03-14 13:50:47 +00:00
|
|
|
fixCrossIterationPHIs();
|
2013-11-01 22:18:19 +00:00
|
|
|
|
2017-01-09 19:05:29 +00:00
|
|
|
// Update the dominator tree.
|
|
|
|
//
|
|
|
|
// FIXME: After creating the structure of the new loop, the dominator tree is
|
|
|
|
// no longer up-to-date, and it remains that way until we update it
|
|
|
|
// here. An out-of-date dominator tree is problematic for SCEV,
|
|
|
|
// because SCEVExpander uses it to guide code generation. The
|
|
|
|
// vectorizer uses SCEVExpander in several places. Instead, we should
|
|
|
|
// keep the dominator tree up-to-date as we go.
|
2015-09-09 12:51:06 +00:00
|
|
|
updateAnalysis();
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2017-01-09 19:05:29 +00:00
|
|
|
// Fix-up external users of the induction variables.
|
|
|
|
for (auto &Entry : *Legal->getInductionVars())
|
|
|
|
fixupIVUsers(Entry.first, Entry.second,
|
|
|
|
getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
|
|
|
|
IVEndValues[Entry.first], LoopMiddleBlock);
|
|
|
|
|
|
|
|
fixLCSSAPHIs();
|
2017-08-27 12:55:46 +00:00
|
|
|
for (Instruction *PI : PredicatedInstructions)
|
|
|
|
sinkScalarOperands(&*PI);
|
2016-08-03 13:23:43 +00:00
|
|
|
|
2013-11-01 23:28:54 +00:00
|
|
|
// Remove redundant induction instructions.
|
|
|
|
cse(LoopVectorBody);
|
2013-08-26 22:33:26 +00:00
|
|
|
}
|
2012-12-30 07:47:00 +00:00
|
|
|
|
2017-03-14 13:50:47 +00:00
|
|
|
void InnerLoopVectorizer::fixCrossIterationPHIs() {
|
|
|
|
// In order to support recurrences we need to be able to vectorize Phi nodes.
|
|
|
|
// Phi nodes have cycles, so we need to vectorize them in two stages. This is
|
|
|
|
// stage #2: We now need to fix the recurrences by adding incoming edges to
|
|
|
|
// the currently empty PHI nodes. At this point every instruction in the
|
|
|
|
// original loop is widened to a vector form so we can use them to construct
|
|
|
|
// the incoming edges.
|
2017-12-30 15:27:33 +00:00
|
|
|
for (PHINode &Phi : OrigLoop->getHeader()->phis()) {
|
2017-03-14 13:50:47 +00:00
|
|
|
// Handle first-order recurrences and reductions that need to be fixed.
|
2017-12-30 15:27:33 +00:00
|
|
|
if (Legal->isFirstOrderRecurrence(&Phi))
|
|
|
|
fixFirstOrderRecurrence(&Phi);
|
|
|
|
else if (Legal->isReductionVariable(&Phi))
|
|
|
|
fixReduction(&Phi);
|
2017-03-14 13:50:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-19 17:56:08 +00:00
|
|
|
void InnerLoopVectorizer::fixFirstOrderRecurrence(PHINode *Phi) {
|
2016-05-13 21:01:07 +00:00
|
|
|
// This is the second phase of vectorizing first-order recurrences. An
|
2016-02-19 17:56:08 +00:00
|
|
|
// overview of the transformation is described below. Suppose we have the
|
|
|
|
// following loop.
|
|
|
|
//
|
|
|
|
// for (int i = 0; i < n; ++i)
|
|
|
|
// b[i] = a[i] - a[i - 1];
|
|
|
|
//
|
|
|
|
// There is a first-order recurrence on "a". For this loop, the shorthand
|
|
|
|
// scalar IR looks like:
|
|
|
|
//
|
|
|
|
// scalar.ph:
|
|
|
|
// s_init = a[-1]
|
|
|
|
// br scalar.body
|
|
|
|
//
|
|
|
|
// scalar.body:
|
|
|
|
// i = phi [0, scalar.ph], [i+1, scalar.body]
|
|
|
|
// s1 = phi [s_init, scalar.ph], [s2, scalar.body]
|
|
|
|
// s2 = a[i]
|
|
|
|
// b[i] = s2 - s1
|
|
|
|
// br cond, scalar.body, ...
|
|
|
|
//
|
|
|
|
// In this example, s1 is a recurrence because its value depends on the
|
|
|
|
// previous iteration. In the first phase of vectorization, we created a
|
|
|
|
// temporary value for s1. We now complete the vectorization and produce the
|
|
|
|
// shorthand vector IR shown below (for VF = 4, UF = 1).
|
|
|
|
//
|
|
|
|
// vector.ph:
|
|
|
|
// v_init = vector(..., ..., ..., a[-1])
|
|
|
|
// br vector.body
|
|
|
|
//
|
|
|
|
// vector.body
|
|
|
|
// i = phi [0, vector.ph], [i+4, vector.body]
|
|
|
|
// v1 = phi [v_init, vector.ph], [v2, vector.body]
|
|
|
|
// v2 = a[i, i+1, i+2, i+3];
|
|
|
|
// v3 = vector(v1(3), v2(0, 1, 2))
|
|
|
|
// b[i, i+1, i+2, i+3] = v2 - v3
|
|
|
|
// br cond, vector.body, middle.block
|
|
|
|
//
|
|
|
|
// middle.block:
|
|
|
|
// x = v2(3)
|
|
|
|
// br scalar.ph
|
|
|
|
//
|
|
|
|
// scalar.ph:
|
|
|
|
// s_init = phi [x, middle.block], [a[-1], otherwise]
|
|
|
|
// br scalar.body
|
|
|
|
//
|
|
|
|
// After the vector loop completes execution, we extract the next value of
|
|
|
|
// the recurrence (x) to use as the initial value in the scalar loop.
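// With UF > 1 the same idea extends across the unrolled parts: part 0 shuffles
// the carried-over phi with its own vector of Previous, and each later part
// shuffles the preceding part's vector of Previous with its own, which is why
// Incoming is advanced to PreviousPart in the per-part loop below.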
|
|
|
|
|
|
|
|
// Get the original loop preheader and single loop latch.
|
|
|
|
auto *Preheader = OrigLoop->getLoopPreheader();
|
|
|
|
auto *Latch = OrigLoop->getLoopLatch();
|
|
|
|
|
|
|
|
// Get the initial and previous values of the scalar recurrence.
|
|
|
|
auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
|
|
|
|
auto *Previous = Phi->getIncomingValueForBlock(Latch);
|
|
|
|
|
|
|
|
// Create a vector from the initial value.
|
|
|
|
auto *VectorInit = ScalarInit;
|
|
|
|
if (VF > 1) {
|
|
|
|
Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
|
|
|
|
VectorInit = Builder.CreateInsertElement(
|
|
|
|
UndefValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
|
|
|
|
Builder.getInt32(VF - 1), "vector.recur.init");
|
|
|
|
}
|
|
|
|
|
|
|
|
// We constructed a temporary phi node in the first phase of vectorization.
|
|
|
|
// This phi node will eventually be deleted.
|
2017-06-27 08:41:19 +00:00
|
|
|
Builder.SetInsertPoint(
|
|
|
|
cast<Instruction>(VectorLoopValueMap.getVectorValue(Phi, 0)));
|
2016-02-19 17:56:08 +00:00
|
|
|
|
|
|
|
// Create a phi node for the new recurrence. The current value will either be
|
|
|
|
// the initial value inserted into a vector or loop-varying vector value.
|
|
|
|
auto *VecPhi = Builder.CreatePHI(VectorInit->getType(), 2, "vector.recur");
|
|
|
|
VecPhi->addIncoming(VectorInit, LoopVectorPreHeader);
|
|
|
|
|
2017-06-28 17:59:33 +00:00
|
|
|
// Get the vectorized previous value of the last part UF - 1. It appears last
|
|
|
|
// among all unrolled iterations, due to the order of their construction.
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *PreviousLastPart = getOrCreateVectorValue(Previous, UF - 1);
|
2016-02-19 17:56:08 +00:00
|
|
|
|
2017-03-08 18:18:20 +00:00
|
|
|
// Set the insertion point after the previous value if it is an instruction.
|
|
|
|
// Note that the previous value may have been constant-folded so it is not
|
2017-05-09 14:29:33 +00:00
|
|
|
// guaranteed to be an instruction in the vector loop. Also, if the previous
|
|
|
|
// value is a phi node, we should insert after all the phi nodes to avoid
|
|
|
|
// breaking basic block verification.
|
2017-06-27 08:41:19 +00:00
|
|
|
if (LI->getLoopFor(LoopVectorBody)->isLoopInvariant(PreviousLastPart) ||
|
|
|
|
isa<PHINode>(PreviousLastPart))
|
2017-03-08 18:18:20 +00:00
|
|
|
Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
|
|
|
|
else
|
|
|
|
Builder.SetInsertPoint(
|
2017-06-27 08:41:19 +00:00
|
|
|
&*++BasicBlock::iterator(cast<Instruction>(PreviousLastPart)));
|
2016-02-19 17:56:08 +00:00
|
|
|
|
|
|
|
// We will construct a vector for the recurrence by combining the values for
|
|
|
|
// the current and previous iterations. This is the required shuffle mask.
|
|
|
|
SmallVector<Constant *, 8> ShuffleMask(VF);
|
|
|
|
ShuffleMask[0] = Builder.getInt32(VF - 1);
|
|
|
|
for (unsigned I = 1; I < VF; ++I)
|
|
|
|
ShuffleMask[I] = Builder.getInt32(I + VF - 1);
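// For example, with VF = 4 the mask built here is <3, 4, 5, 6>: lane 3 of the
// first shuffle operand followed by lanes 0-2 of the second, matching the
// v3 = vector(v1(3), v2(0, 1, 2)) shorthand in the example above.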
|
|
|
|
|
|
|
|
// The vector from which to take the initial value for the current iteration
|
|
|
|
// (actual or unrolled). Initially, this is the vector phi node.
|
|
|
|
Value *Incoming = VecPhi;
|
|
|
|
|
|
|
|
// Shuffle the current and previous vector and update the vector parts.
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *PreviousPart = getOrCreateVectorValue(Previous, Part);
|
|
|
|
Value *PhiPart = VectorLoopValueMap.getVectorValue(Phi, Part);
|
2016-02-19 17:56:08 +00:00
|
|
|
auto *Shuffle =
|
2017-06-27 08:41:19 +00:00
|
|
|
VF > 1 ? Builder.CreateShuffleVector(Incoming, PreviousPart,
|
|
|
|
ConstantVector::get(ShuffleMask))
|
|
|
|
: Incoming;
|
|
|
|
PhiPart->replaceAllUsesWith(Shuffle);
|
|
|
|
cast<Instruction>(PhiPart)->eraseFromParent();
|
|
|
|
VectorLoopValueMap.resetVectorValue(Phi, Part, Shuffle);
|
|
|
|
Incoming = PreviousPart;
|
2016-02-19 17:56:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Fix the latch value of the new recurrence in the vector loop.
|
2016-07-12 19:35:15 +00:00
|
|
|
VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
|
2016-02-19 17:56:08 +00:00
|
|
|
|
|
|
|
// Extract the last vector element in the middle block. This will be the
|
2017-04-13 18:59:25 +00:00
|
|
|
// initial value for the recurrence when jumping to the scalar loop.
|
|
|
|
auto *ExtractForScalar = Incoming;
|
2016-02-19 17:56:08 +00:00
|
|
|
if (VF > 1) {
|
|
|
|
Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
|
2017-04-13 18:59:25 +00:00
|
|
|
ExtractForScalar = Builder.CreateExtractElement(
|
|
|
|
ExtractForScalar, Builder.getInt32(VF - 1), "vector.recur.extract");
|
2016-02-19 17:56:08 +00:00
|
|
|
}
|
2017-04-13 18:59:25 +00:00
|
|
|
// Extract the second last element in the middle block if the
|
|
|
|
// Phi is used outside the loop. We need to extract the phi itself
|
|
|
|
// and not the last element (the phi update in the current iteration). This
|
|
|
|
// will be the value when jumping to the exit block from the LoopMiddleBlock,
|
|
|
|
// when the scalar loop is not run at all.
|
|
|
|
Value *ExtractForPhiUsedOutsideLoop = nullptr;
|
|
|
|
if (VF > 1)
|
|
|
|
ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
|
|
|
|
Incoming, Builder.getInt32(VF - 2), "vector.recur.extract.for.phi");
|
|
|
|
// When the loop is unrolled without vectorizing, initialize
|
|
|
|
// ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value
|
|
|
|
// of `Incoming`. This is analogous to the vectorized case above: extracting the
|
|
|
|
// second last element when VF > 1.
|
|
|
|
else if (UF > 1)
|
2017-06-27 08:41:19 +00:00
|
|
|
ExtractForPhiUsedOutsideLoop = getOrCreateVectorValue(Previous, UF - 2);
|
2016-02-19 17:56:08 +00:00
|
|
|
|
|
|
|
// Fix the initial value of the original recurrence in the scalar loop.
|
|
|
|
Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
|
|
|
|
auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
|
|
|
|
for (auto *BB : predecessors(LoopScalarPreHeader)) {
|
2017-04-13 18:59:25 +00:00
|
|
|
auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
|
2016-02-19 17:56:08 +00:00
|
|
|
Start->addIncoming(Incoming, BB);
|
|
|
|
}
|
|
|
|
|
|
|
|
Phi->setIncomingValue(Phi->getBasicBlockIndex(LoopScalarPreHeader), Start);
|
|
|
|
Phi->setName("scalar.recur");
|
|
|
|
|
|
|
|
// Finally, fix users of the recurrence outside the loop. The users will need
|
|
|
|
// either the last value of the scalar recurrence or the last value of the
|
|
|
|
// vector recurrence we extracted in the middle block. Since the loop is in
|
|
|
|
// LCSSA form, we just need to find the phi node for the original scalar
|
|
|
|
// recurrence in the exit block, and then add an edge for the middle block.
|
2017-12-30 15:27:33 +00:00
|
|
|
for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
|
|
|
|
if (LCSSAPhi.getIncomingValue(0) == Phi) {
|
|
|
|
LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
|
2016-02-19 17:56:08 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-14 13:50:47 +00:00
|
|
|
void InnerLoopVectorizer::fixReduction(PHINode *Phi) {
|
|
|
|
Constant *Zero = Builder.getInt32(0);
|
|
|
|
|
|
|
|
// Get its reduction variable descriptor.
|
|
|
|
assert(Legal->isReductionVariable(Phi) &&
|
|
|
|
"Unable to find the reduction variable");
|
|
|
|
RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[Phi];
|
|
|
|
|
|
|
|
RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
|
|
|
|
TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
|
|
|
|
Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
|
|
|
|
RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
|
|
|
|
RdxDesc.getMinMaxRecurrenceKind();
|
|
|
|
setDebugLocFromInst(Builder, ReductionStartValue);
|
|
|
|
|
|
|
|
// We need to generate a reduction vector from the incoming scalar.
|
|
|
|
// To do so, we need to generate the 'identity' vector and override
|
|
|
|
// one of the elements with the incoming scalar reduction. We need
|
|
|
|
// to do it in the vector-loop preheader.
|
2017-07-19 05:16:39 +00:00
|
|
|
Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
|
2017-03-14 13:50:47 +00:00
|
|
|
|
|
|
|
// This is the vector-clone of the value that leaves the loop.
|
2017-06-27 08:41:19 +00:00
|
|
|
Type *VecTy = getOrCreateVectorValue(LoopExitInst, 0)->getType();
|
2017-03-14 13:50:47 +00:00
|
|
|
|
|
|
|
// Find the reduction identity value: zero for addition, or, and xor;
|
|
|
|
// one for multiplication; -1 (all ones) for and.
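// For example, an integer add reduction with start value %s and VF = 4 uses
// Identity = <0, 0, 0, 0> and VectorStart = <%s, 0, 0, 0>.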
|
|
|
|
Value *Identity;
|
|
|
|
Value *VectorStart;
|
|
|
|
if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
|
|
|
|
RK == RecurrenceDescriptor::RK_FloatMinMax) {
|
|
|
|
// MinMax reductions have the start value as their identity.
|
|
|
|
if (VF == 1) {
|
|
|
|
VectorStart = Identity = ReductionStartValue;
|
|
|
|
} else {
|
|
|
|
VectorStart = Identity =
|
|
|
|
Builder.CreateVectorSplat(VF, ReductionStartValue, "minmax.ident");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Handle other reduction kinds:
|
|
|
|
Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
|
|
|
|
RK, VecTy->getScalarType());
|
|
|
|
if (VF == 1) {
|
|
|
|
Identity = Iden;
|
|
|
|
// This vector is the Identity vector where the first element is the
|
|
|
|
// incoming scalar reduction.
|
|
|
|
VectorStart = ReductionStartValue;
|
|
|
|
} else {
|
|
|
|
Identity = ConstantVector::getSplat(VF, Iden);
|
|
|
|
|
|
|
|
// This vector is the Identity vector where the first element is the
|
|
|
|
// incoming scalar reduction.
|
|
|
|
VectorStart =
|
|
|
|
Builder.CreateInsertElement(Identity, ReductionStartValue, Zero);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fix the vector-loop phi.
|
|
|
|
|
|
|
|
// Reductions do not have to start at zero. They can start with
|
|
|
|
// any loop invariant values.
|
|
|
|
BasicBlock *Latch = OrigLoop->getLoopLatch();
|
|
|
|
Value *LoopVal = Phi->getIncomingValueForBlock(Latch);
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
Value *VecRdxPhi = getOrCreateVectorValue(Phi, Part);
|
|
|
|
Value *Val = getOrCreateVectorValue(LoopVal, Part);
|
2017-03-14 13:50:47 +00:00
|
|
|
// Make sure to add the reduction start value only to the
|
|
|
|
// first unroll part.
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *StartVal = (Part == 0) ? VectorStart : Identity;
|
|
|
|
cast<PHINode>(VecRdxPhi)->addIncoming(StartVal, LoopVectorPreHeader);
|
|
|
|
cast<PHINode>(VecRdxPhi)
|
|
|
|
->addIncoming(Val, LI->getLoopFor(LoopVectorBody)->getLoopLatch());
|
2017-03-14 13:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Before each round, move the insertion point right between
|
|
|
|
// the PHIs and the values we are going to write.
|
|
|
|
// This allows us to write both PHINodes and the extractelement
|
|
|
|
// instructions.
|
|
|
|
Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
|
|
|
|
|
|
|
|
setDebugLocFromInst(Builder, LoopExitInst);
|
|
|
|
|
|
|
|
// If the vector reduction can be performed in a smaller type, we truncate
|
|
|
|
// then extend the loop exit value to enable InstCombine to evaluate the
|
|
|
|
// entire expression in the smaller type.
|
|
|
|
if (VF > 1 && Phi->getType() != RdxDesc.getRecurrenceType()) {
|
|
|
|
Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
|
2017-09-29 18:07:39 +00:00
|
|
|
Builder.SetInsertPoint(
|
|
|
|
LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorParts RdxParts(UF);
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
RdxParts[Part] = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
|
|
|
|
Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
|
2017-03-14 13:50:47 +00:00
|
|
|
Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
|
2017-06-27 08:41:19 +00:00
|
|
|
: Builder.CreateZExt(Trunc, VecTy);
|
|
|
|
for (Value::user_iterator UI = RdxParts[Part]->user_begin();
|
|
|
|
UI != RdxParts[Part]->user_end();)
|
2017-03-14 13:50:47 +00:00
|
|
|
if (*UI != Trunc) {
|
2017-06-27 08:41:19 +00:00
|
|
|
(*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd);
|
|
|
|
RdxParts[Part] = Extnd;
|
2017-03-14 13:50:47 +00:00
|
|
|
} else {
|
|
|
|
++UI;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
|
|
|
|
VectorLoopValueMap.resetVectorValue(LoopExitInst, Part, RdxParts[Part]);
|
|
|
|
}
|
2017-03-14 13:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Reduce all of the unrolled parts into a single vector.
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *ReducedPartRdx = VectorLoopValueMap.getVectorValue(LoopExitInst, 0);
|
2017-03-14 13:50:47 +00:00
|
|
|
unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
|
|
|
|
setDebugLocFromInst(Builder, ReducedPartRdx);
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 1; Part < UF; ++Part) {
|
|
|
|
Value *RdxPart = VectorLoopValueMap.getVectorValue(LoopExitInst, Part);
|
2017-03-14 13:50:47 +00:00
|
|
|
if (Op != Instruction::ICmp && Op != Instruction::FCmp)
|
|
|
|
// Floating point operations had to be 'fast' to enable the reduction.
|
|
|
|
ReducedPartRdx = addFastMathFlag(
|
2017-06-27 08:41:19 +00:00
|
|
|
Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxPart,
|
2017-03-14 13:50:47 +00:00
|
|
|
ReducedPartRdx, "bin.rdx"));
|
|
|
|
else
|
|
|
|
ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
|
2017-06-27 08:41:19 +00:00
|
|
|
Builder, MinMaxKind, ReducedPartRdx, RdxPart);
|
2017-03-14 13:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (VF > 1) {
|
2017-05-09 10:43:25 +00:00
|
|
|
bool NoNaN = Legal->hasFunNoNaNAttr();
|
2017-03-14 13:50:47 +00:00
|
|
|
ReducedPartRdx =
|
2017-05-09 10:43:25 +00:00
|
|
|
createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, NoNaN);
|
2017-03-14 13:50:47 +00:00
|
|
|
// If the reduction can be performed in a smaller type, we need to extend
|
|
|
|
// the reduction to the wider type before we branch to the original loop.
|
|
|
|
if (Phi->getType() != RdxDesc.getRecurrenceType())
|
|
|
|
ReducedPartRdx =
|
|
|
|
RdxDesc.isSigned()
|
|
|
|
? Builder.CreateSExt(ReducedPartRdx, Phi->getType())
|
|
|
|
: Builder.CreateZExt(ReducedPartRdx, Phi->getType());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a phi node that merges control-flow from the backedge-taken check
|
|
|
|
// block and the middle block.
|
|
|
|
PHINode *BCBlockPhi = PHINode::Create(Phi->getType(), 2, "bc.merge.rdx",
|
|
|
|
LoopScalarPreHeader->getTerminator());
|
|
|
|
for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
|
|
|
|
BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
|
|
|
|
BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
|
|
|
|
|
|
|
|
// Now, we need to fix the users of the reduction variable
|
|
|
|
// inside and outside of the scalar remainder loop.
|
|
|
|
// We know that the loop is in LCSSA form. We need to update the
|
|
|
|
// PHI nodes in the exit blocks.
|
2017-12-30 15:27:33 +00:00
|
|
|
for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
|
2017-03-14 13:50:47 +00:00
|
|
|
// All PHINodes need to have a single entry edge, or two if
|
|
|
|
// we already fixed them.
|
2017-12-30 15:27:33 +00:00
|
|
|
assert(LCSSAPhi.getNumIncomingValues() < 3 && "Invalid LCSSA PHI");
|
2017-03-14 13:50:47 +00:00
|
|
|
|
|
|
|
// We found a reduction value exit-PHI. Update it with the
|
|
|
|
// incoming bypass edge.
|
2017-12-30 15:27:33 +00:00
|
|
|
if (LCSSAPhi.getIncomingValue(0) == LoopExitInst)
|
|
|
|
LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
|
2017-03-14 13:50:47 +00:00
|
|
|
} // end of the LCSSA phi scan.
|
|
|
|
|
|
|
|
// Fix the scalar loop reduction variable with the incoming reduction sum
|
|
|
|
// from the vector body and from the backedge value.
|
|
|
|
int IncomingEdgeBlockIdx =
|
|
|
|
Phi->getBasicBlockIndex(OrigLoop->getLoopLatch());
|
|
|
|
assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
|
|
|
|
// Pick the other block.
|
|
|
|
int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
|
|
|
|
Phi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
|
|
|
|
Phi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
|
|
|
|
}
|
|
|
|
|
2013-08-26 22:33:26 +00:00
|
|
|
void InnerLoopVectorizer::fixLCSSAPHIs() {
|
2017-12-30 15:27:33 +00:00
|
|
|
for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
|
|
|
|
if (LCSSAPhi.getNumIncomingValues() == 1) {
|
|
|
|
assert(OrigLoop->isLoopInvariant(LCSSAPhi.getIncomingValue(0)) &&
|
2017-05-13 13:25:57 +00:00
|
|
|
"Incoming value isn't loop invariant");
|
2017-12-30 15:27:33 +00:00
|
|
|
LCSSAPhi.addIncoming(LCSSAPhi.getIncomingValue(0), LoopMiddleBlock);
|
2017-05-13 13:25:57 +00:00
|
|
|
}
|
2012-12-30 07:47:00 +00:00
|
|
|
}
|
2014-10-28 11:53:30 +00:00
|
|
|
}
|
2016-08-24 11:37:57 +00:00
|
|
|
|
2016-10-25 18:59:45 +00:00
|
|
|
void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
|
|
|
|
// The basic block and loop containing the predicated instruction.
|
|
|
|
auto *PredBB = PredInst->getParent();
|
|
|
|
auto *VectorLoop = LI->getLoopFor(PredBB);
|
|
|
|
|
|
|
|
// Initialize a worklist with the operands of the predicated instruction.
|
|
|
|
SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
|
|
|
|
|
|
|
|
// Holds instructions that we need to analyze again. An instruction may be
|
|
|
|
// reanalyzed if we don't yet know if we can sink it or not.
|
|
|
|
SmallVector<Instruction *, 8> InstsToReanalyze;
|
|
|
|
|
|
|
|
// Returns true if a given use occurs in the predicated block. Phi nodes use
|
|
|
|
// their operands in their corresponding predecessor blocks.
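// For example, given %p = phi [ %x, %bb1 ], [ %y, %bb2 ], the use of %x is
// treated as occurring in %bb1 rather than in the phi's own block.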
|
|
|
|
auto isBlockOfUsePredicated = [&](Use &U) -> bool {
|
|
|
|
auto *I = cast<Instruction>(U.getUser());
|
|
|
|
BasicBlock *BB = I->getParent();
|
|
|
|
if (auto *Phi = dyn_cast<PHINode>(I))
|
|
|
|
BB = Phi->getIncomingBlock(
|
|
|
|
PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
|
|
|
|
return BB == PredBB;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Iteratively sink the scalarized operands of the predicated instruction
|
|
|
|
// into the block we created for it. When an instruction is sunk, its
|
|
|
|
// operands are then added to the worklist. The algorithm ends after one pass
|
|
|
|
// through the worklist doesn't sink a single instruction.
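// For example, an add whose only user is the predicated instruction is sunk
// on the first pass; once it has moved, values used only by that add may
// become sinkable on a later pass, so we iterate until a fixed point.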
|
|
|
|
bool Changed;
|
|
|
|
do {
|
|
|
|
// Add the instructions that need to be reanalyzed to the worklist, and
|
|
|
|
// reset the changed indicator.
|
|
|
|
Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
|
|
|
|
InstsToReanalyze.clear();
|
|
|
|
Changed = false;
|
|
|
|
|
|
|
|
while (!Worklist.empty()) {
|
|
|
|
auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
|
|
|
|
|
|
|
|
// We can't sink an instruction if it is a phi node, is already in the
|
|
|
|
// predicated block, is not in the loop, or may have side effects.
|
|
|
|
if (!I || isa<PHINode>(I) || I->getParent() == PredBB ||
|
|
|
|
!VectorLoop->contains(I) || I->mayHaveSideEffects())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// It's legal to sink the instruction if all its uses occur in the
|
|
|
|
// predicated block. Otherwise, there's nothing to do yet, and we may
|
|
|
|
// need to reanalyze the instruction.
|
2017-10-12 23:30:03 +00:00
|
|
|
if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
|
2016-10-25 18:59:45 +00:00
|
|
|
InstsToReanalyze.push_back(I);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Move the instruction to the beginning of the predicated block, and add
|
|
|
|
// its operands to the worklist.
|
|
|
|
I->moveBefore(&*PredBB->getFirstInsertionPt());
|
|
|
|
Worklist.insert(I->op_begin(), I->op_end());
|
|
|
|
|
|
|
|
// The sinking may have enabled other instructions to be sunk, so we will
|
|
|
|
// need to iterate.
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
} while (Changed);
|
|
|
|
}
|
|
|
|
|
2016-09-01 18:14:27 +00:00
|
|
|
void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, unsigned UF,
|
2017-03-14 13:50:47 +00:00
|
|
|
unsigned VF) {
|
2017-11-20 12:01:47 +00:00
|
|
|
assert(PN->getParent() == OrigLoop->getHeader() &&
|
|
|
|
"Non-header phis should have been handled elsewhere");
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
PHINode *P = cast<PHINode>(PN);
|
2017-03-14 13:50:47 +00:00
|
|
|
// In order to support recurrences we need to be able to vectorize Phi nodes.
|
|
|
|
// Phi nodes have cycles, so we need to vectorize them in two stages. This is
|
|
|
|
// stage #1: We create a new vector PHI node with no incoming edges. We'll use
|
|
|
|
// this value when we vectorize all of the instructions that use the PHI.
|
2016-02-19 17:56:08 +00:00
|
|
|
if (Legal->isReductionVariable(P) || Legal->isFirstOrderRecurrence(P)) {
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2013-08-26 22:33:26 +00:00
|
|
|
// This is phase one of vectorizing PHIs.
|
2016-05-05 00:54:54 +00:00
|
|
|
Type *VecTy =
|
|
|
|
(VF == 1) ? PN->getType() : VectorType::get(PN->getType(), VF);
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *EntryPart = PHINode::Create(
|
2016-05-12 18:44:51 +00:00
|
|
|
VecTy, 2, "vec.phi", &*LoopVectorBody->getFirstInsertionPt());
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(P, Part, EntryPart);
|
2013-08-26 22:33:26 +00:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2012-11-25 08:41:35 +00:00
|
|
|
|
2013-08-26 22:33:26 +00:00
|
|
|
setDebugLocFromInst(Builder, P);
|
2012-12-10 19:25:06 +00:00
|
|
|
|
2013-08-26 22:33:26 +00:00
|
|
|
// This PHINode must be an induction variable.
|
|
|
|
// Make sure that we know about it.
|
2016-05-05 00:54:54 +00:00
|
|
|
assert(Legal->getInductionVars()->count(P) && "Not an induction variable");
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2015-08-27 09:53:00 +00:00
|
|
|
InductionDescriptor II = Legal->getInductionVars()->lookup(P);
|
2016-05-10 07:33:35 +00:00
|
|
|
const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2015-01-30 05:02:21 +00:00
|
|
|
// FIXME: The newly created binary instructions should contain nsw/nuw flags,
|
|
|
|
// which can be found from the original scalar operations.
|
2015-08-27 09:53:00 +00:00
|
|
|
switch (II.getKind()) {
|
2016-05-05 00:54:54 +00:00
|
|
|
case InductionDescriptor::IK_NoInduction:
|
|
|
|
llvm_unreachable("Unknown induction");
|
2016-07-05 15:41:28 +00:00
|
|
|
case InductionDescriptor::IK_IntInduction:
|
2017-02-24 18:20:12 +00:00
|
|
|
case InductionDescriptor::IK_FpInduction:
|
2017-08-27 12:55:46 +00:00
|
|
|
llvm_unreachable("Integer/fp induction is handled elsewhere.");
|
2016-07-24 07:24:54 +00:00
|
|
|
case InductionDescriptor::IK_PtrInduction: {
|
2016-05-05 00:54:54 +00:00
|
|
|
// Handle the pointer induction variable case.
|
|
|
|
assert(P->getType()->isPointerTy() && "Unexpected type.");
|
|
|
|
// This is the normalized GEP that starts counting at zero.
|
|
|
|
Value *PtrInd = Induction;
|
2016-05-10 07:33:35 +00:00
|
|
|
PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
|
2016-09-21 16:50:24 +00:00
|
|
|
// Determine the number of scalars we need to generate for each unroll
|
|
|
|
// iteration. If the instruction is uniform, we only need to generate the
|
|
|
|
// first lane. Otherwise, we generate all VF values.
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned Lanes = Cost->isUniformAfterVectorization(P, VF) ? 1 : VF;
|
2016-09-01 19:40:19 +00:00
|
|
|
// These are the scalar results. Notice that we don't generate vector GEPs
|
|
|
|
// because scalar GEPs result in better code.
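// An illustrative sketch (assumed names, VF = 4, UF = 1, unit-stride i32
// pointer induction):
//
//   %next.gep   = getelementptr i32, i32* %base, i64 %idx0   ; lane 0
//   %next.gep.1 = getelementptr i32, i32* %base, i64 %idx1   ; lane 1
//   ...
//
// i.e. one scalar GEP per lane rather than a single <4 x i32*> GEP.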
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2016-09-21 16:50:24 +00:00
|
|
|
for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
|
2016-09-01 19:40:19 +00:00
|
|
|
Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
|
2016-05-05 00:54:54 +00:00
|
|
|
Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
|
2016-05-10 07:33:35 +00:00
|
|
|
Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
|
2016-05-05 00:54:54 +00:00
|
|
|
SclrGep->setName("next.gep");
|
2017-08-27 12:55:46 +00:00
|
|
|
VectorLoopValueMap.setScalarValue(P, {Part, Lane}, SclrGep);
|
2012-10-17 18:25:06 +00:00
|
|
|
}
|
2016-05-05 00:54:54 +00:00
|
|
|
}
|
|
|
|
return;
|
2013-08-26 22:33:26 +00:00
|
|
|
}
|
2016-07-24 07:24:54 +00:00
|
|
|
}
|
2013-08-26 22:33:26 +00:00
|
|
|
}
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2016-08-24 11:37:57 +00:00
|
|
|
/// A helper function for checking whether an integer division-related
|
|
|
|
/// instruction may divide by zero (in which case it must be predicated if
|
|
|
|
/// executed conditionally in the scalar code).
|
|
|
|
/// TODO: It may be worthwhile to generalize and check isKnownNonZero().
|
|
|
|
/// Non-zero divisors that are not compile-time constants will not be
|
|
|
|
/// converted into multiplication, so we will still end up scalarizing
|
|
|
|
/// the division, but can do so without predication.
|
|
|
|
static bool mayDivideByZero(Instruction &I) {
|
|
|
|
assert((I.getOpcode() == Instruction::UDiv ||
|
|
|
|
I.getOpcode() == Instruction::SDiv ||
|
|
|
|
I.getOpcode() == Instruction::URem ||
|
|
|
|
I.getOpcode() == Instruction::SRem) &&
|
|
|
|
"Unexpected instruction");
|
|
|
|
Value *Divisor = I.getOperand(1);
|
|
|
|
auto *CInt = dyn_cast<ConstantInt>(Divisor);
|
|
|
|
return !CInt || CInt->isZero();
|
|
|
|
}
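// A minimal usage sketch for mayDivideByZero (hypothetical IR, not from this
// file):
//
//   %q0 = udiv i32 %x, 7     ; constant non-zero divisor: returns false
//   %q1 = udiv i32 %x, %y    ; unknown divisor: returns true, so the division
//                            ; must be predicated if executed conditionally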
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
void InnerLoopVectorizer::widenInstruction(Instruction &I) {
|
2017-04-13 09:07:23 +00:00
|
|
|
switch (I.getOpcode()) {
|
|
|
|
case Instruction::Br:
|
2017-08-27 12:55:46 +00:00
|
|
|
case Instruction::PHI:
|
|
|
|
llvm_unreachable("This instruction is handled by a different recipe.");
|
2017-04-13 09:07:23 +00:00
|
|
|
case Instruction::GetElementPtr: {
|
|
|
|
// Construct a vector GEP by widening the operands of the scalar GEP as
|
|
|
|
// necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
|
|
|
|
// results in a vector of pointers when at least one operand of the GEP
|
|
|
|
// is vector-typed. Thus, to keep the representation compact, we only use
|
|
|
|
// vector-typed operands for loop-varying values.
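// As an illustrative sketch (assumed names, VF = 4): a scalar GEP
//
//   %g = getelementptr inbounds i32, i32* %A, i64 %i
//
// whose index %i is loop-varying is widened to
//
//   %g.vec = getelementptr inbounds i32, i32* %A, <4 x i64> %vec.ind
//
// producing a vector of pointers from a single vector-typed operand.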
|
|
|
|
auto *GEP = cast<GetElementPtrInst>(&I);
|
2016-09-26 17:08:37 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
if (VF > 1 && OrigLoop->hasLoopInvariantOperands(GEP)) {
|
|
|
|
// If we are vectorizing, but the GEP has only loop-invariant operands,
|
|
|
|
// the GEP we build (by only using vector-typed operands for
|
|
|
|
// loop-varying values) would be a scalar pointer. Thus, to ensure we
|
|
|
|
// produce a vector of pointers, we need to either arbitrarily pick an
|
|
|
|
// operand to broadcast, or broadcast a clone of the original GEP.
|
|
|
|
// Here, we broadcast a clone of the original.
|
|
|
|
//
|
|
|
|
// TODO: If at some point we decide to scalarize instructions having
|
|
|
|
// loop-invariant operands, this special case will no longer be
|
|
|
|
// required. We would add the scalarization decision to
|
|
|
|
// collectLoopScalars() and teach getVectorValue() to broadcast
|
|
|
|
// the lane-zero scalar value.
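// Sketch of the broadcast (assumed names, VF = 4): the cloned scalar GEP is
// splatted into a vector of identical pointers, roughly
//
//   %splat = insertelement <4 x i32*> undef, i32* %clone, i32 0
//   %g.vec = shufflevector <4 x i32*> %splat, <4 x i32*> undef,
//                          <4 x i32> zeroinitializer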
|
|
|
|
auto *Clone = Builder.Insert(GEP->clone());
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
|
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, EntryPart);
|
|
|
|
addMetadata(EntryPart, GEP);
|
|
|
|
}
|
2017-04-13 09:07:23 +00:00
|
|
|
} else {
|
|
|
|
// If the GEP has at least one loop-varying operand, we are sure to
|
|
|
|
// produce a vector of pointers. But if we are only unrolling, we want
|
|
|
|
// to produce a scalar GEP for each unroll part. Thus, the GEP we
|
|
|
|
// produce with the code below will be scalar (if VF == 1) or vector
|
|
|
|
// (otherwise). Note that for the unroll-only case, we still maintain
|
|
|
|
// values in the vector value map, as we do for other
|
|
|
|
// instructions.
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
// The pointer operand of the new GEP. If it's loop-invariant, we
|
|
|
|
// won't broadcast it.
|
2017-06-27 08:41:19 +00:00
|
|
|
auto *Ptr =
|
|
|
|
OrigLoop->isLoopInvariant(GEP->getPointerOperand())
|
|
|
|
? GEP->getPointerOperand()
|
|
|
|
: getOrCreateVectorValue(GEP->getPointerOperand(), Part);
|
2017-04-13 09:07:23 +00:00
|
|
|
|
|
|
|
// Collect all the indices for the new GEP. If any index is
|
|
|
|
// loop-invariant, we won't broadcast it.
|
|
|
|
SmallVector<Value *, 4> Indices;
|
|
|
|
for (auto &U : make_range(GEP->idx_begin(), GEP->idx_end())) {
|
|
|
|
if (OrigLoop->isLoopInvariant(U.get()))
|
|
|
|
Indices.push_back(U.get());
|
|
|
|
else
|
2017-06-27 08:41:19 +00:00
|
|
|
Indices.push_back(getOrCreateVectorValue(U.get(), Part));
|
2017-04-07 14:15:34 +00:00
|
|
|
}
|
2017-04-13 09:07:23 +00:00
|
|
|
|
|
|
|
// Create the new GEP. Note that this GEP may be a scalar if VF == 1,
|
|
|
|
// but it should be a vector, otherwise.
|
|
|
|
auto *NewGEP = GEP->isInBounds()
|
|
|
|
? Builder.CreateInBoundsGEP(Ptr, Indices)
|
|
|
|
: Builder.CreateGEP(Ptr, Indices);
|
|
|
|
assert((VF == 1 || NewGEP->getType()->isVectorTy()) &&
|
|
|
|
"NewGEP is not a pointer vector");
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, NewGEP);
|
|
|
|
addMetadata(NewGEP, GEP);
|
2017-04-07 14:15:34 +00:00
|
|
|
}
|
2017-04-13 09:07:23 +00:00
|
|
|
}
|
2017-04-07 14:15:34 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::URem:
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::FRem:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::Xor: {
|
|
|
|
// Just widen binops.
|
|
|
|
auto *BinOp = cast<BinaryOperator>(&I);
|
|
|
|
setDebugLocFromInst(Builder, BinOp);
|
|
|
|
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *A = getOrCreateVectorValue(BinOp->getOperand(0), Part);
|
|
|
|
Value *B = getOrCreateVectorValue(BinOp->getOperand(1), Part);
|
|
|
|
Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B);
|
2013-01-03 00:52:27 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
if (BinaryOperator *VecOp = dyn_cast<BinaryOperator>(V))
|
|
|
|
VecOp->copyIRFlags(BinOp);
|
2014-10-28 11:53:30 +00:00
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
// Use this vector value for all users of the original instruction.
|
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, V);
|
|
|
|
addMetadata(V, BinOp);
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case Instruction::Select: {
|
|
|
|
// Widen selects.
|
|
|
|
// If the selector is loop invariant we can create a select
|
|
|
|
// instruction with a scalar condition. Otherwise, use vector-select.
|
|
|
|
auto *SE = PSE.getSE();
|
|
|
|
bool InvariantCond =
|
|
|
|
SE->isLoopInvariant(PSE.getSCEV(I.getOperand(0)), OrigLoop);
|
|
|
|
setDebugLocFromInst(Builder, &I);
|
|
|
|
|
|
|
|
// The condition can be loop invariant but still defined inside the
|
|
|
|
// loop. This means that we can't just use the original 'cond' value.
|
|
|
|
// We have to take the 'vectorized' value and pick the first lane.
|
|
|
|
// Instcombine will make this a no-op.
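// Illustrative sketch (assumed names, VF = 4): for a loop-invariant condition
// %c that is nevertheless defined inside the loop,
//
//   %c.vec   = ... <4 x i1> ...                          ; vectorized %c
//   %c.lane0 = extractelement <4 x i1> %c.vec, i32 0     ; first lane
//   %sel     = select i1 %c.lane0, <4 x i32> %a.vec, <4 x i32> %b.vec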
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
auto *ScalarCond = getOrCreateScalarValue(I.getOperand(0), {0, 0});
|
2014-07-19 13:33:16 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *Cond = getOrCreateVectorValue(I.getOperand(0), Part);
|
|
|
|
Value *Op0 = getOrCreateVectorValue(I.getOperand(1), Part);
|
|
|
|
Value *Op1 = getOrCreateVectorValue(I.getOperand(2), Part);
|
|
|
|
Value *Sel =
|
|
|
|
Builder.CreateSelect(InvariantCond ? ScalarCond : Cond, Op0, Op1);
|
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, Sel);
|
|
|
|
addMetadata(Sel, &I);
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Instruction::ICmp:
|
|
|
|
case Instruction::FCmp: {
|
|
|
|
// Widen compares. Generate vector compares.
|
|
|
|
bool FCmp = (I.getOpcode() == Instruction::FCmp);
|
|
|
|
auto *Cmp = dyn_cast<CmpInst>(&I);
|
|
|
|
setDebugLocFromInst(Builder, Cmp);
|
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
2017-06-27 08:41:19 +00:00
|
|
|
Value *A = getOrCreateVectorValue(Cmp->getOperand(0), Part);
|
|
|
|
Value *B = getOrCreateVectorValue(Cmp->getOperand(1), Part);
|
2017-04-13 09:07:23 +00:00
|
|
|
Value *C = nullptr;
|
|
|
|
if (FCmp) {
|
2017-08-08 18:07:44 +00:00
|
|
|
// Propagate fast math flags.
|
|
|
|
IRBuilder<>::FastMathFlagGuard FMFG(Builder);
|
|
|
|
Builder.setFastMathFlags(Cmp->getFastMathFlags());
|
2017-06-27 08:41:19 +00:00
|
|
|
C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
|
2017-04-13 09:07:23 +00:00
|
|
|
} else {
|
2017-06-27 08:41:19 +00:00
|
|
|
C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
|
2013-01-03 00:52:27 +00:00
|
|
|
}
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, C);
|
|
|
|
addMetadata(C, &I);
|
2017-04-13 09:07:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2014-07-19 13:33:16 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
case Instruction::ZExt:
|
|
|
|
case Instruction::SExt:
|
|
|
|
case Instruction::FPToUI:
|
|
|
|
case Instruction::FPToSI:
|
|
|
|
case Instruction::FPExt:
|
|
|
|
case Instruction::PtrToInt:
|
|
|
|
case Instruction::IntToPtr:
|
|
|
|
case Instruction::SIToFP:
|
|
|
|
case Instruction::UIToFP:
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
case Instruction::BitCast: {
|
|
|
|
auto *CI = dyn_cast<CastInst>(&I);
|
|
|
|
setDebugLocFromInst(Builder, CI);
|
|
|
|
|
|
|
|
/// Vectorize casts.
|
|
|
|
Type *DestTy =
|
|
|
|
(VF == 1) ? CI->getType() : VectorType::get(CI->getType(), VF);
|
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
Value *A = getOrCreateVectorValue(CI->getOperand(0), Part);
|
|
|
|
Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
|
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, Cast);
|
|
|
|
addMetadata(Cast, &I);
|
|
|
|
}
|
2017-04-13 09:07:23 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case Instruction::Call: {
|
|
|
|
// Ignore dbg intrinsics.
|
|
|
|
if (isa<DbgInfoIntrinsic>(I))
|
2016-05-05 00:54:54 +00:00
|
|
|
break;
|
2017-04-13 09:07:23 +00:00
|
|
|
setDebugLocFromInst(Builder, &I);
|
2016-07-05 15:41:28 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
Module *M = I.getParent()->getParent()->getParent();
|
|
|
|
auto *CI = cast<CallInst>(&I);
|
2013-01-03 00:52:27 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
StringRef FnName = CI->getCalledFunction()->getName();
|
|
|
|
Function *F = CI->getCalledFunction();
|
|
|
|
Type *RetTy = ToVectorTy(CI->getType(), VF);
|
|
|
|
SmallVector<Type *, 4> Tys;
|
|
|
|
for (Value *ArgOperand : CI->arg_operands())
|
|
|
|
Tys.push_back(ToVectorTy(ArgOperand->getType(), VF));
|
|
|
|
|
|
|
|
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
|
2017-08-27 12:55:46 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
// Decide whether to use an intrinsic call or a library call for the
|
|
|
|
// vectorized version of the instruction, i.e. whether it is beneficial to
|
|
|
|
// perform the intrinsic call rather than the library call.
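// For example (purely hypothetical costs, shown only to illustrate the
// decision): if the <4 x float> llvm.sqrt intrinsic is estimated at cost 10
// and a TLI-provided vector library routine at cost 18, UseVectorIntrinsic is
// true and the intrinsic declaration is emitted instead of the library call.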
|
|
|
|
bool NeedToScalarize;
|
|
|
|
unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
|
|
|
|
bool UseVectorIntrinsic =
|
|
|
|
ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
|
2017-08-27 12:55:46 +00:00
|
|
|
assert((UseVectorIntrinsic || !NeedToScalarize) &&
|
|
|
|
"Instruction should be scalarized elsewhere.");
|
2012-12-10 21:39:02 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
for (unsigned Part = 0; Part < UF; ++Part) {
|
|
|
|
SmallVector<Value *, 4> Args;
|
|
|
|
for (unsigned i = 0, ie = CI->getNumArgOperands(); i != ie; ++i) {
|
|
|
|
Value *Arg = CI->getArgOperand(i);
|
|
|
|
// Some intrinsics have a scalar argument - don't replace it with a
|
|
|
|
// vector.
|
2017-06-27 08:41:19 +00:00
|
|
|
if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, i))
|
|
|
|
Arg = getOrCreateVectorValue(CI->getArgOperand(i), Part);
|
2017-04-13 09:07:23 +00:00
|
|
|
Args.push_back(Arg);
|
|
|
|
}
|
2014-07-19 13:33:16 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
Function *VectorF;
|
|
|
|
if (UseVectorIntrinsic) {
|
|
|
|
// Use vector version of the intrinsic.
|
|
|
|
Type *TysForDecl[] = {CI->getType()};
|
|
|
|
if (VF > 1)
|
|
|
|
TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
|
|
|
|
VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
|
|
|
|
} else {
|
|
|
|
// Use vector version of the library call.
|
|
|
|
StringRef VFnName = TLI->getVectorizedFunction(FnName, VF);
|
|
|
|
assert(!VFnName.empty() && "Vector function name is empty.");
|
|
|
|
VectorF = M->getFunction(VFnName);
|
|
|
|
if (!VectorF) {
|
|
|
|
// Generate a declaration
|
|
|
|
FunctionType *FTy = FunctionType::get(RetTy, Tys, false);
|
|
|
|
VectorF =
|
|
|
|
Function::Create(FTy, Function::ExternalLinkage, VFnName, M);
|
|
|
|
VectorF->copyAttributesFrom(F);
|
2015-03-17 19:46:50 +00:00
|
|
|
}
|
2017-04-13 09:07:23 +00:00
|
|
|
}
|
|
|
|
assert(VectorF && "Can't create vector function.");
|
2016-04-14 07:13:24 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
SmallVector<OperandBundleDef, 1> OpBundles;
|
|
|
|
CI->getOperandBundlesAsDefs(OpBundles);
|
|
|
|
CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
|
2016-04-14 07:13:24 +00:00
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
if (isa<FPMathOperator>(V))
|
|
|
|
V->copyFastMathFlags(CI);
|
2015-03-17 19:46:50 +00:00
|
|
|
|
2017-06-27 08:41:19 +00:00
|
|
|
VectorLoopValueMap.setVectorValue(&I, Part, V);
|
|
|
|
addMetadata(V, &I);
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
|
|
|
|
2017-04-13 09:07:23 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
2017-11-14 12:09:30 +00:00
|
|
|
// This instruction is not vectorized by simple widening.
|
2017-08-27 12:55:46 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
|
|
|
|
llvm_unreachable("Unhandled instruction!");
|
2017-04-13 09:07:23 +00:00
|
|
|
} // end of switch.
|
2012-10-17 18:25:06 +00:00
|
|
|
}
|
|
|
|
|
2012-12-03 21:33:08 +00:00
|
|
|
void InnerLoopVectorizer::updateAnalysis() {
|
2012-11-29 19:25:41 +00:00
|
|
|
// Forget the original basic block.
|
2015-12-09 16:06:28 +00:00
|
|
|
PSE.getSE()->forgetLoop(OrigLoop);
|
2012-10-29 21:52:38 +00:00
|
|
|
|
|
|
|
// Update the dominator tree information.
|
2013-01-19 13:57:58 +00:00
|
|
|
assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
|
2012-10-29 21:52:38 +00:00
|
|
|
"Entry does not dominate exit.");
|
|
|
|
|
2017-04-14 07:30:23 +00:00
|
|
|
DT->addNewBlock(LoopMiddleBlock,
|
|
|
|
LI->getLoopFor(LoopVectorBody)->getLoopLatch());
|
2014-05-29 22:10:01 +00:00
|
|
|
DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
|
2012-10-29 21:52:38 +00:00
|
|
|
DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
|
2014-10-31 22:28:03 +00:00
|
|
|
DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);
|
2018-02-28 11:00:08 +00:00
|
|
|
assert(DT->verify(DominatorTree::VerificationLevel::Fast));
|
2012-10-17 18:25:06 +00:00
|
|
|
}
|
|
|
|
|
2013-12-17 01:11:01 +00:00
|
|
|
/// \brief Check whether it is safe to if-convert this phi node.
|
|
|
|
///
|
|
|
|
/// Phi nodes with constant expressions that can trap are not safe to if
|
|
|
|
/// convert.
|
|
|
|
static bool canIfConvertPHINodes(BasicBlock *BB) {
|
2017-12-30 15:27:33 +00:00
|
|
|
for (PHINode &Phi : BB->phis()) {
|
|
|
|
for (Value *V : Phi.incoming_values())
|
2016-07-12 19:35:15 +00:00
|
|
|
if (auto *C = dyn_cast<Constant>(V))
|
2013-12-17 01:11:01 +00:00
|
|
|
if (C->canTrap())
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
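// Illustrative example for canIfConvertPHINodes (hypothetical IR): a phi such
// as
//
//   %p = phi i32 [ 0, %then ], [ sdiv (i32 1, i32 0), %else ]
//
// carries a constant expression that can trap, so this returns false and the
// block is not if-converted.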
|
|
|
|
|
2012-12-03 21:06:35 +00:00
|
|
|
bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
|
2014-06-25 17:50:15 +00:00
|
|
|
if (!EnableIfConversion) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("IfConversionDisabled")
|
|
|
|
<< "if-conversion is disabled");
|
2012-12-03 21:06:35 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-03 21:06:35 +00:00
|
|
|
|
|
|
|
assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
|
|
|
|
|
2013-06-28 20:46:27 +00:00
|
|
|
// A list of pointers that we can safely read and write to.
|
|
|
|
SmallPtrSet<Value *, 8> SafePointers;
|
|
|
|
|
|
|
|
// Collect safe addresses.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
2013-06-28 20:46:27 +00:00
|
|
|
if (blockNeedsPredication(BB))
|
|
|
|
continue;
|
|
|
|
|
2016-08-01 20:08:09 +00:00
|
|
|
for (Instruction &I : *BB)
|
2018-03-09 21:05:58 +00:00
|
|
|
if (auto *Ptr = getLoadStorePointerOperand(&I))
|
2016-08-01 20:08:09 +00:00
|
|
|
SafePointers.insert(Ptr);
|
2013-06-28 20:46:27 +00:00
|
|
|
}
|
|
|
|
|
2012-12-03 21:06:35 +00:00
|
|
|
// Collect the blocks that need predication.
|
2013-12-17 01:11:01 +00:00
|
|
|
BasicBlock *Header = TheLoop->getHeader();
|
2016-07-12 19:35:15 +00:00
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
2012-12-11 04:55:10 +00:00
|
|
|
// We don't support switch statements inside loops.
|
2014-06-25 17:50:15 +00:00
|
|
|
if (!isa<BranchInst>(BB->getTerminator())) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("LoopContainsSwitch", BB->getTerminator())
|
|
|
|
<< "loop contains a switch statement");
|
2012-12-11 04:55:10 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-11 04:55:10 +00:00
|
|
|
|
2012-12-03 22:57:09 +00:00
|
|
|
// We must be able to predicate all blocks that need to be predicated.
|
2013-12-17 01:11:01 +00:00
|
|
|
if (blockNeedsPredication(BB)) {
|
2014-06-25 17:50:15 +00:00
|
|
|
if (!blockCanBePredicated(BB, SafePointers)) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
|
|
|
|
<< "control flow cannot be substituted for a select");
|
2013-12-17 01:11:01 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
|
|
|
} else if (BB != Header && !canIfConvertPHINodes(BB)) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("NoCFGForSelect", BB->getTerminator())
|
|
|
|
<< "control flow cannot be substituted for a select");
|
2012-12-03 21:06:35 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-03 21:06:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// We can if-convert this loop.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-10-24 20:36:32 +00:00
|
|
|
bool LoopVectorizationLegality::canVectorize() {
|
2017-05-23 07:08:02 +00:00
|
|
|
// Store the result and return it at the end instead of exiting early, in case
|
|
|
|
// allowExtraAnalysis is used to report multiple reasons for not vectorizing.
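// For example (a usage sketch): when missed-optimization remarks are enabled
// for this pass (e.g. via '-pass-remarks-missed=loop-vectorize'),
// DoExtraAnalysis is true and a loop with both an unsupported CFG and an
// unvectorizable call reports both reasons rather than stopping at the first.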
|
|
|
|
bool Result = true;
|
2018-03-15 19:34:27 +00:00
|
|
|
|
2017-09-15 20:10:09 +00:00
|
|
|
bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
|
2013-05-24 18:05:35 +00:00
|
|
|
// We must have a loop in canonical form. Loops with indirectbr in them cannot
|
|
|
|
// be canonicalized.
|
2014-06-25 17:50:15 +00:00
|
|
|
if (!TheLoop->getLoopPreheader()) {
|
2017-12-20 13:28:38 +00:00
|
|
|
DEBUG(dbgs() << "LV: Loop doesn't have a legal pre-header.\n");
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
|
|
|
|
<< "loop control flow is not understood by vectorizer");
|
2017-12-20 13:28:38 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2016-08-12 22:47:13 +00:00
|
|
|
// FIXME: This code is currently dead, since any loop that gets sent to
|
|
|
|
// LoopVectorizationLegality is already an innermost loop.
|
|
|
|
//
|
2012-12-03 21:06:35 +00:00
|
|
|
// We can only vectorize innermost loops.
|
2015-07-13 17:21:14 +00:00
|
|
|
if (!TheLoop->empty()) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("NotInnermostLoop")
|
|
|
|
<< "loop is not the innermost loop");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-03 21:06:35 +00:00
|
|
|
|
|
|
|
// We must have a single backedge.
|
2014-06-25 17:50:15 +00:00
|
|
|
if (TheLoop->getNumBackEdges() != 1) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
|
|
|
|
<< "loop control flow is not understood by vectorizer");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-03 21:06:35 +00:00
|
|
|
|
|
|
|
// We must have a single exiting block.
|
2014-06-25 17:50:15 +00:00
|
|
|
if (!TheLoop->getExitingBlock()) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
|
|
|
|
<< "loop control flow is not understood by vectorizer");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-03 21:06:35 +00:00
|
|
|
|
2014-12-02 22:59:06 +00:00
|
|
|
// We only handle bottom-tested loops, i.e. loops in which the condition is
|
|
|
|
// checked at the end of each iteration. With that we can assume that all
|
|
|
|
// instructions in the loop are executed the same number of times.
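// A source-level sketch (illustrative only):
//
//   do { body(i); ++i; } while (i < n);   // latch is the exiting block: OK
//
// whereas a loop whose only exit test sits in the header, before the body,
// fails the check below.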
|
|
|
|
if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CFGNotUnderstood")
|
|
|
|
<< "loop control flow is not understood by vectorizer");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2014-12-02 22:59:06 +00:00
|
|
|
}
|
|
|
|
|
2013-10-11 16:14:39 +00:00
|
|
|
// We need to have a loop header.
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
|
|
|
|
<< '\n');
|
2012-12-03 21:06:35 +00:00
|
|
|
|
2013-12-05 05:44:44 +00:00
|
|
|
// Check if we can if-convert non-single-bb loops.
|
2013-10-11 16:14:39 +00:00
|
|
|
unsigned NumBlocks = TheLoop->getNumBlocks();
|
2012-12-03 21:06:35 +00:00
|
|
|
if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
|
|
|
|
DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2012-10-18 05:29:12 +00:00
|
|
|
}
|
|
|
|
|
2012-12-03 21:06:35 +00:00
|
|
|
// Check if we can vectorize the instructions and CFG in this loop.
|
2012-12-03 22:57:09 +00:00
|
|
|
if (!canVectorizeInstrs()) {
|
2012-12-03 21:06:35 +00:00
|
|
|
DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2012-12-03 21:06:35 +00:00
|
|
|
}
|
|
|
|
|
2012-11-09 07:09:44 +00:00
|
|
|
// Go over each instruction and look at memory deps.
|
2012-12-03 21:06:35 +00:00
|
|
|
if (!canVectorizeMemory()) {
|
|
|
|
DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2012-11-09 07:09:44 +00:00
|
|
|
}
|
|
|
|
|
2015-07-14 22:32:44 +00:00
|
|
|
DEBUG(dbgs() << "LV: We can vectorize this loop"
|
|
|
|
<< (LAI->getRuntimePointerChecking()->Need
|
|
|
|
? " (with a runtime bound check)"
|
|
|
|
: "")
|
|
|
|
<< "!\n");
|
2012-10-18 05:29:12 +00:00
|
|
|
|
2015-08-10 14:50:54 +00:00
|
|
|
bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
|
|
|
|
|
|
|
|
// If an override option has been passed in for interleaved accesses, use it.
|
|
|
|
if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
|
|
|
|
UseInterleaved = EnableInterleavedMemAccesses;
|
|
|
|
|
2015-06-08 06:39:56 +00:00
|
|
|
// Analyze interleaved memory accesses.
|
2015-08-10 14:50:54 +00:00
|
|
|
if (UseInterleaved)
|
2016-06-16 21:55:10 +00:00
|
|
|
InterleaveInfo.analyzeInterleaving(*getSymbolicStrides());
|
2015-11-02 14:41:02 +00:00
|
|
|
|
|
|
|
unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
|
|
|
|
if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
|
|
|
|
SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
|
|
|
|
|
2015-12-09 16:06:28 +00:00
|
|
|
if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("TooManySCEVRunTimeChecks")
|
|
|
|
<< "Too many SCEV assumptions need to be made and checked "
|
|
|
|
<< "at runtime");
|
2015-11-02 14:41:02 +00:00
|
|
|
DEBUG(dbgs() << "LV: Too many SCEV checks needed.\n");
|
2017-09-15 20:10:09 +00:00
|
|
|
if (DoExtraAnalysis)
|
2017-05-23 07:08:02 +00:00
|
|
|
Result = false;
|
|
|
|
else
|
|
|
|
return false;
|
2015-11-02 14:41:02 +00:00
|
|
|
}
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2017-05-23 07:08:02 +00:00
|
|
|
// Okay! We've done all the tests. If any have failed, return false. Otherwise
|
|
|
|
// we can vectorize, and at this point we don't have any other mem analysis
|
2012-10-24 20:36:32 +00:00
|
|
|
// which may limit our maximum vectorization factor, so just return true with
|
|
|
|
// no restrictions.
|
2017-05-23 07:08:02 +00:00
|
|
|
return Result;
|
2012-10-17 18:25:06 +00:00
|
|
|
}
|
|
|
|
|
2014-02-24 23:12:18 +00:00
|
|
|
static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
|
2013-05-11 23:04:28 +00:00
|
|
|
if (Ty->isPointerTy())
|
2013-08-22 02:42:55 +00:00
|
|
|
return DL.getIntPtrType(Ty);
|
|
|
|
|
2013-11-18 13:14:32 +00:00
|
|
|
// It is possible that chars or shorts overflow when we ask for the loop's
|
|
|
|
// trip count; work around this by changing the type size.
|
|
|
|
if (Ty->getScalarSizeInBits() < 32)
|
|
|
|
return Type::getInt32Ty(Ty->getContext());
|
|
|
|
|
2013-05-11 23:04:28 +00:00
|
|
|
return Ty;
|
|
|
|
}
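// Usage sketch for convertPointerToIntegerType (assuming a target with 64-bit
// pointers): an i8* type maps to i64, an i16 counter is widened to i32 to
// sidestep trip-count overflow, and an i32 or wider integer type is returned
// unchanged.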
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
|
2013-05-11 23:04:28 +00:00
|
|
|
Ty0 = convertPointerToIntegerType(DL, Ty0);
|
|
|
|
Ty1 = convertPointerToIntegerType(DL, Ty1);
|
|
|
|
if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
|
|
|
|
return Ty0;
|
|
|
|
return Ty1;
|
|
|
|
}
|
|
|
|
|
2013-05-31 19:53:50 +00:00
|
|
|
/// \brief Check that the instruction has outside loop users and is not an
|
|
|
|
/// identified reduction variable.
|
|
|
|
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
|
2016-06-15 00:35:26 +00:00
|
|
|
SmallPtrSetImpl<Value *> &AllowedExit) {
|
|
|
|
// Reduction and Induction instructions are allowed to have exit users. All
|
|
|
|
// other instructions must not have external users.
|
|
|
|
if (!AllowedExit.count(Inst))
|
2016-05-05 00:54:54 +00:00
|
|
|
// Check that all of the users of the loop are inside the BB.
|
2014-03-09 03:16:01 +00:00
|
|
|
for (User *U : Inst->users()) {
|
|
|
|
Instruction *UI = cast<Instruction>(U);
|
2013-05-31 19:53:50 +00:00
|
|
|
// This user may be a reduction exit value.
|
2014-03-09 03:16:01 +00:00
|
|
|
if (!TheLoop->contains(UI)) {
|
|
|
|
DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
|
2013-05-31 19:53:50 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
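// Illustrative sketch for hasOutsideLoopUser (hypothetical IR): if
// '%sum.next = add i32 %sum, %x' is defined in the loop and also read in the
// exit block, it has an outside user; unless it was recorded in AllowedExit
// (a reduction or induction exit value), this returns true and the loop is
// rejected.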
|
|
|
|
|
2016-06-15 00:35:26 +00:00
|
|
|
void LoopVectorizationLegality::addInductionPhi(
|
2016-06-17 20:41:14 +00:00
|
|
|
PHINode *Phi, const InductionDescriptor &ID,
|
2016-06-15 00:35:26 +00:00
|
|
|
SmallPtrSetImpl<Value *> &AllowedExit) {
|
2016-05-05 15:14:01 +00:00
|
|
|
Inductions[Phi] = ID;
|
2017-12-14 07:56:31 +00:00
|
|
|
|
|
|
|
// In case this induction also comes with casts that we know we can ignore
|
|
|
|
// in the vectorized loop body, record them here. All casts could be recorded
|
|
|
|
// here for ignoring, but it suffices to record only the first (as it is the
|
|
|
|
// only one that may be used outside the cast sequence).
|
|
|
|
const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
|
|
|
|
if (!Casts.empty())
|
|
|
|
InductionCastsToIgnore.insert(*Casts.begin());
|
|
|
|
|
2016-05-05 15:14:01 +00:00
|
|
|
Type *PhiTy = Phi->getType();
|
|
|
|
const DataLayout &DL = Phi->getModule()->getDataLayout();
|
|
|
|
|
|
|
|
// Get the widest type.
|
2016-07-24 07:24:54 +00:00
|
|
|
if (!PhiTy->isFloatingPointTy()) {
|
|
|
|
if (!WidestIndTy)
|
|
|
|
WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
|
|
|
|
else
|
|
|
|
WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
|
|
|
|
}
|
2016-05-05 15:14:01 +00:00
|
|
|
|
|
|
|
// Int inductions are special because we only allow one IV.
|
|
|
|
if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
|
2016-05-10 07:33:35 +00:00
|
|
|
ID.getConstIntStepValue() &&
|
|
|
|
ID.getConstIntStepValue()->isOne() &&
|
2016-05-05 15:14:01 +00:00
|
|
|
isa<Constant>(ID.getStartValue()) &&
|
2016-05-10 07:33:35 +00:00
|
|
|
cast<Constant>(ID.getStartValue())->isNullValue()) {
|
|
|
|
|
2016-05-05 15:14:01 +00:00
|
|
|
// Use the phi node with the widest type as induction. Use the last
|
|
|
|
// one if there are multiple (no good reason for doing this other
|
|
|
|
// than it is expedient). We've checked that it begins at zero and
|
|
|
|
// steps by one, so this is a canonical induction variable.
|
2017-02-14 22:14:01 +00:00
|
|
|
if (!PrimaryInduction || PhiTy == WidestIndTy)
|
|
|
|
PrimaryInduction = Phi;
|
2016-05-05 15:14:01 +00:00
|
|
|
}
|
|
|
|
|
2016-06-15 00:35:26 +00:00
|
|
|
// Both the PHI node itself, and the "post-increment" value feeding
|
|
|
|
// back into the PHI node may have external users.
|
2017-07-12 19:53:55 +00:00
|
|
|
// We can allow those uses, except if the SCEVs we have for them rely
|
|
|
|
// on predicates that only hold within the loop, since allowing the exit
|
|
|
|
// currently means re-using this SCEV outside the loop.
|
|
|
|
if (PSE.getUnionPredicate().isAlwaysTrue()) {
|
|
|
|
AllowedExit.insert(Phi);
|
|
|
|
AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
|
|
|
|
}
|
2016-06-14 22:30:41 +00:00
|
|
|
|
2016-06-15 00:35:26 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found an induction variable.\n");
|
2016-05-05 15:14:01 +00:00
|
|
|
}
|
|
|
|
|
2012-12-03 22:57:09 +00:00
|
|
|
bool LoopVectorizationLegality::canVectorizeInstrs() {
|
2012-12-03 21:06:35 +00:00
|
|
|
BasicBlock *Header = TheLoop->getHeader();
|
2012-11-17 00:27:03 +00:00
|
|
|
|
2013-05-05 01:54:48 +00:00
|
|
|
// Look for the attribute signaling the absence of NaNs.
|
|
|
|
Function &F = *Header->getParent();
|
2016-03-30 15:41:12 +00:00
|
|
|
HasFunNoNaNAttr =
|
|
|
|
F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
|
2013-05-05 01:54:48 +00:00
|
|
|
|
2012-12-03 22:57:09 +00:00
|
|
|
// For each block in the loop.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
2012-12-03 21:06:35 +00:00
|
|
|
// Scan the instructions in the block and look for hazards.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Instruction &I : *BB) {
|
|
|
|
if (auto *Phi = dyn_cast<PHINode>(&I)) {
|
2013-05-11 23:04:26 +00:00
|
|
|
Type *PhiTy = Phi->getType();
|
2012-12-10 19:25:06 +00:00
|
|
|
// Check that this PHI type is allowed.
|
2016-05-05 00:54:54 +00:00
|
|
|
if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
|
2013-05-11 23:04:26 +00:00
|
|
|
!PhiTy->isPointerTy()) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
|
|
|
|
<< "loop control flow is not understood by vectorizer");
|
2012-12-10 19:25:06 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found an non-int non-pointer PHI.\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-12-03 21:06:35 +00:00
|
|
|
// If this PHINode is not in the header block, then we know that we
|
2012-12-10 19:25:06 +00:00
|
|
|
// can convert it to select during if-conversion. No need to check if
|
|
|
|
// the PHIs in this block are induction or reduction variables.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (BB != Header) {
|
2013-05-31 19:53:50 +00:00
|
|
|
// Check that this instruction has no outside users or is an
|
|
|
|
// identified reduction value with an outside user.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (!hasOutsideLoopUser(TheLoop, Phi, AllowedExit))
|
2013-05-31 19:53:50 +00:00
|
|
|
continue;
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("NeitherInductionNorReduction", Phi)
|
|
|
|
<< "value could not be identified as "
|
|
|
|
"an induction or reduction variable");
|
2013-05-31 19:53:50 +00:00
|
|
|
return false;
|
|
|
|
}
|
2012-10-20 08:26:33 +00:00
|
|
|
|
2015-01-14 03:02:16 +00:00
|
|
|
// We only allow if-converted PHIs with exactly two incoming values.
|
2013-05-03 17:42:55 +00:00
|
|
|
if (Phi->getNumIncomingValues() != 2) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CFGNotUnderstood", Phi)
|
|
|
|
<< "control flow not understood by vectorizer");
|
2013-05-03 17:42:55 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-01-06 12:50:29 +00:00
|
|
|
RecurrenceDescriptor RedDes;
|
2018-02-04 15:42:24 +00:00
|
|
|
if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC,
|
|
|
|
DT)) {
|
2016-01-06 12:50:29 +00:00
|
|
|
if (RedDes.hasUnsafeAlgebra())
|
|
|
|
Requirements->addUnsafeAlgebraInst(RedDes.getUnsafeAlgebraInst());
|
|
|
|
AllowedExit.insert(RedDes.getLoopExitInstr());
|
|
|
|
Reductions[Phi] = RedDes;
|
2013-05-05 01:54:48 +00:00
|
|
|
continue;
|
|
|
|
}
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2016-05-05 15:20:39 +00:00
|
|
|
InductionDescriptor ID;
|
2016-07-24 07:24:54 +00:00
|
|
|
if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID)) {
|
2016-06-15 00:35:26 +00:00
|
|
|
addInductionPhi(Phi, ID, AllowedExit);
|
2016-07-24 07:24:54 +00:00
|
|
|
if (ID.hasUnsafeAlgebra() && !HasFunNoNaNAttr)
|
|
|
|
Requirements->addUnsafeAlgebraInst(ID.getUnsafeAlgebraInst());
|
2016-05-05 15:20:39 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-06-30 21:05:06 +00:00
|
|
|
if (RecurrenceDescriptor::isFirstOrderRecurrence(Phi, TheLoop,
|
|
|
|
SinkAfter, DT)) {
|
2016-02-19 17:56:08 +00:00
|
|
|
FirstOrderRecurrences.insert(Phi);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-05-05 15:20:39 +00:00
|
|
|
// As a last resort, coerce the PHI to an AddRec expression
|
|
|
|
// and re-try classifying it as an induction PHI.
|
2016-07-24 07:24:54 +00:00
|
|
|
if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true)) {
|
2016-07-12 19:35:15 +00:00
|
|
|
addInductionPhi(Phi, ID, AllowedExit);
|
2016-05-05 15:20:39 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("NonReductionValueUsedOutsideLoop", Phi)
|
|
|
|
<< "value that could not be identified as "
|
|
|
|
"reduction is used outside the loop");
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found an unidentified PHI." << *Phi << "\n");
|
2012-12-03 21:06:35 +00:00
|
|
|
return false;
|
2016-05-05 00:54:54 +00:00
|
|
|
} // end of PHI handling
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2015-03-17 19:46:50 +00:00
|
|
|
// We handle calls that:
|
|
|
|
// * Are debug info intrinsics.
|
|
|
|
// * Have a mapping to an IR intrinsic.
|
|
|
|
// * Have a vector version available.
|
2016-07-12 19:35:15 +00:00
|
|
|
auto *CI = dyn_cast<CallInst>(&I);
|
2016-05-05 00:54:54 +00:00
|
|
|
if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
|
|
|
|
!isa<DbgInfoIntrinsic>(CI) &&
|
2015-03-17 19:46:50 +00:00
|
|
|
!(CI->getCalledFunction() && TLI &&
|
|
|
|
TLI->isFunctionVectorizable(CI->getCalledFunction()->getName()))) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CantVectorizeCall", CI)
|
|
|
|
<< "call instruction cannot be vectorized");
|
2015-03-17 19:46:50 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found a non-intrinsic, non-libfunc callsite.\n");
|
2013-09-23 14:54:39 +00:00
|
|
|
return false;
|
2012-12-03 21:06:35 +00:00
|
|
|
}
|
2012-10-19 23:05:40 +00:00
|
|
|
|
2014-05-30 04:31:24 +00:00
|
|
|
// Intrinsics such as powi, cttz and ctlz are legal to vectorize if the
|
|
|
|
// second argument is the same (i.e. loop invariant).
|
2016-05-05 00:54:54 +00:00
|
|
|
if (CI && hasVectorInstrinsicScalarOpd(
|
|
|
|
getVectorIntrinsicIDForCall(CI, TLI), 1)) {
|
2015-12-09 16:06:28 +00:00
|
|
|
auto *SE = PSE.getSE();
|
|
|
|
if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CantVectorizeIntrinsic", CI)
|
|
|
|
<< "intrinsic instruction cannot be vectorized");
|
2014-05-30 04:31:24 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-24 09:14:18 +00:00
|
|
|
// Check that the instruction return type is vectorizable.
|
2013-10-25 20:40:15 +00:00
|
|
|
// Also, we can't vectorize extractelement instructions.
|
2016-07-12 19:35:15 +00:00
|
|
|
if ((!VectorType::isValidElementType(I.getType()) &&
|
|
|
|
!I.getType()->isVoidTy()) ||
|
|
|
|
isa<ExtractElementInst>(I)) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CantVectorizeInstructionReturnType", &I)
|
|
|
|
<< "instruction return type cannot be vectorized");
|
2013-10-02 20:04:29 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found unvectorizable type.\n");
|
2012-12-03 21:06:35 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-12-24 09:14:18 +00:00
|
|
|
// Check that the stored type is vectorizable.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (auto *ST = dyn_cast<StoreInst>(&I)) {
|
2012-12-24 09:14:18 +00:00
|
|
|
Type *T = ST->getValueOperand()->getType();
|
2014-06-25 17:50:15 +00:00
|
|
|
if (!VectorType::isValidElementType(T)) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CantVectorizeStore", ST)
|
|
|
|
<< "store instruction cannot be vectorized");
|
2012-12-24 09:14:18 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2014-01-10 18:20:32 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
// FP instructions can allow unsafe algebra, making them vectorizable by
|
|
|
|
// non-IEEE-754 compliant SIMD units.
|
|
|
|
// This applies to floating-point math operations and calls, not memory
|
|
|
|
// operations, shuffles, or casts, as they don't change precision or
|
|
|
|
// semantics.
|
2016-07-12 19:35:15 +00:00
|
|
|
} else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
|
2017-11-06 16:27:15 +00:00
|
|
|
!I.isFast()) {
|
2016-04-14 20:42:18 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
|
|
|
|
Hints->setPotentiallyUnsafe();
|
|
|
|
}
|
|
|
|
|
2012-12-03 21:06:35 +00:00
|
|
|
// Reduction instructions are allowed to have exit users.
|
|
|
|
// All other instructions must not have external users.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("ValueUsedOutsideLoop", &I)
|
|
|
|
<< "value cannot be used outside the loop");
|
2013-05-31 19:53:50 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-12-03 21:06:35 +00:00
|
|
|
} // next instr.
|
|
|
|
}
|
2012-10-17 18:25:06 +00:00
|
|
|
|
2017-02-14 22:14:01 +00:00
|
|
|
if (!PrimaryInduction) {
|
2012-11-25 08:41:35 +00:00
|
|
|
DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
|
2014-06-25 17:50:15 +00:00
|
|
|
if (Inductions.empty()) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("NoInductionVariable")
|
|
|
|
<< "loop induction variable could not be identified");
|
2013-05-09 00:32:18 +00:00
|
|
|
return false;
|
2014-06-25 17:50:15 +00:00
|
|
|
}
|
2012-10-18 05:29:12 +00:00
|
|
|
}
|
|
|
|
|
2015-09-02 10:15:05 +00:00
|
|
|
// Now we know the widest induction type, check if our found induction
|
|
|
|
// is the same size. If it's not, unset it here and InnerLoopVectorizer
|
|
|
|
// will create another.
|
2017-02-14 22:14:01 +00:00
|
|
|
if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
|
|
|
|
PrimaryInduction = nullptr;
|
2015-09-02 10:15:05 +00:00
|
|
|
|
2012-12-03 21:06:35 +00:00
|
|
|
return true;
|
|
|
|
}
|
2012-10-26 23:49:28 +00:00
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
void LoopVectorizationCostModel::collectLoopScalars(unsigned VF) {
|
2017-04-07 14:15:34 +00:00
|
|
|
// We should not collect Scalars more than once per VF. Right now, this
|
|
|
|
// function is called from collectUniformsAndScalars(), which already does
|
|
|
|
// this check. Collecting Scalars for VF=1 does not make any sense.
|
2017-02-08 19:25:23 +00:00
|
|
|
assert(VF >= 2 && !Scalars.count(VF) &&
|
|
|
|
"This function should not be visited twice for the same VF");
|
2016-08-02 14:29:41 +00:00
|
|
|
|
2017-04-07 14:15:34 +00:00
|
|
|
SmallSetVector<Instruction *, 8> Worklist;
|
|
|
|
|
|
|
|
// These sets are used to seed the analysis with pointers used by memory
|
|
|
|
// accesses that will remain scalar.
|
|
|
|
SmallSetVector<Instruction *, 8> ScalarPtrs;
|
|
|
|
SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
|
|
|
|
|
|
|
|
// A helper that returns true if the use of Ptr by MemAccess will be scalar.
|
|
|
|
// The pointer operands of loads and stores will be scalar as long as the
|
|
|
|
// memory access is not a gather or scatter operation. The value operand of a
|
|
|
|
// store will remain scalar if the store is scalarized.
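// For example (illustrative; the names A, B, idx, x and y are hypothetical):
//   A[i] = x;       // if this store is scalarized, the address &A[i] is a
//                   // scalar use of its pointer operand
//   y = B[idx[i]];  // a gather keeps its pointer operand as a vector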
|
|
|
|
auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
|
|
|
|
InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
|
|
|
|
assert(WideningDecision != CM_Unknown &&
|
|
|
|
"Widening decision should be ready at this moment");
|
|
|
|
if (auto *Store = dyn_cast<StoreInst>(MemAccess))
|
|
|
|
if (Ptr == Store->getValueOperand())
|
|
|
|
return WideningDecision == CM_Scalarize;
|
2018-03-09 21:05:58 +00:00
|
|
|
assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
|
2017-04-07 14:15:34 +00:00
|
|
|
"Ptr is neither a value or pointer operand");
|
|
|
|
return WideningDecision != CM_GatherScatter;
|
|
|
|
};
|
|
|
|
|
|
|
|
// A helper that returns true if the given value is a bitcast or
|
|
|
|
// getelementptr instruction contained in the loop.
|
|
|
|
auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
|
|
|
|
return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
|
|
|
|
isa<GetElementPtrInst>(V)) &&
|
|
|
|
!TheLoop->isLoopInvariant(V);
|
|
|
|
};
|
|
|
|
|
|
|
|
// A helper that evaluates a memory access's use of a pointer. If the use
|
|
|
|
// will be a scalar use, and the pointer is only used by memory accesses, we
|
|
|
|
// place the pointer in ScalarPtrs. Otherwise, the pointer is placed in
|
|
|
|
// PossibleNonScalarPtrs.
|
|
|
|
auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
|
|
|
|
// We only care about bitcast and getelementptr instructions contained in
|
|
|
|
// the loop.
|
|
|
|
if (!isLoopVaryingBitCastOrGEP(Ptr))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// If the pointer has already been identified as scalar (e.g., if it was
|
|
|
|
// also identified as uniform), there's nothing to do.
|
|
|
|
auto *I = cast<Instruction>(Ptr);
|
|
|
|
if (Worklist.count(I))
|
|
|
|
return;
|
2016-08-02 14:29:41 +00:00
|
|
|
|
2017-04-07 14:15:34 +00:00
|
|
|
// If the use of the pointer will be a scalar use, and all users of the
|
|
|
|
// pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
|
|
|
|
// place the pointer in PossibleNonScalarPtrs.
|
2017-10-12 23:30:03 +00:00
|
|
|
if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
|
2017-04-07 14:15:34 +00:00
|
|
|
return isa<LoadInst>(U) || isa<StoreInst>(U);
|
|
|
|
}))
|
|
|
|
ScalarPtrs.insert(I);
|
|
|
|
else
|
|
|
|
PossibleNonScalarPtrs.insert(I);
|
|
|
|
};
|
|
|
|
|
|
|
|
// We seed the scalars analysis with three classes of instructions: (1)
|
|
|
|
// instructions marked uniform-after-vectorization, (2) bitcast and
|
|
|
|
// getelementptr instructions used by memory accesses requiring a scalar use,
|
|
|
|
// and (3) pointer induction variables and their update instructions (we
|
|
|
|
// currently only scalarize these).
|
|
|
|
//
|
|
|
|
// (1) Add to the worklist all instructions that have been identified as
|
|
|
|
// uniform-after-vectorization.
|
|
|
|
Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
|
|
|
|
|
|
|
|
// (2) Add to the worklist all bitcast and getelementptr instructions used by
|
|
|
|
// memory accesses requiring a scalar use. The pointer operands of loads and
|
|
|
|
// stores will be scalar as long as the memory access is not a gather or
|
|
|
|
// scatter operation. The value operand of a store will remain scalar if the
|
|
|
|
// store is scalarized.
|
2017-03-24 20:49:43 +00:00
|
|
|
for (auto *BB : TheLoop->blocks())
|
2016-08-02 14:29:41 +00:00
|
|
|
for (auto &I : *BB) {
|
2017-04-07 14:15:34 +00:00
|
|
|
if (auto *Load = dyn_cast<LoadInst>(&I)) {
|
|
|
|
evaluatePtrUse(Load, Load->getPointerOperand());
|
|
|
|
} else if (auto *Store = dyn_cast<StoreInst>(&I)) {
|
|
|
|
evaluatePtrUse(Store, Store->getPointerOperand());
|
|
|
|
evaluatePtrUse(Store, Store->getValueOperand());
|
2017-03-24 20:49:43 +00:00
|
|
|
}
|
2016-08-02 14:29:41 +00:00
|
|
|
}
|
2017-04-07 14:15:34 +00:00
|
|
|
for (auto *I : ScalarPtrs)
|
|
|
|
if (!PossibleNonScalarPtrs.count(I)) {
|
|
|
|
DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
|
|
|
|
Worklist.insert(I);
|
|
|
|
}
|
|
|
|
|
|
|
|
// (3) Add to the worklist all pointer induction variables and their update
|
|
|
|
// instructions.
|
|
|
|
//
|
|
|
|
// TODO: Once we are able to vectorize pointer induction variables we should
|
|
|
|
// no longer insert them into the worklist here.
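// For example (illustrative sketch): in a loop such as
//   for (p = a; p != a + n; ++p) *p = 0;
// the pointer induction 'p' (IK_PtrInduction) and its update 'p + 1' are
// inserted here, since pointer inductions are currently always scalarized.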
|
|
|
|
auto *Latch = TheLoop->getLoopLatch();
|
|
|
|
for (auto &Induction : *Legal->getInductionVars()) {
|
|
|
|
auto *Ind = Induction.first;
|
|
|
|
auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
|
|
|
|
if (Induction.second.getKind() != InductionDescriptor::IK_PtrInduction)
|
|
|
|
continue;
|
|
|
|
Worklist.insert(Ind);
|
|
|
|
Worklist.insert(IndUpdate);
|
|
|
|
DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
|
|
|
|
DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
|
|
|
|
}
|
|
|
|
|
2017-05-24 13:42:56 +00:00
|
|
|
// Insert the forced scalars.
|
|
|
|
// FIXME: Currently widenPHIInstruction() often creates a dead vector
|
|
|
|
// induction variable when the PHI user is scalarized.
|
|
|
|
if (ForcedScalars.count(VF))
|
|
|
|
for (auto *I : ForcedScalars.find(VF)->second)
|
|
|
|
Worklist.insert(I);
|
|
|
|
|
2017-04-07 14:15:34 +00:00
|
|
|
// Expand the worklist by looking through any bitcasts and getelementptr
|
|
|
|
// instructions we've already identified as scalar. This is similar to the
|
|
|
|
// expansion step in collectLoopUniforms(); however, here we're only
|
|
|
|
// expanding to include additional bitcasts and getelementptr instructions.
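// For example (illustrative IR, not from the original source): if
//   %b = bitcast i8* %g to i32*
// is already in Worklist, and the only users of
//   %g = getelementptr i8, i8* %base, i64 %i
// are %b and memory accesses with a scalar use of %g, then %g is added too.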
|
|
|
|
unsigned Idx = 0;
|
|
|
|
while (Idx != Worklist.size()) {
|
|
|
|
Instruction *Dst = Worklist[Idx++];
|
|
|
|
if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
|
|
|
|
continue;
|
|
|
|
auto *Src = cast<Instruction>(Dst->getOperand(0));
|
2017-10-12 23:30:03 +00:00
|
|
|
if (llvm::all_of(Src->users(), [&](User *U) -> bool {
|
2017-04-07 14:15:34 +00:00
|
|
|
auto *J = cast<Instruction>(U);
|
|
|
|
return !TheLoop->contains(J) || Worklist.count(J) ||
|
|
|
|
((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
|
|
|
|
isScalarUse(J, Src));
|
|
|
|
})) {
|
|
|
|
Worklist.insert(Src);
|
|
|
|
DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
|
|
|
|
}
|
|
|
|
}
|
2016-08-02 14:29:41 +00:00
|
|
|
|
|
|
|
// An induction variable will remain scalar if all users of the induction
|
|
|
|
// variable and induction variable update remain scalar.
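// For example (illustrative IR):
//   %i = phi i64 [ 0, %ph ], [ %i.next, %latch ]
//   %i.next = add nuw nsw i64 %i, 1
// If every user of %i other than %i.next, and every user of %i.next other
// than %i, is outside the loop or already in Worklist, both stay scalar;
// a single remaining vector user keeps the pair out of Scalars.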
|
2017-02-08 19:25:23 +00:00
|
|
|
for (auto &Induction : *Legal->getInductionVars()) {
|
2016-08-02 14:29:41 +00:00
|
|
|
auto *Ind = Induction.first;
|
|
|
|
auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
|
|
|
|
|
2017-04-07 14:15:34 +00:00
|
|
|
// We already considered pointer induction variables, so there's no reason
|
|
|
|
// to look at their users again.
|
|
|
|
//
|
|
|
|
// TODO: Once we are able to vectorize pointer induction variables we
|
|
|
|
// should no longer skip over them here.
|
|
|
|
if (Induction.second.getKind() == InductionDescriptor::IK_PtrInduction)
|
|
|
|
continue;
|
|
|
|
|
2016-08-02 14:29:41 +00:00
|
|
|
// Determine if all users of the induction variable are scalar after
|
|
|
|
// vectorization.
|
2017-10-12 23:30:03 +00:00
|
|
|
auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
|
2016-08-02 14:29:41 +00:00
|
|
|
auto *I = cast<Instruction>(U);
|
2017-04-07 14:15:34 +00:00
|
|
|
return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
|
2016-08-02 14:29:41 +00:00
|
|
|
});
|
|
|
|
if (!ScalarInd)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Determine if all users of the induction variable update instruction are
|
|
|
|
// scalar after vectorization.
|
2017-10-12 23:30:03 +00:00
|
|
|
auto ScalarIndUpdate =
|
|
|
|
llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
|
|
|
|
auto *I = cast<Instruction>(U);
|
|
|
|
return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
|
|
|
|
});
|
2016-08-02 14:29:41 +00:00
|
|
|
if (!ScalarIndUpdate)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// The induction variable and its update instruction will remain scalar.
|
2017-04-07 14:15:34 +00:00
|
|
|
Worklist.insert(Ind);
|
|
|
|
Worklist.insert(IndUpdate);
|
|
|
|
DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
|
|
|
|
DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate << "\n");
|
2016-08-02 14:29:41 +00:00
|
|
|
}
|
2017-04-07 14:15:34 +00:00
|
|
|
|
|
|
|
Scalars[VF].insert(Worklist.begin(), Worklist.end());
|
2016-08-02 14:29:41 +00:00
|
|
|
}
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) {
|
|
|
|
if (!Legal->blockNeedsPredication(I->getParent()))
|
2016-10-05 17:52:34 +00:00
|
|
|
return false;
|
|
|
|
switch (I->getOpcode()) {
|
|
|
|
default:
|
|
|
|
break;
|
2018-02-26 11:06:36 +00:00
|
|
|
case Instruction::Load:
|
|
|
|
case Instruction::Store: {
|
|
|
|
if (!Legal->isMaskRequired(I))
|
|
|
|
return false;
|
2018-03-09 21:05:58 +00:00
|
|
|
auto *Ptr = getLoadStorePointerOperand(I);
|
2018-02-26 11:06:36 +00:00
|
|
|
auto *Ty = getMemInstValueType(I);
|
|
|
|
return isa<LoadInst>(I) ?
|
|
|
|
!(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty))
|
|
|
|
: !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty));
|
|
|
|
}
|
2016-10-05 17:52:34 +00:00
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::URem:
|
|
|
|
return mayDivideByZero(*I);
|
|
|
|
}
|
|
|
|
return false;
|
2016-09-08 19:11:07 +00:00
|
|
|
}
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,
|
|
|
|
unsigned VF) {
|
2016-09-08 19:11:07 +00:00
|
|
|
// Get and ensure we have a valid memory instruction.
|
|
|
|
LoadInst *LI = dyn_cast<LoadInst>(I);
|
|
|
|
StoreInst *SI = dyn_cast<StoreInst>(I);
|
|
|
|
assert((LI || SI) && "Invalid memory instruction");
|
|
|
|
|
2018-03-09 21:05:58 +00:00
|
|
|
auto *Ptr = getLoadStorePointerOperand(I);
|
2016-09-08 19:11:07 +00:00
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
// First of all, in order to be widened, the pointer should be consecutive.
|
2018-02-26 11:06:36 +00:00
|
|
|
if (!Legal->isConsecutivePtr(Ptr))
|
2017-02-08 19:25:23 +00:00
|
|
|
return false;
|
2016-09-08 19:11:07 +00:00
|
|
|
|
|
|
|
// If the instruction is a load or store located in a predicated block, it will be
|
|
|
|
// scalarized.
|
2016-10-05 17:52:34 +00:00
|
|
|
if (isScalarWithPredication(I))
|
2017-02-08 19:25:23 +00:00
|
|
|
return false;
|
2016-09-08 19:11:07 +00:00
|
|
|
|
|
|
|
// If the instruction's allocated size doesn't equal its type size, it
|
|
|
|
// requires padding and will be scalarized.
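// For example (illustrative): a load of i48 has a store size of 48 bits but
// an alloc size of 64 bits, so widening it would touch padding bytes;
// hasIrregularType() catches this case and we fall back to scalarization.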
|
|
|
|
auto &DL = I->getModule()->getDataLayout();
|
|
|
|
auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType();
|
|
|
|
if (hasIrregularType(ScalarTy, DL, VF))
|
2017-02-08 19:25:23 +00:00
|
|
|
return false;
|
2016-09-08 19:11:07 +00:00
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
return true;
|
2016-09-08 19:11:07 +00:00
|
|
|
}
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
void LoopVectorizationCostModel::collectLoopUniforms(unsigned VF) {
|
|
|
|
// We should not collect Uniforms more than once per VF. Right now,
|
Improve profile-guided heuristics to use estimated trip count.
Summary:
Existing heuristic uses the ratio between the function entry
frequency and the loop invocation frequency to find cold loops. However,
even if the loop executes frequently, if it has a small trip count per
each invocation, vectorization is not beneficial. On the other hand,
even if the loop invocation frequency is much smaller than the function
invocation frequency, if the trip count is high it is still beneficial
to vectorize the loop.
This patch uses estimated trip count computed from the profile metadata
as a primary metric to determine coldness of the loop. If the estimated
trip count cannot be computed, it falls back to the original heuristics.
Reviewers: Ayal, mssimpso, mkuper, danielcdh, wmi, tejohnson
Reviewed By: tejohnson
Subscribers: tejohnson, mzolotukhin, llvm-commits
Differential Revision: https://reviews.llvm.org/D32451
llvm-svn: 305729
2017-06-19 18:48:58 +00:00
|
|
|
// this function is called from collectUniformsAndScalars(), which
|
2017-02-08 19:25:23 +00:00
|
|
|
// already does this check. Collecting Uniforms for VF=1 does not make any
|
|
|
|
// sense.
|
|
|
|
|
|
|
|
assert(VF >= 2 && !Uniforms.count(VF) &&
|
|
|
|
"This function should not be visited twice for the same VF");
|
|
|
|
|
2017-06-19 18:48:58 +00:00
|
|
|
// Visit the list of Uniforms. If we do not find any uniform value, we will
|
2017-02-08 19:25:23 +00:00
|
|
|
// not analyze again. Uniforms.count(VF) will return 1.
|
|
|
|
Uniforms[VF].clear();
|
|
|
|
|
2012-10-26 23:49:28 +00:00
|
|
|
// We now know that the loop is vectorizable!
|
2016-08-02 20:27:49 +00:00
|
|
|
// Collect instructions inside the loop that will remain uniform after
|
|
|
|
// vectorization.
|
2012-12-03 21:06:35 +00:00
|
|
|
|
2016-08-02 20:27:49 +00:00
|
|
|
// Global values, params and instructions outside of the current loop are out of
|
|
|
|
// scope.
|
2016-06-30 18:42:56 +00:00
|
|
|
auto isOutOfScope = [&](Value *V) -> bool {
|
|
|
|
Instruction *I = dyn_cast<Instruction>(V);
|
|
|
|
return (!I || !TheLoop->contains(I));
|
|
|
|
};
|
|
|
|
|
|
|
|
SetVector<Instruction *> Worklist;
|
|
|
|
BasicBlock *Latch = TheLoop->getLoopLatch();
|
2016-10-07 15:20:13 +00:00
|
|
|
|
|
|
|
// Start with the conditional branch. If the branch condition is an
|
|
|
|
// instruction contained in the loop that is only used by the branch, it is
|
|
|
|
// uniform.
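// For example (illustrative IR): a latch exit test such as
//   %cmp = icmp ult i64 %i.next, %n
//   br i1 %cmp, label %loop, label %exit
// has %cmp used only by the branch, so a single scalar copy per vector
// iteration suffices and %cmp seeds the uniform worklist.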
|
|
|
|
auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
|
|
|
|
if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) {
|
2016-06-30 18:42:56 +00:00
|
|
|
Worklist.insert(Cmp);
|
|
|
|
DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
|
|
|
|
}
|
2012-10-26 23:49:28 +00:00
|
|
|
|
2016-09-08 19:11:07 +00:00
|
|
|
// Holds consecutive and consecutive-like pointers. Consecutive-like pointers
|
|
|
|
// are pointers that are treated like consecutive pointers during
|
|
|
|
// vectorization. The pointer operands of interleaved accesses are an
|
|
|
|
// example.
|
2016-11-16 18:53:17 +00:00
|
|
|
SmallSetVector<Instruction *, 8> ConsecutiveLikePtrs;
|
2016-09-08 19:11:07 +00:00
|
|
|
|
|
|
|
// Holds pointer operands of instructions that are possibly non-uniform.
|
|
|
|
SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
auto isUniformDecision = [&](Instruction *I, unsigned VF) {
|
|
|
|
InstWidening WideningDecision = getWideningDecision(I, VF);
|
|
|
|
assert(WideningDecision != CM_Unknown &&
|
|
|
|
"Widening decision should be ready at this moment");
|
|
|
|
|
|
|
|
return (WideningDecision == CM_Widen ||
|
2017-12-16 02:55:24 +00:00
|
|
|
WideningDecision == CM_Widen_Reverse ||
|
2017-02-08 19:25:23 +00:00
|
|
|
WideningDecision == CM_Interleave);
|
|
|
|
};
|
2016-09-08 19:11:07 +00:00
|
|
|
// Iterate over the instructions in the loop, and collect all
|
|
|
|
// consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
|
|
|
|
// that a consecutive-like pointer operand will be scalarized, we collect it
|
|
|
|
// in PossibleNonUniformPtrs instead. We use two sets here because a single
|
|
|
|
// getelementptr instruction can be used by both vectorized and scalarized
|
|
|
|
// memory instructions. For example, if a loop loads and stores from the same
|
|
|
|
// location, but the store is conditional, the store will be scalarized, and
|
|
|
|
// the getelementptr won't remain uniform.
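// For example (illustrative IR of the case just described):
//   %p = getelementptr inbounds float, float* %A, i64 %i
//   %v = load float, float* %p   ; widened, would like a uniform %p
//   store float %x, float* %p    ; predicated and scalarized, needs %p per lane
// Because of the scalarized store, %p is placed in PossibleNonUniformPtrs.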
|
2016-08-02 14:29:41 +00:00
|
|
|
for (auto *BB : TheLoop->blocks())
|
2016-06-30 18:42:56 +00:00
|
|
|
for (auto &I : *BB) {
|
2016-09-08 21:38:26 +00:00
|
|
|
// If there's no pointer operand, there's nothing to do.
|
2018-03-09 21:05:58 +00:00
|
|
|
auto *Ptr = dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
|
2016-09-08 21:38:26 +00:00
|
|
|
if (!Ptr)
|
2016-08-02 14:29:41 +00:00
|
|
|
continue;
|
2016-09-08 19:11:07 +00:00
|
|
|
|
2016-09-14 14:47:40 +00:00
|
|
|
// True if all users of Ptr are memory accesses that have Ptr as their
|
|
|
|
// pointer operand.
|
2017-10-12 23:30:03 +00:00
|
|
|
auto UsersAreMemAccesses =
|
|
|
|
llvm::all_of(Ptr->users(), [&](User *U) -> bool {
|
2018-03-09 21:05:58 +00:00
|
|
|
return getLoadStorePointerOperand(U) == Ptr;
|
2017-10-12 23:30:03 +00:00
|
|
|
});
|
2016-09-14 14:47:40 +00:00
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
// Ensure the memory instruction will not be scalarized or used by
|
|
|
|
// gather/scatter, making its pointer operand non-uniform. If the pointer
|
|
|
|
// operand is used by any instruction other than a memory access, we
|
|
|
|
// conservatively assume the pointer operand may be non-uniform.
|
|
|
|
if (!UsersAreMemAccesses || !isUniformDecision(&I, VF))
|
2016-09-08 19:11:07 +00:00
|
|
|
PossibleNonUniformPtrs.insert(Ptr);
|
|
|
|
|
2016-09-08 21:38:26 +00:00
|
|
|
// If the memory instruction will be vectorized and its pointer operand
|
2017-02-08 19:25:23 +00:00
|
|
|
// is consecutive-like or part of an interleaved access group, the pointer operand should
|
|
|
|
// remain uniform.
|
2017-01-17 20:51:39 +00:00
|
|
|
else
|
2017-02-08 19:25:23 +00:00
|
|
|
ConsecutiveLikePtrs.insert(Ptr);
|
2016-09-08 19:11:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add to the Worklist all consecutive and consecutive-like pointers that
|
|
|
|
// aren't also identified as possibly non-uniform.
|
|
|
|
for (auto *V : ConsecutiveLikePtrs)
|
|
|
|
if (!PossibleNonUniformPtrs.count(V)) {
|
|
|
|
DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
|
|
|
|
Worklist.insert(V);
|
2016-06-30 18:42:56 +00:00
|
|
|
}
|
2012-10-26 23:49:28 +00:00
|
|
|
|
2016-06-30 18:42:56 +00:00
|
|
|
// Expand Worklist in topological order: whenever a new instruction
|
|
|
|
// is added, its users should be either already inside Worklist, or
|
|
|
|
// out of scope. It ensures a uniform instruction will only be used
|
|
|
|
// by uniform instructions or out of scope instructions.
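// For example (illustrative): once a uniform getelementptr is in Worklist,
// its index computation (say, %off = shl i64 %i, 2) is added as well,
// provided every in-loop user of %off is already in Worklist or is a memory
// access whose widening decision keeps its pointer operand uniform.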
|
|
|
|
unsigned idx = 0;
|
2016-07-27 23:53:58 +00:00
|
|
|
while (idx != Worklist.size()) {
|
2016-06-30 18:42:56 +00:00
|
|
|
Instruction *I = Worklist[idx++];
|
2012-10-26 23:49:28 +00:00
|
|
|
|
2016-06-30 18:42:56 +00:00
|
|
|
for (auto OV : I->operand_values()) {
|
|
|
|
if (isOutOfScope(OV))
|
|
|
|
continue;
|
2016-07-12 19:35:15 +00:00
|
|
|
auto *OI = cast<Instruction>(OV);
|
2017-10-12 23:30:03 +00:00
|
|
|
if (llvm::all_of(OI->users(), [&](User *U) -> bool {
|
2017-03-07 18:47:30 +00:00
|
|
|
auto *J = cast<Instruction>(U);
|
|
|
|
return !TheLoop->contains(J) || Worklist.count(J) ||
|
2018-03-09 21:05:58 +00:00
|
|
|
(OI == getLoadStorePointerOperand(J) &&
|
|
|
|
isUniformDecision(J, VF));
|
2016-06-30 18:42:56 +00:00
|
|
|
})) {
|
|
|
|
Worklist.insert(OI);
|
|
|
|
DEBUG(dbgs() << "LV: Found uniform instruction: " << *OI << "\n");
|
|
|
|
}
|
|
|
|
}
|
2016-07-27 23:53:58 +00:00
|
|
|
}
|
2016-06-30 18:42:56 +00:00
|
|
|
|
2016-09-14 14:47:40 +00:00
|
|
|
// Returns true if Ptr is the pointer operand of a memory access instruction
|
|
|
|
// I, and I is known to not require scalarization.
|
|
|
|
auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
|
2018-03-09 21:05:58 +00:00
|
|
|
return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
|
2016-09-14 14:47:40 +00:00
|
|
|
};
|
|
|
|
|
2016-06-30 18:42:56 +00:00
|
|
|
// For an instruction to be added into Worklist above, all its users inside
|
2016-09-13 19:01:45 +00:00
|
|
|
// the loop should also be in Worklist. However, this condition cannot be
|
|
|
|
// true for phi nodes that form a cyclic dependence. We must process phi
|
|
|
|
// nodes separately. An induction variable will remain uniform if all users
|
|
|
|
// of the induction variable and induction variable update remain uniform.
|
2016-09-14 14:47:40 +00:00
|
|
|
// The code below handles both pointer and non-pointer induction variables.
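// For example (illustrative IR):
//   %i = phi i64 [ 0, %ph ], [ %i.next, %latch ]
//   %i.next = add nuw nsw i64 %i, 1
// Besides users that are outside the loop or already uniform, a use of %i as
// the pointer operand of a widened or interleaved access (see
// isVectorizedMemAccessUse above) also keeps the pair uniform.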
|
2017-02-08 19:25:23 +00:00
|
|
|
for (auto &Induction : *Legal->getInductionVars()) {
|
2016-09-13 19:01:45 +00:00
|
|
|
auto *Ind = Induction.first;
|
|
|
|
auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
|
|
|
|
|
|
|
|
// Determine if all users of the induction variable are uniform after
|
|
|
|
// vectorization.
|
2017-10-12 23:30:03 +00:00
|
|
|
auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
|
2016-09-13 19:01:45 +00:00
|
|
|
auto *I = cast<Instruction>(U);
|
2016-09-14 14:47:40 +00:00
|
|
|
return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
|
|
|
|
isVectorizedMemAccessUse(I, Ind);
|
2016-09-13 19:01:45 +00:00
|
|
|
});
|
|
|
|
if (!UniformInd)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Determine if all users of the induction variable update instruction are
|
|
|
|
// uniform after vectorization.
|
2017-10-12 23:30:03 +00:00
|
|
|
auto UniformIndUpdate =
|
|
|
|
llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
|
|
|
|
auto *I = cast<Instruction>(U);
|
|
|
|
return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
|
|
|
|
isVectorizedMemAccessUse(I, IndUpdate);
|
|
|
|
});
|
2016-09-13 19:01:45 +00:00
|
|
|
if (!UniformIndUpdate)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// The induction variable and its update instruction will remain uniform.
|
|
|
|
Worklist.insert(Ind);
|
|
|
|
Worklist.insert(IndUpdate);
|
|
|
|
DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
|
|
|
|
DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
|
2012-10-26 23:49:28 +00:00
|
|
|
}
|
2016-06-30 18:42:56 +00:00
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
Uniforms[VF].insert(Worklist.begin(), Worklist.end());
|
2012-10-20 04:59:06 +00:00
|
|
|
}
|
|
|
|
|
2015-02-01 16:56:04 +00:00
|
|
|
bool LoopVectorizationLegality::canVectorizeMemory() {
|
2016-07-09 22:56:50 +00:00
|
|
|
LAI = &(*GetLAA)(*TheLoop);
|
2016-06-24 15:33:25 +00:00
|
|
|
InterleaveInfo.setLAI(LAI);
|
2016-09-30 00:01:30 +00:00
|
|
|
const OptimizationRemarkAnalysis *LAR = LAI->getReport();
|
|
|
|
if (LAR) {
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
|
|
|
|
"loop not vectorized: ", *LAR);
|
|
|
|
});
|
2016-09-30 00:01:30 +00:00
|
|
|
}
|
2015-03-10 18:54:23 +00:00
|
|
|
if (!LAI->canVectorizeMemory())
|
|
|
|
return false;
|
|
|
|
|
2015-04-08 17:48:40 +00:00
|
|
|
if (LAI->hasStoreToLoopInvariantAddress()) {
|
2016-09-29 16:49:42 +00:00
|
|
|
ORE->emit(createMissedAnalysis("CantVectorizeStoreToLoopInvariantAddress")
|
|
|
|
<< "write to a loop invariant address could not be vectorized");
|
2015-04-08 17:48:40 +00:00
|
|
|
DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-08-10 23:01:55 +00:00
|
|
|
Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
|
2016-07-01 05:59:55 +00:00
|
|
|
PSE.addPredicate(LAI->getPSE().getUnionPredicate());
|
2015-08-10 23:01:55 +00:00
|
|
|
|
2015-03-10 18:54:23 +00:00
|
|
|
return true;
|
2015-02-01 16:56:04 +00:00
|
|
|
}
|
|
|
|
|
[LV] Support efficient vectorization of an induction with redundant casts
D30041 extended SCEVPredicateRewriter to improve handling of Phi nodes whose
update chain involves casts; PSCEV can now build an AddRecurrence for some
forms of such phi nodes, under the proper runtime overflow test. This means
that we can identify such phi nodes as an induction, and the loop-vectorizer
can now vectorize such inductions, however inefficiently. The vectorizer
doesn't know that it can ignore the casts, and so it vectorizes them.
This patch records the casts in the InductionDescriptor, so that they could
be marked to be ignored for cost calculation (we use VecValuesToIgnore for
that) and ignored for vectorization/widening/scalarization (i.e. treated as
TriviallyDead).
In addition to marking all these casts to be ignored, we also need to make
sure that each cast is mapped to the right vector value in the vector loop body
(be it a widened, vectorized, or scalarized induction). So whenever an
induction phi is mapped to a vector value (during vectorization/widening/
scalarization), we also map the respective cast instruction (if exists) to that
vector value. (If the phi-update sequence of an induction involves more than one
cast, then the above mapping to vector value is relevant only for the last cast
of the sequence as we allow only the "last cast" to be used outside the
induction update chain itself).
This is the last step in addressing PR30654.
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
bool LoopVectorizationLegality::isInductionPhi(const Value *V) {
|
2016-05-05 00:54:54 +00:00
|
|
|
Value *In0 = const_cast<Value *>(V);
|
2012-12-13 00:21:03 +00:00
|
|
|
PHINode *PN = dyn_cast_or_null<PHINode>(In0);
|
|
|
|
if (!PN)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return Inductions.count(PN);
|
|
|
|
}
|
|
|
|
|
2017-12-14 07:56:31 +00:00
|
|
|
bool LoopVectorizationLegality::isCastedInductionVariable(const Value *V) {
|
|
|
|
auto *Inst = dyn_cast<Instruction>(V);
|
|
|
|
return (Inst && InductionCastsToIgnore.count(Inst));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool LoopVectorizationLegality::isInductionVariable(const Value *V) {
|
|
|
|
return isInductionPhi(V) || isCastedInductionVariable(V);
|
|
|
|
}
|
|
|
|
|
2016-02-19 17:56:08 +00:00
|
|
|
bool LoopVectorizationLegality::isFirstOrderRecurrence(const PHINode *Phi) {
|
|
|
|
return FirstOrderRecurrences.count(Phi);
|
|
|
|
}
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
|
2015-02-18 03:43:19 +00:00
|
|
|
return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
|
2015-02-01 16:56:04 +00:00
|
|
|
}
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
bool LoopVectorizationLegality::blockCanBePredicated(
|
|
|
|
BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs) {
|
2016-04-26 02:00:36 +00:00
|
|
|
const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Instruction &I : *BB) {
|
2014-12-16 11:50:42 +00:00
|
|
|
// Check that we don't have a constant expression that can trap as an operand.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Value *Operand : I.operands()) {
|
|
|
|
if (auto *C = dyn_cast<Constant>(Operand))
|
2014-12-16 11:50:42 +00:00
|
|
|
if (C->canTrap())
|
|
|
|
return false;
|
|
|
|
}
|
2013-05-15 01:44:30 +00:00
|
|
|
// We might be able to hoist the load.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (I.mayReadFromMemory()) {
|
|
|
|
auto *LI = dyn_cast<LoadInst>(&I);
|
2014-12-16 11:50:42 +00:00
|
|
|
if (!LI)
|
|
|
|
return false;
|
|
|
|
if (!SafePtrs.count(LI->getPointerOperand())) {
|
2016-04-26 02:00:36 +00:00
|
|
|
// !llvm.mem.parallel_loop_access implies if-conversion safety.
|
2018-02-26 11:06:36 +00:00
|
|
|
// Otherwise, record that the load needs (real or emulated) masking
|
|
|
|
// and let the cost model decide.
|
|
|
|
if (!IsAnnotatedParallel)
|
|
|
|
MaskedOp.insert(LI);
|
|
|
|
continue;
|
2014-12-16 11:50:42 +00:00
|
|
|
}
|
2013-06-28 20:46:27 +00:00
|
|
|
}
|
2013-05-15 01:44:30 +00:00
|
|
|
|
2016-07-12 19:35:15 +00:00
|
|
|
if (I.mayWriteToMemory()) {
|
|
|
|
auto *SI = dyn_cast<StoreInst>(&I);
|
2014-12-16 11:50:42 +00:00
|
|
|
if (!SI)
|
|
|
|
return false;
|
2018-02-26 11:06:36 +00:00
|
|
|
// Predicated store requires some form of masking:
|
|
|
|
// 1) masked store HW instruction,
|
|
|
|
// 2) emulation via load-blend-store (only if safe and legal to do so,
|
|
|
|
// be aware of the race conditions), or
|
|
|
|
// 3) element-by-element predicate check and scalar store.
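// For example (illustrative): for a source like
//   if (cond[i]) A[i] = x;
// option (1) would emit an llvm.masked.store intrinsic, while option (3)
// branches on each lane's predicate bit around a scalar store.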
|
|
|
|
MaskedOp.insert(SI);
|
|
|
|
continue;
|
2014-01-28 01:01:53 +00:00
|
|
|
}
|
2016-07-12 19:35:15 +00:00
|
|
|
if (I.mayThrow())
|
2012-12-03 21:06:35 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-07-14 21:05:08 +00:00
|
|
|
void InterleavedAccessInfo::collectConstStrideAccesses(
|
|
|
|
MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
Interleaved memory accesses are grouped and vectorized into vector load/store and shufflevector.
E.g. for (i = 0; i < N; i+=2) {
a = A[i]; // load of even element
b = A[i+1]; // load of odd element
... // operations on a, b, c, d
A[i] = c; // store of even element
A[i+1] = d; // store of odd element
}
The loads of even and odd elements are identified as an interleave load group, which will be transfered into vectorized IRs like:
%wide.vec = load <8 x i32>, <8 x i32>* %ptr
%vec.even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%vec.odd = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
The stores of even and odd elements are identified as an interleave store group, which will be transfered into vectorized IRs like:
%interleaved.vec = shufflevector <4 x i32> %vec.even, %vec.odd, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
This optimization is currently disabled by default. To try it, add '-enable-interleaved-mem-accesses=true'.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
const ValueToValueMap &Strides) {
|
2016-07-14 20:59:47 +00:00
|
|
|
auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
// Since it's desired that the load/store instructions be maintained in
|
|
|
|
// "program order" for the interleaved access analysis, we have to visit the
|
|
|
|
// blocks in the loop in reverse postorder (i.e., in a topological order).
|
|
|
|
// Such an ordering will ensure that any load/store that may be executed
|
2016-07-14 20:59:47 +00:00
|
|
|
// before a second load/store will precede the second load/store in
|
2016-07-14 21:05:08 +00:00
|
|
|
// AccessStrideInfo.
|
2016-06-24 15:33:25 +00:00
|
|
|
LoopBlocksDFS DFS(TheLoop);
|
|
|
|
DFS.perform(LI);
|
2016-07-14 20:59:47 +00:00
|
|
|
for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
|
2015-06-08 06:39:56 +00:00
|
|
|
for (auto &I : *BB) {
|
2016-07-14 20:59:47 +00:00
|
|
|
auto *LI = dyn_cast<LoadInst>(&I);
|
|
|
|
auto *SI = dyn_cast<StoreInst>(&I);
|
|
|
|
if (!LI && !SI)
|
2015-06-08 06:39:56 +00:00
|
|
|
continue;
|
|
|
|
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(&I);
|
2017-06-19 18:48:58 +00:00
|
|
|
// We don't check wrapping here because we don't know yet if Ptr will be
|
|
|
|
// part of a full group or a group with gaps. Checking wrapping for all
|
2016-10-31 13:17:31 +00:00
|
|
|
// pointers (even those that end up in groups with no gaps) will be overly
|
2017-06-19 18:48:58 +00:00
|
|
|
// conservative. For full groups, wrapping should be ok since if we would
|
2016-10-31 13:17:31 +00:00
|
|
|
// wrap around the address space we would do a memory access at nullptr
|
|
|
|
// even without the transformation. The wrapping checks are therefore
|
|
|
|
// deferred until after we've formed the interleaved groups.
|
|
|
|
int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
|
|
|
|
/*Assume=*/true, /*ShouldCheckWrap=*/false);
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-07-14 20:59:47 +00:00
|
|
|
const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
|
|
|
|
PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
|
|
|
|
uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-07-14 20:59:47 +00:00
|
|
|
// An alignment of 0 means target ABI alignment.
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned Align = getMemInstAlignment(&I);
|
2016-07-14 20:59:47 +00:00
|
|
|
if (!Align)
|
|
|
|
Align = DL.getABITypeAlignment(PtrTy->getElementType());
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-07-14 21:05:08 +00:00
|
|
|
AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
|
2016-07-14 20:59:47 +00:00
|
|
|
}
|
2015-06-08 06:39:56 +00:00
|
|
|
}
|
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
// Analyze interleaved accesses and collect them into interleaved load and
|
|
|
|
// store groups.
|
|
|
|
//
|
|
|
|
// When generating code for an interleaved load group, we effectively hoist all
|
|
|
|
// loads in the group to the location of the first load in program order. When
|
|
|
|
// generating code for an interleaved store group, we sink all stores to the
|
|
|
|
// location of the last store. This code motion can change the order of load
|
|
|
|
// and store instructions and may break dependences.
|
|
|
|
//
|
|
|
|
// The code generation strategy mentioned above ensures that we won't violate
|
|
|
|
// any write-after-read (WAR) dependences.
|
|
|
|
//
|
|
|
|
// E.g., for the WAR dependence: a = A[i]; // (1)
|
|
|
|
// A[i] = b; // (2)
|
|
|
|
//
|
|
|
|
// The store group of (2) is always inserted at or below (2), and the load
|
|
|
|
// group of (1) is always inserted at or above (1). Thus, the instructions will
|
|
|
|
// never be reordered. All other dependences are checked to ensure the
|
|
|
|
// correctness of the instruction reordering.
|
2015-06-08 06:39:56 +00:00
|
|
|
//
|
2016-06-24 15:33:25 +00:00
|
|
|
// The algorithm visits all memory accesses in the loop in bottom-up program
|
|
|
|
// order. Program order is established by traversing the blocks in the loop in
|
|
|
|
// reverse postorder when collecting the accesses.
|
2015-06-08 06:39:56 +00:00
|
|
|
//
|
2016-06-24 15:33:25 +00:00
|
|
|
// We visit the memory accesses in bottom-up order because it can simplify the
|
|
|
|
// construction of store groups in the presence of write-after-write (WAW)
|
2015-06-08 06:39:56 +00:00
|
|
|
// dependences.
|
|
|
|
//
|
2016-06-24 15:33:25 +00:00
|
|
|
// E.g., for the WAW dependence: A[i] = a; // (1)
|
|
|
|
// A[i] = b; // (2)
|
|
|
|
// A[i + 1] = c; // (3)
|
2015-06-08 06:39:56 +00:00
|
|
|
//
|
2016-06-24 15:33:25 +00:00
|
|
|
// We will first create a store group with (3) and (2). (1) can't be added to
|
|
|
|
// this group because it and (2) are dependent. However, (1) can be grouped
|
|
|
|
// with other accesses that may precede it in program order. Note that a
|
|
|
|
// bottom-up order does not imply that WAW dependences should not be checked.
|
2015-06-08 06:39:56 +00:00
|
|
|
void InterleavedAccessInfo::analyzeInterleaving(
|
|
|
|
const ValueToValueMap &Strides) {
|
|
|
|
DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
|
|
|
|
|
2016-07-14 21:05:08 +00:00
|
|
|
// Holds all accesses with a constant stride.
|
|
|
|
MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
|
|
|
|
collectConstStrideAccesses(AccessStrideInfo, Strides);
|
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-07-14 21:05:08 +00:00
|
|
|
if (AccessStrideInfo.empty())
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
Interleaved memory accesses are grouped and vectorized into vector load/store and shufflevector.
E.g. for (i = 0; i < N; i+=2) {
a = A[i]; // load of even element
b = A[i+1]; // load of odd element
... // operations on a, b, c, d
A[i] = c; // store of even element
A[i+1] = d; // store of odd element
}
The loads of even and odd elements are identified as an interleave load group, which will be transfered into vectorized IRs like:
%wide.vec = load <8 x i32>, <8 x i32>* %ptr
%vec.even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%vec.odd = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
The stores of even and odd elements are identified as an interleave store group, which will be transfered into vectorized IRs like:
%interleaved.vec = shufflevector <4 x i32> %vec.even, %vec.odd, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
This optimization is currently disabled by defaut. To try it by adding '-enable-interleaved-mem-accesses=true'.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
return;
|
|
|
|
|
2016-06-24 15:33:25 +00:00
|
|
|
// Collect the dependences in the loop.
|
|
|
|
collectDependences();
|
|
|
|
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
Interleaved memory accesses are grouped and vectorized into vector load/store and shufflevector.
E.g. for (i = 0; i < N; i+=2) {
a = A[i]; // load of even element
b = A[i+1]; // load of odd element
... // operations on a, b, c, d
A[i] = c; // store of even element
A[i+1] = d; // store of odd element
}
The loads of even and odd elements are identified as an interleave load group, which will be transfered into vectorized IRs like:
%wide.vec = load <8 x i32>, <8 x i32>* %ptr
%vec.even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%vec.odd = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
The stores of even and odd elements are identified as an interleave store group, which will be transfered into vectorized IRs like:
%interleaved.vec = shufflevector <4 x i32> %vec.even, %vec.odd, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
This optimization is currently disabled by defaut. To try it by adding '-enable-interleaved-mem-accesses=true'.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
// Holds all interleaved store groups temporarily.
|
|
|
|
SmallSetVector<InterleaveGroup *, 4> StoreGroups;
|
2016-02-19 15:46:10 +00:00
|
|
|
// Holds all interleaved load groups temporarily.
|
|
|
|
SmallSetVector<InterleaveGroup *, 4> LoadGroups;
|
[LoopVectorize] Teach Loop Vectorizor about interleaved memory accesses.
Interleaved memory accesses are grouped and vectorized into vector load/store and shufflevector.
E.g. for (i = 0; i < N; i+=2) {
a = A[i]; // load of even element
b = A[i+1]; // load of odd element
... // operations on a, b, c, d
A[i] = c; // store of even element
A[i+1] = d; // store of odd element
}
The loads of even and odd elements are identified as an interleave load group, which will be transfered into vectorized IRs like:
%wide.vec = load <8 x i32>, <8 x i32>* %ptr
%vec.even = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%vec.odd = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
The stores of even and odd elements are identified as an interleave store group, which will be transfered into vectorized IRs like:
%interleaved.vec = shufflevector <4 x i32> %vec.even, %vec.odd, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
This optimization is currently disabled by defaut. To try it by adding '-enable-interleaved-mem-accesses=true'.
llvm-svn: 239291
2015-06-08 06:39:56 +00:00
|
|
|
|
2016-07-15 15:22:43 +00:00
|
|
|
// Search in bottom-up program order for pairs of accesses (A and B) that can
|
|
|
|
// form interleaved load or store groups. In the algorithm below, access A
|
|
|
|
// precedes access B in program order. We initialize a group for B in the
|
|
|
|
// outer loop of the algorithm, and then in the inner loop, we attempt to
|
|
|
|
// insert each A into B's group if:
|
|
|
|
//
|
|
|
|
// 1. A and B have the same stride,
|
|
|
|
// 2. A and B have the same memory object size, and
|
|
|
|
// 3. A belongs in B's group according to its distance from B.
|
|
|
|
//
|
|
|
|
// Special care is taken to ensure group formation will not break any
|
|
|
|
// dependences.
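  //
  // As a purely illustrative example (hypothetical accesses, not taken from
  // the code below): two 4-byte loads of X[2*i] and X[2*i + 1] share the
  // same stride (2) and the same access size (4 bytes), and their constant
  // distance of 4 bytes is a whole number of accesses, so rules 1-3 allow
  // them to end up in one factor-2 load group.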
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false already, except
      // when we only asked for optimization remarks.
      if (isInterleaved(A) || (A->mayReadFromMemory() != B->mayReadFromMemory())
          || (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getMemInstAddressSpace(A) != getMemInstAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // Ignore A if either A or B is in a predicated block. Although we
      // currently prevent group formation for predicated accesses, we may be
      // able to relax this limitation in the future once we handle more
      // complicated blocks.
      if (isPredicated(A->getParent()) || isPredicated(B->getParent()))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);
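      // (Illustrative numbers only: if B sits at index 0 and A accesses
      // memory 8 bytes before B with 4-byte elements, DistanceToB is -8 and
      // IndexA is -2; insertMember() below decides whether such an index
      // still fits within the group's factor.)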

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                     << "    into the interleave group with" << *B << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
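  // A wide interleaved store writes every lane of the vector, so a store
  // group with a missing member would clobber memory that no scalar store in
  // the group defines; releasing such a group keeps the original scalar
  // stores instead.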
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved store group due "
                      "to gaps.\n");
      releaseGroup(Group);
    }

  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we change to Assume=true we'll
  // only need at most one runtime check per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. Can skip the checks; for full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
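      // (Illustrative example: a factor-2 load group that only has a member
      // at index 0 still emits a wide load that covers the missing element,
      // so running the final iterations in the scalar epilogue keeps that
      // wide load from reading past the end of the underlying object.)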
      if (Group->isReverse()) {
        DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                        "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      DEBUG(dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

Optional<unsigned> LoopVectorizationCostModel::computeMaxVF(bool OptForSize) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this anyway, since the check is still
    // likely to be dynamically uniform if the target can skip.
    DEBUG(dbgs() << "LV: Not inserting runtime ptr check for divergent target");

    ORE->emit(
        createMissedAnalysis("CantVersionLoopWithDivergentTarget")
        << "runtime pointer checks needed. Not enabled for divergent target");

    return None;
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  if (!OptForSize) // Remaining checks deal with scalar loop when OptForSize.
    return computeFeasibleMaxVF(OptForSize, TC);

  if (Legal->getRuntimePointerChecking()->Need) {
    ORE->emit(createMissedAnalysis("CantVersionLoopWithOptForSize")
              << "runtime pointer checks needed. Enable vectorization of this "
                 "loop with '#pragma clang loop vectorize(enable)' when "
                 "compiling with -Os/-Oz");
    DEBUG(dbgs()
          << "LV: Aborting. Runtime ptr check is required with -Os/-Oz.\n");
    return None;
  }

  // If we optimize the program for size, avoid creating the tail loop.
  DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');

  // If we don't know the precise trip count, don't try to vectorize.
  if (TC < 2) {
    ORE->emit(
        createMissedAnalysis("UnknownLoopCountComplexCFG")
        << "unable to calculate the loop count due to complex control flow");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }

  unsigned MaxVF = computeFeasibleMaxVF(OptForSize, TC);

  if (TC % MaxVF != 0) {
    // If the trip count that we found modulo the vectorization factor is not
    // zero, then we require a tail.
    // FIXME: look for a smaller MaxVF that does divide TC rather than give up.
    // FIXME: return None if loop requiresScalarEpilog(<MaxVF>), or look for a
    //        smaller MaxVF that does not require a scalar epilog.
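    // (Illustrative numbers: a known trip count of 12 with MaxVF = 8 leaves a
    // 4-iteration tail, which is exactly what -Os/-Oz is trying to avoid, so
    // vectorization is abandoned here.)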

    ORE->emit(createMissedAnalysis("NoTailLoopWithOptForSize")
              << "cannot optimize for size and vectorize at the "
                 "same time. Enable vectorization of this loop "
                 "with '#pragma clang loop vectorize(enable)' "
                 "when compiling with -Os/-Oz");
    DEBUG(dbgs() << "LV: Aborting. A tail loop is required with -Os/-Oz.\n");
    return None;
  }

  return MaxVF;
}

unsigned
LoopVectorizationCostModel::computeFeasibleMaxVF(bool OptForSize,
                                                 unsigned ConstTripCount) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
  unsigned WidestRegister = TTI.getRegisterBitWidth(true);

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory accesses that is most restrictive (involved in the smallest
  // dependence distance).
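  // (Illustrative numbers: if the most restrictive accesses are 4-byte loads
  // and LAA allows at most 4 iterations to run together, this value would be
  // 4 * 4 * 8 = 128 bits.)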
  unsigned MaxSafeRegisterWidth = Legal->getMaxSafeRegisterWidth();

  WidestRegister = std::min(WidestRegister, MaxSafeRegisterWidth);

  unsigned MaxVectorSize = WidestRegister / WidestType;
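  // (Illustrative numbers: 256-bit registers with a widest loop type of i32
  // give MaxVectorSize = 256 / 32 = 8 lanes.)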

  DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType << " / "
               << WidestType << " bits.\n");
  DEBUG(dbgs() << "LV: The Widest register safe to use is: " << WidestRegister
               << " bits.\n");

  assert(MaxVectorSize <= 64 && "Did not expect to pack so many elements"
                                " into one vector!");
  if (MaxVectorSize == 0) {
    DEBUG(dbgs() << "LV: The target has no vector registers.\n");
    MaxVectorSize = 1;
    return MaxVectorSize;
  } else if (ConstTripCount && ConstTripCount < MaxVectorSize &&
             isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below.
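    // (Illustrative numbers: a constant trip count of 4 with an 8-lane
    // MaxVectorSize is clamped to 4, since wider vectors could never be
    // filled by this loop.)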
    DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                 << ConstTripCount << "\n");
    MaxVectorSize = ConstTripCount;
    return MaxVectorSize;
  }

  unsigned MaxVF = MaxVectorSize;
  if (TTI.shouldMaximizeVectorBandwidth(OptForSize) ||
      (MaximizeBandwidth && !OptForSize)) {
    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorSize).
    SmallVector<unsigned, 8> VFs;
    unsigned NewMaxVectorSize = WidestRegister / SmallestType;
    for (unsigned VS = MaxVectorSize * 2; VS <= NewMaxVectorSize; VS *= 2)
      VFs.push_back(VS);
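    // (Illustrative numbers: with 256-bit registers, a smallest type of i8
    // and a widest type of i32, MaxVectorSize is 8, NewMaxVectorSize is 32,
    // and the candidate factors collected here are 16 and 32.)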

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than existing
    // ones.
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(true);
    for (int i = RUs.size() - 1; i >= 0; --i) {
      if (RUs[i].MaxLocalUsers <= TargetNumRegisters) {
        MaxVF = VFs[i];
        break;
      }
    }
  }

  return MaxVF;
}

VectorizationFactor
LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
  float Cost = expectedCost(1).first;
  const float ScalarCost = Cost;
  unsigned Width = 1;
  DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");

  bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  // Ignore scalar width, because the user explicitly wants vectorization.
  if (ForceVectorization && MaxVF > 1) {
    Width = 2;
    Cost = expectedCost(Width).first / (float)Width;
  }

  for (unsigned i = 2; i <= MaxVF; i *= 2) {
    // Notice that the vector loop needs to be executed fewer times, so
    // we need to divide the cost of the vector loop by the width of
    // the vector elements.
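    // (Illustrative numbers: if the loop body at VF = 4 is estimated to cost
    // 8 while the scalar body costs 3, the per-iteration vector cost is
    // 8 / 4 = 2, which beats the scalar loop.)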
|
2016-03-30 19:37:08 +00:00
|
|
|
VectorizationCostTy C = expectedCost(i);
|
|
|
|
float VectorCost = C.first / (float)i;
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: Vector loop of width " << i
|
|
|
|
<< " costs: " << (int)VectorCost << ".\n");
|
2016-03-30 19:37:08 +00:00
|
|
|
if (!C.second && !ForceVectorization) {
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(
|
|
|
|
dbgs() << "LV: Not considering vector loop of width " << i
|
|
|
|
<< " because it will not generate any vector instructions.\n");
|
2016-03-30 19:37:08 +00:00
|
|
|
continue;
|
|
|
|
}
|
2012-10-24 20:36:32 +00:00
|
|
|
if (VectorCost < Cost) {
|
|
|
|
Cost = VectorCost;
|
|
|
|
Width = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
if (!EnableCondStoresVectorization && NumPredStores) {
|
|
|
|
ORE->emit(createMissedAnalysis("ConditionalStore")
|
|
|
|
<< "store that is conditionally executed prevents vectorization");
|
|
|
|
DEBUG(dbgs() << "LV: No vectorization. There are conditional stores.\n");
|
|
|
|
Width = 1;
|
|
|
|
Cost = ScalarCost;
|
|
|
|
}
|
|
|
|
|
2014-04-29 08:55:11 +00:00
|
|
|
DEBUG(if (ForceVectorization && Width > 1 && Cost >= ScalarCost) dbgs()
|
|
|
|
<< "LV: Vectorization seems to be not beneficial, "
|
|
|
|
<< "but was forced by a user.\n");
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: Selecting VF: " << Width << ".\n");
|
2017-03-14 13:07:04 +00:00
|
|
|
VectorizationFactor Factor = {Width, (unsigned)(Width * Cost)};
|
2013-01-28 16:02:45 +00:00
|
|
|
return Factor;
|
2012-10-24 20:36:32 +00:00
|
|
|
}
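For readers tracing the selection loop above, here is a small, self-contained sketch (not LLVM code; the per-VF cost table and the ForceVectorization flag are invented for illustration) of how the comparison plays out: each candidate width divides its expected cost by the width, widths that would emit no vector instructions are skipped unless vectorization is forced, and the lowest per-lane cost wins.

// Standalone sketch of the width-selection loop, with hypothetical costs.
// Pair = {expected loop cost at that VF, emits at least one vector instr}.
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<unsigned, bool>> CostForVF = {
      {3, true} /*VF=1*/, {2, true} /*VF=2*/, {1, true} /*VF=4*/};
  bool ForceVectorization = false;

  unsigned Width = 1;
  float Cost = (float)CostForVF[0].first; // scalar cost per lane (width 1)
  unsigned VF = 2;
  for (unsigned Idx = 1; Idx < CostForVF.size(); ++Idx, VF *= 2) {
    if (!CostForVF[Idx].second && !ForceVectorization)
      continue; // everything would be scalarized: not really a vector loop
    float VectorCost = CostForVF[Idx].first / (float)VF;
    if (VectorCost < Cost) {
      Cost = VectorCost;
      Width = VF;
    }
  }
  std::printf("LV: Selecting VF: %u.\n", Width); // prints 4 with these costs
  return 0;
}

With the numbers above, the per-lane cost falls from 3 to 1 to 0.25, so width 4 wins, mirroring the kind of debug output quoted in the commit message.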
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
std::pair<unsigned, unsigned>
|
|
|
|
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
|
|
|
|
unsigned MinWidth = -1U;
|
2013-01-09 22:29:00 +00:00
|
|
|
unsigned MaxWidth = 8;
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = TheFunction->getParent()->getDataLayout();
|
2013-01-09 22:29:00 +00:00
|
|
|
|
|
|
|
// For each block.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
2013-01-09 22:29:00 +00:00
|
|
|
// For each instruction in the loop.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Instruction &I : *BB) {
|
|
|
|
Type *T = I.getType();
|
2013-01-11 07:11:59 +00:00
|
|
|
|
2015-08-27 14:12:17 +00:00
|
|
|
// Skip ignored values.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (ValuesToIgnore.count(&I))
|
2014-10-14 22:59:49 +00:00
|
|
|
continue;
|
|
|
|
|
2013-01-11 07:11:59 +00:00
|
|
|
// Only examine Loads, Stores and PHINodes.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
|
2013-01-09 22:29:00 +00:00
|
|
|
continue;
|
|
|
|
|
2015-08-27 14:12:17 +00:00
|
|
|
// Examine PHI nodes that are reduction variables. Update the type to
|
|
|
|
// account for the recurrence type.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (auto *PN = dyn_cast<PHINode>(&I)) {
|
2015-11-19 14:19:06 +00:00
|
|
|
if (!Legal->isReductionVariable(PN))
|
2013-01-11 07:11:59 +00:00
|
|
|
continue;
|
2015-08-27 14:12:17 +00:00
|
|
|
RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[PN];
|
|
|
|
T = RdxDesc.getRecurrenceType();
|
|
|
|
}
|
2013-01-09 22:29:00 +00:00
|
|
|
|
2013-01-11 07:11:59 +00:00
|
|
|
// Examine the stored values.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (auto *ST = dyn_cast<StoreInst>(&I))
|
2013-01-09 22:29:00 +00:00
|
|
|
T = ST->getValueOperand()->getType();
|
|
|
|
|
2013-02-05 15:08:02 +00:00
|
|
|
// Ignore loaded pointer types and stored pointer types that are not
|
2017-03-02 13:55:05 +00:00
|
|
|
// vectorizable.
|
|
|
|
//
|
|
|
|
// FIXME: The check here attempts to predict whether a load or store will
|
|
|
|
// be vectorized. We only know this for certain after a VF has
|
|
|
|
// been selected. Here, we assume that if an access can be
|
|
|
|
// vectorized, it will be. We should also look at extending this
|
|
|
|
// optimization to non-pointer types.
|
|
|
|
//
|
|
|
|
if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
|
2018-02-26 11:06:36 +00:00
|
|
|
!Legal->isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
|
2013-02-13 21:12:29 +00:00
|
|
|
continue;
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
MinWidth = std::min(MinWidth,
|
|
|
|
(unsigned)DL.getTypeSizeInBits(T->getScalarType()));
|
2013-02-13 21:12:29 +00:00
|
|
|
MaxWidth = std::max(MaxWidth,
|
2015-03-10 02:37:25 +00:00
|
|
|
(unsigned)DL.getTypeSizeInBits(T->getScalarType()));
|
2013-01-09 22:29:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
return {MinWidth, MaxWidth};
|
2013-01-09 22:29:00 +00:00
|
|
|
}
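As a rough, self-contained illustration (the element types and register width here are hypothetical, not taken from any particular target), the result of the function above is simply the minimum and maximum scalar bit width over the loop's loads, stores, and reduction PHIs; the caller later divides the widest register by the widest type to bound the candidate VF.

// Hypothetical loop that loads i64 values and stores i8 results.
#include <algorithm>
#include <cstdio>

int main() {
  unsigned ScalarBits[] = {64 /*load i64*/, 8 /*store i8*/};
  unsigned MinWidth = -1U, MaxWidth = 8; // same initial values as above
  for (unsigned Bits : ScalarBits) {
    MinWidth = std::min(MinWidth, Bits);
    MaxWidth = std::max(MaxWidth, Bits);
  }
  unsigned WidestRegister = 256; // e.g. a 256-bit vector register
  std::printf("LV: The Smallest and Widest types: %u / %u bits.\n", MinWidth,
              MaxWidth);
  std::printf("Max VF bounded by the widest type: %u\n",
              WidestRegister / MaxWidth); // 256 / 64 = 4
  return 0;
}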
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
|
|
|
|
unsigned VF,
|
|
|
|
unsigned LoopCost) {
|
|
|
|
// -- The interleave heuristics --
|
|
|
|
// We interleave the loop in order to expose ILP and reduce the loop overhead.
|
2013-01-20 05:24:29 +00:00
|
|
|
// There are many micro-architectural considerations that we can't predict
|
2014-08-26 00:59:15 +00:00
|
|
|
// at this level. For example, frontend pressure (on decode or fetch) due to
|
2013-01-20 05:24:29 +00:00
|
|
|
// code size, or the number and capabilities of the execution ports.
|
|
|
|
//
|
2015-07-11 00:31:11 +00:00
|
|
|
// We use the following heuristics to select the interleave count:
|
|
|
|
// 1. If the code has reductions, then we interleave to break the cross
|
2013-01-20 05:24:29 +00:00
|
|
|
// iteration dependency.
|
2015-07-11 00:31:11 +00:00
|
|
|
// 2. If the loop is really small, then we interleave to reduce the loop
|
2013-01-20 05:24:29 +00:00
|
|
|
// overhead.
|
2015-07-11 00:31:11 +00:00
|
|
|
// 3. We don't interleave if we think that we will spill registers to memory
|
|
|
|
// due to the increased register pressure.
|
2013-01-20 05:24:29 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// When we optimize for size, we don't interleave.
|
2013-01-04 17:48:25 +00:00
|
|
|
if (OptForSize)
|
|
|
|
return 1;
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// We used the distance for the interleave count.
|
2013-06-24 12:09:15 +00:00
|
|
|
if (Legal->getMaxSafeDepDistBytes() != -1U)
|
|
|
|
return 1;
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Do not interleave loops with a relatively small trip count.
|
2016-05-19 20:38:03 +00:00
|
|
|
unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
|
2015-07-11 00:31:11 +00:00
|
|
|
if (TC > 1 && TC < TinyTripCountInterleaveThreshold)
|
2013-01-07 21:54:51 +00:00
|
|
|
return 1;
|
|
|
|
|
2014-01-27 11:12:14 +00:00
|
|
|
unsigned TargetNumRegisters = TTI.getNumberOfRegisters(VF > 1);
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
|
|
|
|
<< " registers\n");
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2014-01-27 11:12:19 +00:00
|
|
|
if (VF == 1) {
|
|
|
|
if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
|
|
|
|
TargetNumRegisters = ForceTargetNumScalarRegs;
|
|
|
|
} else {
|
|
|
|
if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
|
|
|
|
TargetNumRegisters = ForceTargetNumVectorRegs;
|
|
|
|
}
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
RegisterUsage R = calculateRegisterUsage({VF})[0];
|
2013-01-04 17:48:25 +00:00
|
|
|
// We divide by these constants so assume that we have at least one
|
|
|
|
// instruction that uses at least one register.
|
|
|
|
R.MaxLocalUsers = std::max(R.MaxLocalUsers, 1U);
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// We calculate the interleave count using the following formula.
|
2013-01-04 17:48:25 +00:00
|
|
|
// Subtract the number of loop invariants from the number of available
|
2015-07-11 00:31:11 +00:00
|
|
|
// registers. These registers are used by all of the interleaved instances.
|
2013-01-04 17:48:25 +00:00
|
|
|
// Next, divide the remaining registers by the number of registers that is
|
|
|
|
// required by the loop, in order to estimate how many parallel instances
|
2014-01-27 11:12:24 +00:00
|
|
|
// fit without causing spills. All of this is rounded down if necessary to be
|
2015-07-11 00:31:11 +00:00
|
|
|
// a power of two. We want a power-of-two interleave count to simplify any
|
2014-01-27 11:12:24 +00:00
|
|
|
// addressing operations or alignment considerations.
|
2015-07-11 00:31:11 +00:00
|
|
|
unsigned IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs) /
|
2014-01-27 11:12:24 +00:00
|
|
|
R.MaxLocalUsers);
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Don't count the induction variable as interleaved.
|
2014-01-29 04:36:12 +00:00
|
|
|
if (EnableIndVarRegisterHeur)
|
2015-07-11 00:31:11 +00:00
|
|
|
IC = PowerOf2Floor((TargetNumRegisters - R.LoopInvariantRegs - 1) /
|
2014-01-29 04:36:12 +00:00
|
|
|
std::max(1U, (R.MaxLocalUsers - 1)));
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Clamp the interleave ranges to reasonable counts.
|
|
|
|
unsigned MaxInterleaveCount = TTI.getMaxInterleaveFactor(VF);
|
2013-01-20 05:24:29 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Check if the user has overridden the max.
|
2014-01-27 11:12:19 +00:00
|
|
|
if (VF == 1) {
|
2014-09-10 17:58:16 +00:00
|
|
|
if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
|
2015-07-11 00:31:11 +00:00
|
|
|
MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
|
2014-01-27 11:12:19 +00:00
|
|
|
} else {
|
2014-09-10 17:58:16 +00:00
|
|
|
if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
|
2015-07-11 00:31:11 +00:00
|
|
|
MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
|
2014-01-27 11:12:19 +00:00
|
|
|
}
|
|
|
|
|
2013-01-20 05:24:29 +00:00
|
|
|
// If we did not calculate the cost for VF (because the user selected the VF)
|
|
|
|
// then we calculate the cost of VF here.
|
|
|
|
if (LoopCost == 0)
|
2016-03-30 19:37:08 +00:00
|
|
|
LoopCost = expectedCost(VF).first;
|
2013-01-20 05:24:29 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Clamp the calculated IC to be between 1 and the max interleave count
|
2013-01-20 05:24:29 +00:00
|
|
|
// that the target allows.
|
2015-07-11 00:31:11 +00:00
|
|
|
if (IC > MaxInterleaveCount)
|
|
|
|
IC = MaxInterleaveCount;
|
|
|
|
else if (IC < 1)
|
|
|
|
IC = 1;
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Interleave if we vectorized this loop and there is a reduction that could
|
|
|
|
// benefit from interleaving.
|
2017-10-12 23:30:03 +00:00
|
|
|
if (VF > 1 && !Legal->getReductionVars()->empty()) {
|
2015-07-11 00:31:11 +00:00
|
|
|
DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
|
|
|
|
return IC;
|
2013-01-20 05:24:29 +00:00
|
|
|
}
|
|
|
|
|
2014-01-31 10:51:08 +00:00
|
|
|
// Note that if we've already vectorized the loop we will have done the
|
2015-07-11 00:31:11 +00:00
|
|
|
// runtime check and so interleaving won't require further checks.
|
|
|
|
bool InterleavingRequiresRuntimePointerCheck =
|
2015-07-14 22:32:44 +00:00
|
|
|
(VF == 1 && Legal->getRuntimePointerChecking()->Need);
|
2014-01-31 10:51:08 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// We want to interleave small loops in order to reduce the loop overhead and
|
2014-01-31 10:51:08 +00:00
|
|
|
// potentially expose ILP opportunities.
|
|
|
|
DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n');
|
2015-07-11 00:31:11 +00:00
|
|
|
if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
|
2014-01-31 10:51:08 +00:00
|
|
|
// We assume that the cost overhead is 1 and we use the cost model
|
2015-07-11 00:31:11 +00:00
|
|
|
// to estimate the cost of the loop and interleave until the cost of the
|
2014-01-31 10:51:08 +00:00
|
|
|
// loop overhead is about 5% of the cost of the loop.
|
2017-05-16 15:50:30 +00:00
|
|
|
unsigned SmallIC =
|
|
|
|
std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
|
2014-01-31 10:51:08 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Interleave until store/load ports (estimated by max interleave count) are
|
2014-01-28 01:01:53 +00:00
|
|
|
// saturated.
|
2015-02-01 16:56:02 +00:00
|
|
|
unsigned NumStores = Legal->getNumStores();
|
|
|
|
unsigned NumLoads = Legal->getNumLoads();
|
2015-07-11 00:31:11 +00:00
|
|
|
unsigned StoresIC = IC / (NumStores ? NumStores : 1);
|
|
|
|
unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
|
2014-01-31 10:51:08 +00:00
|
|
|
|
2014-08-20 23:53:52 +00:00
|
|
|
// If we have a scalar reduction (vector reductions are already dealt with
|
|
|
|
// by this point), we can increase the critical path length if the loop
|
2015-07-11 00:31:11 +00:00
|
|
|
// we're interleaving is inside another loop. Limit this, by default, to 2, so the
|
2014-08-20 23:53:52 +00:00
|
|
|
// critical path only gets increased by one reduction operation.
|
2017-10-12 23:30:03 +00:00
|
|
|
if (!Legal->getReductionVars()->empty() && TheLoop->getLoopDepth() > 1) {
|
2015-07-11 00:31:11 +00:00
|
|
|
unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
|
|
|
|
SmallIC = std::min(SmallIC, F);
|
|
|
|
StoresIC = std::min(StoresIC, F);
|
|
|
|
LoadsIC = std::min(LoadsIC, F);
|
2014-08-20 23:53:52 +00:00
|
|
|
}
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
if (EnableLoadStoreRuntimeInterleave &&
|
|
|
|
std::max(StoresIC, LoadsIC) > SmallIC) {
|
|
|
|
DEBUG(dbgs() << "LV: Interleaving to saturate store or load ports.\n");
|
|
|
|
return std::max(StoresIC, LoadsIC);
|
2014-01-31 10:51:08 +00:00
|
|
|
}
|
2014-01-28 01:01:53 +00:00
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
|
|
|
|
return SmallIC;
|
2013-01-20 05:24:29 +00:00
|
|
|
}
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
// Interleave if this is a large loop (small loops are already dealt with by
|
2015-11-19 14:19:06 +00:00
|
|
|
// this point) that could benefit from interleaving.
|
2017-10-12 23:30:03 +00:00
|
|
|
bool HasReductions = !Legal->getReductionVars()->empty();
|
2015-03-06 23:12:04 +00:00
|
|
|
if (TTI.enableAggressiveInterleaving(HasReductions)) {
|
2015-07-11 00:31:11 +00:00
|
|
|
DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
|
|
|
|
return IC;
|
2015-03-06 23:12:04 +00:00
|
|
|
}
|
|
|
|
|
2015-07-11 00:31:11 +00:00
|
|
|
DEBUG(dbgs() << "LV: Not Interleaving.\n");
|
2013-01-20 05:24:29 +00:00
|
|
|
return 1;
|
2013-01-04 17:48:25 +00:00
|
|
|
}
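To make the interleave-count formula above concrete, here is a worked example with invented numbers: 32 target registers, 2 loop-invariant values, at most 6 registers live at once within one instance, and a hypothetical target limit of 4. The rounding helper is a local stand-in for the PowerOf2Floor used above.

// IC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers),
// then clamped to the target's maximum interleave count. Numbers are made up.
#include <cstdio>

static unsigned PowerOf2FloorLocal(unsigned X) {
  unsigned R = 1;
  while (R * 2 <= X) // assumes X >= 1, which holds for the inputs below
    R *= 2;
  return R;
}

int main() {
  unsigned TargetNumRegisters = 32;
  unsigned LoopInvariantRegs = 2;
  unsigned MaxLocalUsers = 6;
  unsigned MaxInterleaveCount = 4;

  unsigned IC = PowerOf2FloorLocal(
      (TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); // 30/6=5 -> 4
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else if (IC < 1)
    IC = 1;
  std::printf("Interleave count: %u\n", IC); // prints 4
  return 0;
}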
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
|
2016-04-29 07:09:48 +00:00
|
|
|
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<unsigned> VFs) {
|
2013-01-04 17:48:25 +00:00
|
|
|
// This function calculates the register usage by measuring the highest number
|
|
|
|
// of values that are alive at a single location. Obviously, this is a very
|
|
|
|
// rough estimation. We scan the loop in topological order and
|
|
|
|
// assign a number to each instruction. We use RPO to ensure that defs are
|
|
|
|
// met before their users. We assume that each instruction that has in-loop
|
|
|
|
// users starts an interval. We record every time that an in-loop value is
|
|
|
|
// used, so we have a list of the first and last occurrences of each
|
|
|
|
// instruction. Next, we transpose this data structure into a multi map that
|
|
|
|
// holds the list of intervals that *end* at a specific location. This multi
|
|
|
|
// map allows us to perform a linear search. We scan the instructions linearly
|
|
|
|
// and record each time that a new interval starts, by placing it in a set.
|
|
|
|
// If we find this value in the multi-map then we remove it from the set.
|
|
|
|
// The max register usage is the maximum size of the set.
|
|
|
|
// We also search for instructions that are defined outside the loop, but are
|
|
|
|
// used inside the loop. We need this number separately from the max-interval
|
|
|
|
// usage number because when we unroll, loop-invariant values do not take
|
|
|
|
// more registers.
|
|
|
|
LoopBlocksDFS DFS(TheLoop);
|
|
|
|
DFS.perform(LI);
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
RegisterUsage RU;
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
// Each 'key' in the map opens a new interval. The values
|
|
|
|
// of the map are the index of the 'last seen' usage of the
|
|
|
|
// instruction that is the key.
|
2017-10-12 23:30:03 +00:00
|
|
|
using IntervalMap = DenseMap<Instruction *, unsigned>;
|
|
|
|
|
2013-01-04 17:48:25 +00:00
|
|
|
// Maps instruction to its index.
|
2016-05-05 00:54:54 +00:00
|
|
|
DenseMap<unsigned, Instruction *> IdxToInstr;
|
2013-01-04 17:48:25 +00:00
|
|
|
// Marks the end of each interval.
|
|
|
|
IntervalMap EndPoint;
|
|
|
|
// Saves the list of instruction indices that are used in the loop.
|
2016-05-05 00:54:54 +00:00
|
|
|
SmallSet<Instruction *, 8> Ends;
|
2013-01-04 17:48:25 +00:00
|
|
|
// Saves the list of values that are used in the loop but are
|
|
|
|
// defined outside the loop, such as arguments and constants.
|
2016-05-05 00:54:54 +00:00
|
|
|
SmallPtrSet<Value *, 8> LoopInvariants;
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
unsigned Index = 0;
|
2016-07-12 19:35:15 +00:00
|
|
|
for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
|
|
|
|
for (Instruction &I : *BB) {
|
2015-10-19 22:06:09 +00:00
|
|
|
IdxToInstr[Index++] = &I;
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
// Save the end location of each USE.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Value *U : I.operands()) {
|
|
|
|
auto *Instr = dyn_cast<Instruction>(U);
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
// Ignore non-instruction values such as arguments, constants, etc.
|
2016-05-05 00:54:54 +00:00
|
|
|
if (!Instr)
|
|
|
|
continue;
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
// If this instruction is outside the loop then record it and continue.
|
|
|
|
if (!TheLoop->contains(Instr)) {
|
|
|
|
LoopInvariants.insert(Instr);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Overwrite previous end points.
|
|
|
|
EndPoint[Instr] = Index;
|
|
|
|
Ends.insert(Instr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Saves the list of intervals that end with the index in 'key'.
|
2017-10-12 23:30:03 +00:00
|
|
|
using InstrList = SmallVector<Instruction *, 2>;
|
2013-01-04 17:48:25 +00:00
|
|
|
DenseMap<unsigned, InstrList> TransposeEnds;
|
|
|
|
|
|
|
|
// Transpose the EndPoints to a list of values that end at each index.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (auto &Interval : EndPoint)
|
|
|
|
TransposeEnds[Interval.second].push_back(Interval.first);
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
SmallSet<Instruction *, 8> OpenIntervals;
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
// Get the size of the widest register.
|
|
|
|
unsigned MaxSafeDepDist = -1U;
|
|
|
|
if (Legal->getMaxSafeDepDistBytes() != -1U)
|
|
|
|
MaxSafeDepDist = Legal->getMaxSafeDepDistBytes() * 8;
|
|
|
|
unsigned WidestRegister =
|
|
|
|
std::min(TTI.getRegisterBitWidth(true), MaxSafeDepDist);
|
|
|
|
const DataLayout &DL = TheFunction->getParent()->getDataLayout();
|
|
|
|
|
|
|
|
SmallVector<RegisterUsage, 8> RUs(VFs.size());
|
|
|
|
SmallVector<unsigned, 8> MaxUsages(VFs.size(), 0);
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
|
2015-11-02 22:53:48 +00:00
|
|
|
|
|
|
|
// A lambda that gets the register usage for the given type and VF.
|
|
|
|
auto GetRegUsage = [&DL, WidestRegister](Type *Ty, unsigned VF) {
|
2016-04-29 07:09:48 +00:00
|
|
|
if (Ty->isTokenTy())
|
|
|
|
return 0U;
|
2015-11-02 22:53:48 +00:00
|
|
|
unsigned TypeSize = DL.getTypeSizeInBits(Ty->getScalarType());
|
|
|
|
return std::max<unsigned>(1, VF * TypeSize / WidestRegister);
|
|
|
|
};
|
|
|
|
|
2013-01-04 17:48:25 +00:00
|
|
|
for (unsigned int i = 0; i < Index; ++i) {
|
|
|
|
Instruction *I = IdxToInstr[i];
|
|
|
|
|
|
|
|
// Remove all of the instructions that end at this location.
|
|
|
|
InstrList &List = TransposeEnds[i];
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Instruction *ToRemove : List)
|
|
|
|
OpenIntervals.erase(ToRemove);
|
2013-01-04 17:48:25 +00:00
|
|
|
|
[LoopVectorizer] When estimating reg usage, unused insts may "end" another use
The register usage algorithm incorrectly treats instructions whose value is
not used within the loop (e.g. those that do not produce a value).
The algorithm first calculates the usages within the loop. It iterates over
the instructions in order, and records at which instruction index each use
ends (in fact, they're actually recorded against the next index, as this is
when we want to delete them from the open intervals).
The algorithm then iterates over the instructions again, adding each
instruction in turn to a list of open intervals. Instructions are then
removed from the list of open intervals when they occur in the list of uses
ended at the current index.
The problem is, instructions which are not used in the loop are skipped.
However, although they aren't used, the last use of a value may have been
recorded against that instruction index. In this case, the use is not deleted
from the open intervals, which may then bump up the estimated register usage.
This patch fixes the issue by simply moving the "is used" check after the loop
which erases the uses at the current index.
Differential Revision: https://reviews.llvm.org/D26554
llvm-svn: 286969
2016-11-15 14:27:33 +00:00
|
|
|
// Ignore instructions that are never used within the loop.
|
|
|
|
if (!Ends.count(I))
|
|
|
|
continue;
|
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
// Skip ignored values.
|
|
|
|
if (ValuesToIgnore.count(I))
|
|
|
|
continue;
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
// For each VF find the maximum usage of registers.
|
|
|
|
for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
|
|
|
|
if (VFs[j] == 1) {
|
|
|
|
MaxUsages[j] = std::max(MaxUsages[j], OpenIntervals.size());
|
|
|
|
continue;
|
|
|
|
}
|
2017-02-08 19:25:23 +00:00
|
|
|
collectUniformsAndScalars(VFs[j]);
|
2015-12-05 01:00:22 +00:00
|
|
|
// Count the number of live intervals.
|
2015-11-02 22:53:48 +00:00
|
|
|
unsigned RegUsage = 0;
|
2016-05-19 20:38:03 +00:00
|
|
|
for (auto Inst : OpenIntervals) {
|
|
|
|
// Skip ignored values for VF > 1.
|
2017-02-08 19:25:23 +00:00
|
|
|
if (VecValuesToIgnore.count(Inst) ||
|
|
|
|
isScalarAfterVectorization(Inst, VFs[j]))
|
2016-05-19 20:38:03 +00:00
|
|
|
continue;
|
2015-11-02 22:53:48 +00:00
|
|
|
RegUsage += GetRegUsage(Inst->getType(), VFs[j]);
|
2016-05-19 20:38:03 +00:00
|
|
|
}
|
2015-11-02 22:53:48 +00:00
|
|
|
MaxUsages[j] = std::max(MaxUsages[j], RegUsage);
|
|
|
|
}
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
|
|
|
|
<< OpenIntervals.size() << '\n');
|
2013-01-04 17:48:25 +00:00
|
|
|
|
|
|
|
// Add the current instruction to the list of open intervals.
|
|
|
|
OpenIntervals.insert(I);
|
|
|
|
}
|
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
|
|
|
|
unsigned Invariant = 0;
|
|
|
|
if (VFs[i] == 1)
|
|
|
|
Invariant = LoopInvariants.size();
|
|
|
|
else {
|
|
|
|
for (auto Inst : LoopInvariants)
|
|
|
|
Invariant += GetRegUsage(Inst->getType(), VFs[i]);
|
|
|
|
}
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV(REG): VF = " << VFs[i] << '\n');
|
2015-11-02 22:53:48 +00:00
|
|
|
DEBUG(dbgs() << "LV(REG): Found max usage: " << MaxUsages[i] << '\n');
|
|
|
|
DEBUG(dbgs() << "LV(REG): Found invariant usage: " << Invariant << '\n');
|
|
|
|
|
|
|
|
RU.LoopInvariantRegs = Invariant;
|
|
|
|
RU.MaxLocalUsers = MaxUsages[i];
|
|
|
|
RUs[i] = RU;
|
|
|
|
}
|
2013-01-04 17:48:25 +00:00
|
|
|
|
2015-11-02 22:53:48 +00:00
|
|
|
return RUs;
|
2013-01-04 17:48:25 +00:00
|
|
|
}
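The interval counting above can be hard to picture from the code alone, so here is a standalone sketch over a hypothetical five-instruction block: each value opens an interval at its defining index, intervals are closed at the index at which they are recorded to end (one past their last use, as the D26554 commit message quoted inside the function notes), and the register estimate is the largest number of intervals open at any point. Each value here is assumed to need one register, i.e. VF = 1, and the erase step runs before any per-instruction skip checks, matching that fix.

// Minimal sketch of the open-interval counting; the last-use table is invented.
#include <algorithm>
#include <cstdio>
#include <map>
#include <set>
#include <vector>

int main() {
  // For each instruction index, the values whose intervals end there.
  std::map<unsigned, std::vector<unsigned>> TransposeEnds = {{3, {0, 1, 2}},
                                                             {4, {3}}};
  unsigned NumInstrs = 5;

  std::set<unsigned> OpenIntervals;
  unsigned MaxUsage = 0;
  for (unsigned I = 0; I < NumInstrs; ++I) {
    for (unsigned Dead : TransposeEnds[I]) // close intervals ending here first
      OpenIntervals.erase(Dead);
    MaxUsage = std::max<unsigned>(MaxUsage, OpenIntervals.size());
    OpenIntervals.insert(I); // this instruction's value becomes live
  }
  std::printf("LV(REG): Found max usage: %u\n", MaxUsage); // prints 2
  return 0;
}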
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
|
|
|
|
// TODO: Cost model for emulated masked load/store is completely
|
|
|
|
// broken. This hack guides the cost model to use an artificially
|
|
|
|
// high enough value to practically disable vectorization with such
|
|
|
|
// operations, except where the previously deployed legality hack allowed
|
|
|
|
// using very low cost values. This is to avoid regressions coming simply
|
|
|
|
// from moving "masked load/store" check from legality to cost model.
|
|
|
|
// Masked Load/Gather emulation was previously never allowed.
|
|
|
|
// A limited amount of Masked Store/Scatter emulation was allowed.
|
|
|
|
assert(isScalarWithPredication(I) &&
|
|
|
|
"Expecting a scalar emulated instruction");
|
|
|
|
return isa<LoadInst>(I) ||
|
|
|
|
(isa<StoreInst>(I) &&
|
|
|
|
NumPredStores > NumberOfStoresToPredicate);
|
|
|
|
}
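As a rough illustration of the policy above (the kinds, counts, and threshold below are invented; in the real code the threshold is the NumberOfStoresToPredicate value used a few lines up), emulated masked loads always take the artificially high cost path, while emulated masked stores only do so once the number of predicated stores exceeds the threshold.

// Hypothetical stand-in for the decision above.
#include <cstdio>

enum class Kind { Load, Store };

static bool useEmulatedHackSketch(Kind K, unsigned NumPredStores,
                                  unsigned StoresToPredicate) {
  return K == Kind::Load ||
         (K == Kind::Store && NumPredStores > StoresToPredicate);
}

int main() {
  unsigned Threshold = 1; // hypothetical NumberOfStoresToPredicate value
  std::printf("masked load:     %d\n",
              useEmulatedHackSketch(Kind::Load, 0, Threshold));
  std::printf("1 masked store:  %d\n",
              useEmulatedHackSketch(Kind::Store, 1, Threshold));
  std::printf("2 masked stores: %d\n",
              useEmulatedHackSketch(Kind::Store, 2, Threshold));
  return 0;
}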
|
|
|
|
|
2016-12-07 15:03:32 +00:00
|
|
|
void LoopVectorizationCostModel::collectInstsToScalarize(unsigned VF) {
|
|
|
|
// If we aren't vectorizing the loop, or if we've already collected the
|
|
|
|
// instructions to scalarize, there's nothing to do. Collection may already
|
|
|
|
// have occurred if we have a user-selected VF and are now computing the
|
|
|
|
// expected cost for interleaving.
|
|
|
|
if (VF < 2 || InstsToScalarize.count(VF))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Initialize a mapping for VF in InstsToScalarize. If we find that it's
|
|
|
|
// not profitable to scalarize any instructions, the presence of VF in the
|
|
|
|
// map will indicate that we've analyzed it already.
|
|
|
|
ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
|
|
|
|
|
|
|
|
// Find all the instructions that are scalar with predication in the loop and
|
|
|
|
// determine if it would be better to not if-convert the blocks they are in.
|
|
|
|
// If so, we also record the instructions to scalarize.
|
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
|
|
|
if (!Legal->blockNeedsPredication(BB))
|
|
|
|
continue;
|
|
|
|
for (Instruction &I : *BB)
|
2018-02-26 11:06:36 +00:00
|
|
|
if (isScalarWithPredication(&I)) {
|
2016-12-07 15:03:32 +00:00
|
|
|
ScalarCostsTy ScalarCosts;
|
2018-02-26 11:06:36 +00:00
|
|
|
// Do not apply discount logic if hacked cost is needed
|
|
|
|
// for emulated masked memrefs.
|
|
|
|
if (!useEmulatedMaskMemRefHack(&I) &&
|
|
|
|
computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
|
2016-12-07 15:03:32 +00:00
|
|
|
ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
|
2017-04-12 13:13:15 +00:00
|
|
|
// Remember that BB will remain after vectorization.
|
|
|
|
PredicatedBBsAfterVectorization.insert(BB);
|
2016-12-07 15:03:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int LoopVectorizationCostModel::computePredInstDiscount(
|
|
|
|
Instruction *PredInst, DenseMap<Instruction *, unsigned> &ScalarCosts,
|
|
|
|
unsigned VF) {
|
2017-02-08 19:25:23 +00:00
|
|
|
assert(!isUniformAfterVectorization(PredInst, VF) &&
|
2016-12-07 15:03:32 +00:00
|
|
|
"Instruction marked uniform-after-vectorization will be predicated");
|
|
|
|
|
|
|
|
// Initialize the discount to zero, meaning that the scalar version and the
|
|
|
|
// vector version cost the same.
|
|
|
|
int Discount = 0;
|
|
|
|
|
|
|
|
// Holds instructions to analyze. The instructions we visit are mapped in
|
|
|
|
// ScalarCosts. Those instructions are the ones that would be scalarized if
|
|
|
|
// we find that the scalar version costs less.
|
|
|
|
SmallVector<Instruction *, 8> Worklist;
|
|
|
|
|
|
|
|
// Returns true if the given instruction can be scalarized.
|
|
|
|
auto canBeScalarized = [&](Instruction *I) -> bool {
|
|
|
|
// We only attempt to scalarize instructions forming a single-use chain
|
|
|
|
// from the original predicated block that would otherwise be vectorized.
|
|
|
|
// Although not strictly necessary, we give up on instructions we know will
|
|
|
|
// already be scalar to avoid traversing chains that are unlikely to be
|
|
|
|
// beneficial.
|
|
|
|
if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
|
2017-02-08 19:25:23 +00:00
|
|
|
isScalarAfterVectorization(I, VF))
|
2016-12-07 15:03:32 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// If the instruction is scalar with predication, it will be analyzed
|
|
|
|
// separately. We ignore it within the context of PredInst.
|
2018-02-26 11:06:36 +00:00
|
|
|
if (isScalarWithPredication(I))
|
2016-12-07 15:03:32 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// If any of the instruction's operands are uniform after vectorization,
|
|
|
|
// the instruction cannot be scalarized. This prevents, for example, a
|
|
|
|
// masked load from being scalarized.
|
|
|
|
//
|
|
|
|
// We assume we will only emit a value for lane zero of an instruction
|
|
|
|
// marked uniform after vectorization, rather than VF identical values.
|
|
|
|
// Thus, if we scalarize an instruction that uses a uniform, we would
|
|
|
|
// create uses of values corresponding to the lanes we aren't emitting code
|
|
|
|
// for. This behavior can be changed by allowing getScalarValue to clone
|
|
|
|
// the lane zero values for uniforms rather than asserting.
|
|
|
|
for (Use &U : I->operands())
|
|
|
|
if (auto *J = dyn_cast<Instruction>(U.get()))
|
2017-02-08 19:25:23 +00:00
|
|
|
if (isUniformAfterVectorization(J, VF))
|
2016-12-07 15:03:32 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Otherwise, we can scalarize the instruction.
|
|
|
|
return true;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Returns true if an operand that cannot be scalarized must be extracted
|
|
|
|
// from a vector. We will account for this scalarization overhead below. Note
|
|
|
|
// that the non-void predicated instructions are placed in their own blocks,
|
|
|
|
// and their return values are inserted into vectors. Thus, an extract would
|
|
|
|
// still be required.
|
|
|
|
auto needsExtract = [&](Instruction *I) -> bool {
|
2017-02-08 19:25:23 +00:00
|
|
|
return TheLoop->contains(I) && !isScalarAfterVectorization(I, VF);
|
2016-12-07 15:03:32 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// Compute the expected cost discount from scalarizing the entire expression
|
|
|
|
// feeding the predicated instruction. We currently only consider expressions
|
|
|
|
// that are single-use instruction chains.
|
|
|
|
Worklist.push_back(PredInst);
|
|
|
|
while (!Worklist.empty()) {
|
|
|
|
Instruction *I = Worklist.pop_back_val();
|
|
|
|
|
|
|
|
// If we've already analyzed the instruction, there's nothing to do.
|
|
|
|
if (ScalarCosts.count(I))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Compute the cost of the vector instruction. Note that this cost already
|
|
|
|
// includes the scalarization overhead of the predicated instruction.
|
|
|
|
unsigned VectorCost = getInstructionCost(I, VF).first;
|
|
|
|
|
|
|
|
// Compute the cost of the scalarized instruction. This cost is the cost of
|
|
|
|
// the instruction as if it wasn't if-converted and instead remained in the
|
|
|
|
// predicated block. We will scale this cost by block probability after
|
|
|
|
// computing the scalarization overhead.
|
|
|
|
unsigned ScalarCost = VF * getInstructionCost(I, 1).first;
|
|
|
|
|
|
|
|
// Compute the scalarization overhead of needed insertelement instructions
|
|
|
|
// and phi nodes.
|
2018-02-26 11:06:36 +00:00
|
|
|
if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
|
2017-01-26 07:03:25 +00:00
|
|
|
ScalarCost += TTI.getScalarizationOverhead(ToVectorTy(I->getType(), VF),
|
|
|
|
true, false);
|
2016-12-07 15:03:32 +00:00
|
|
|
ScalarCost += VF * TTI.getCFInstrCost(Instruction::PHI);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the scalarization overhead of needed extractelement
|
|
|
|
// instructions. For each of the instruction's operands, if the operand can
|
|
|
|
// be scalarized, add it to the worklist; otherwise, account for the
|
|
|
|
// overhead.
|
|
|
|
for (Use &U : I->operands())
|
|
|
|
if (auto *J = dyn_cast<Instruction>(U.get())) {
|
|
|
|
assert(VectorType::isValidElementType(J->getType()) &&
|
|
|
|
"Instruction has non-scalar type");
|
|
|
|
if (canBeScalarized(J))
|
|
|
|
Worklist.push_back(J);
|
|
|
|
else if (needsExtract(J))
|
2017-01-26 07:03:25 +00:00
|
|
|
ScalarCost += TTI.getScalarizationOverhead(
|
|
|
|
ToVectorTy(J->getType(),VF), false, true);
|
2016-12-07 15:03:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Scale the total scalar cost by block probability.
|
|
|
|
ScalarCost /= getReciprocalPredBlockProb();
|
|
|
|
|
|
|
|
// Compute the discount. A non-negative discount means the vector version
|
|
|
|
// of the instruction costs more, and scalarizing would be beneficial.
|
|
|
|
Discount += VectorCost - ScalarCost;
|
|
|
|
ScalarCosts[I] = ScalarCost;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Discount;
|
|
|
|
}
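As a worked example of the discount computed above, take invented costs with VF = 4: the if-converted vector form of a predicated instruction costs 12 (already including its predication/scalarization overhead), the scalar form costs 2 per lane, the insertelement-plus-PHI overhead is assumed to be 6, and the reciprocal predicated-block probability is taken as 2 (the block is assumed to run about half the time). The scalar path then costs (4*2 + 6) / 2 = 7, the discount is 12 - 7 = 5 >= 0, and the chain would be recorded for scalarization.

// Worked example of the discount arithmetic above, with invented costs.
#include <cstdio>

int main() {
  unsigned VF = 4;
  unsigned VectorCost = 12;             // if-converted cost, incl. predication
  unsigned ScalarInstCost = 2;          // cost of one scalar copy
  unsigned InsertAndPhiOverhead = 6;    // assumed insertelement + PHI cost
  unsigned ReciprocalPredBlockProb = 2; // block assumed taken ~half the time

  unsigned ScalarCost = VF * ScalarInstCost + InsertAndPhiOverhead;
  ScalarCost /= ReciprocalPredBlockProb; // scale by block probability
  int Discount = (int)VectorCost - (int)ScalarCost;

  std::printf("ScalarCost = %u, Discount = %d -> %s\n", ScalarCost, Discount,
              Discount >= 0 ? "scalarize the chain" : "keep the vector form");
  return 0;
}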
|
|
|
|
|
2016-03-30 19:37:08 +00:00
|
|
|
LoopVectorizationCostModel::VectorizationCostTy
|
|
|
|
LoopVectorizationCostModel::expectedCost(unsigned VF) {
|
|
|
|
VectorizationCostTy Cost;
|
2012-10-24 20:36:32 +00:00
|
|
|
|
2012-12-03 22:46:31 +00:00
|
|
|
// For each block.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
2016-03-30 19:37:08 +00:00
|
|
|
VectorizationCostTy BlockCost;
|
2012-12-04 00:49:28 +00:00
|
|
|
|
2012-12-03 22:46:31 +00:00
|
|
|
// For each instruction in the old loop.
|
2016-07-12 19:35:15 +00:00
|
|
|
for (Instruction &I : *BB) {
|
2013-03-09 16:27:27 +00:00
|
|
|
// Skip dbg intrinsics.
|
2016-07-12 19:35:15 +00:00
|
|
|
if (isa<DbgInfoIntrinsic>(I))
|
2013-03-09 15:56:34 +00:00
|
|
|
continue;
|
|
|
|
|
2015-08-27 14:12:17 +00:00
|
|
|
// Skip ignored values.
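// (Note: per the "induction with redundant casts" change described later in
// this log, VecValuesToIgnore includes casts that are a redundant part of an
// induction's update chain; for VF > 1 they are widened away together with
// the induction itself, so they are deliberately not costed here.)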
|
2017-12-12 08:57:43 +00:00
|
|
|
if (ValuesToIgnore.count(&I) ||
|
|
|
|
(VF > 1 && VecValuesToIgnore.count(&I)))
|
2014-10-14 22:59:49 +00:00
|
|
|
continue;
|
|
|
|
|
2016-07-12 19:35:15 +00:00
|
|
|
VectorizationCostTy C = getInstructionCost(&I, VF);
|
2014-01-27 11:41:50 +00:00
|
|
|
|
|
|
|
// Check if we should override the cost.
|
|
|
|
if (ForceTargetInstructionCost.getNumOccurrences() > 0)
|
2016-03-30 19:37:08 +00:00
|
|
|
C.first = ForceTargetInstructionCost;
|
2014-01-27 11:41:50 +00:00
|
|
|
|
2016-03-30 19:37:08 +00:00
|
|
|
BlockCost.first += C.first;
|
|
|
|
BlockCost.second |= C.second;
|
2016-05-05 00:54:54 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first << " for VF "
|
2016-07-12 19:35:15 +00:00
|
|
|
<< VF << " For instruction: " << I << '\n');
|
2012-12-03 22:46:31 +00:00
|
|
|
}
|
|
|
|
|
2016-10-05 18:30:36 +00:00
|
|
|
// If we are vectorizing a predicated block, it will have been
|
|
|
|
// if-converted. This means that the block's instructions (aside from
|
|
|
|
// stores and instructions that may divide by zero) will now be
|
|
|
|
// unconditionally executed. For the scalar case, we may not always execute
|
|
|
|
// the predicated block. Thus, scale the block's cost by the probability of
|
|
|
|
// executing it.
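// As a rough illustration (assumed numbers only): if getReciprocalPredBlockProb()
// models a 50% chance of executing the predicated block (i.e. returns 2), a
// block whose scalar instructions sum to a cost of 6 contributes an expected
// cost of 3 to the scalar loop.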
|
2016-07-12 19:35:15 +00:00
|
|
|
if (VF == 1 && Legal->blockNeedsPredication(BB))
|
2016-10-05 18:30:36 +00:00
|
|
|
BlockCost.first /= getReciprocalPredBlockProb();
|
2012-12-04 00:49:28 +00:00
|
|
|
|
2016-03-30 19:37:08 +00:00
|
|
|
Cost.first += BlockCost.first;
|
|
|
|
Cost.second |= BlockCost.second;
|
2012-10-24 20:36:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Cost;
|
|
|
|
}
|
|
|
|
|
2017-01-05 14:03:41 +00:00
|
|
|
/// \brief Gets Address Access SCEV after verifying that the access pattern
|
|
|
|
/// is loop invariant except the induction variable dependence.
|
2013-07-12 19:16:02 +00:00
|
|
|
///
|
2017-01-05 14:03:41 +00:00
|
|
|
/// This SCEV can be sent to the Target in order to estimate the address
|
|
|
|
/// calculation cost.
|
|
|
|
static const SCEV *getAddressAccessSCEV(
|
|
|
|
Value *Ptr,
|
|
|
|
LoopVectorizationLegality *Legal,
|
[LV] Support efficient vectorization of an induction with redundant casts
D30041 extended SCEVPredicateRewriter to improve handling of Phi nodes whose
update chain involves casts; PSCEV can now build an AddRecurrence for some
forms of such phi nodes, under the proper runtime overflow test. This means
that we can identify such phi nodes as an induction, and the loop-vectorizer
can now vectorize such inductions, however inefficiently. The vectorizer
doesn't know that it can ignore the casts, and so it vectorizes them.
This patch records the casts in the InductionDescriptor, so that they could
be marked to be ignored for cost calculation (we use VecValuesToIgnore for
that) and ignored for vectorization/widening/scalarization (i.e. treated as
TriviallyDead).
In addition to marking all these casts to be ignored, we also need to make
sure that each cast is mapped to the right vector value in the vector loop body
(be it a widened, vectorized, or scalarized induction). So whenever an
induction phi is mapped to a vector value (during vectorization/widening/
scalarization), we also map the respective cast instruction (if one exists) to that
vector value. (If the phi-update sequence of an induction involves more than one
cast, then the above mapping to a vector value is relevant only for the last cast
of the sequence as we allow only the "last cast" to be used outside the
induction update chain itself).
This is the last step in addressing PR30654.
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
PredicatedScalarEvolution &PSE,
|
2017-01-05 14:03:41 +00:00
|
|
|
const Loop *TheLoop) {
|
2017-12-14 07:56:31 +00:00
|
|
|
|
2016-07-12 19:35:15 +00:00
|
|
|
auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
|
2013-07-12 19:16:02 +00:00
|
|
|
if (!Gep)
|
2017-01-05 14:03:41 +00:00
|
|
|
return nullptr;
|
2013-07-12 19:16:02 +00:00
|
|
|
|
|
|
|
// We are looking for a gep with all loop invariant indices except for one
|
|
|
|
// which should be an induction variable.
|
2017-12-14 07:56:31 +00:00
|
|
|
auto SE = PSE.getSE();
|
2013-07-12 19:16:02 +00:00
|
|
|
unsigned NumOperands = Gep->getNumOperands();
|
|
|
|
for (unsigned i = 1; i < NumOperands; ++i) {
|
|
|
|
Value *Opd = Gep->getOperand(i);
|
|
|
|
if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
|
|
|
|
!Legal->isInductionVariable(Opd))
|
2017-01-05 14:03:41 +00:00
|
|
|
return nullptr;
|
2013-07-12 19:16:02 +00:00
|
|
|
}
|
|
|
|
|
2017-01-05 14:03:41 +00:00
|
|
|
// Now we know we have a GEP ptr, %inv, %ind, %inv. Return the Ptr SCEV.
|
2017-12-14 07:56:31 +00:00
|
|
|
return PSE.getSCEV(Ptr);
|
2013-07-12 19:16:02 +00:00
|
|
|
}
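A hedged example of the access pattern this helper accepts (hypothetical code, not taken from the file):

// Accepted: every GEP index is loop invariant except a single induction
// variable, e.g. the access M[Inv][i] inside 'for (i = 0; i < n; ++i)',
// whose GEP looks like 'getelementptr ..., %M, i64 0, i64 %inv, i64 %i'.
// Rejected (nullptr returned): an index that is neither loop invariant nor
// an induction variable, e.g. M[B[i]][i].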
|
|
|
|
|
2014-01-10 18:20:32 +00:00
|
|
|
static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
|
2015-10-24 20:16:42 +00:00
|
|
|
return Legal->hasStride(I->getOperand(0)) ||
|
|
|
|
Legal->hasStride(I->getOperand(1));
|
2014-01-10 18:20:32 +00:00
|
|
|
}
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
|
|
|
|
unsigned VF) {
|
|
|
|
Type *ValTy = getMemInstValueType(I);
|
|
|
|
auto SE = PSE.getSE();
|
|
|
|
|
|
|
|
unsigned Alignment = getMemInstAlignment(I);
|
|
|
|
unsigned AS = getMemInstAddressSpace(I);
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(I);
|
2017-02-08 19:25:23 +00:00
|
|
|
Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
|
|
|
|
|
|
|
|
// Figure out whether the access is strided and get the stride value
|
|
|
|
// if it's known at compile time
|
2017-12-14 07:56:31 +00:00
|
|
|
const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
|
2017-02-08 19:25:23 +00:00
|
|
|
|
|
|
|
// Get the cost of the scalar memory instruction and address computation.
|
|
|
|
unsigned Cost = VF * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
|
|
|
|
|
|
|
|
Cost += VF *
|
|
|
|
TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
|
2017-04-12 11:49:08 +00:00
|
|
|
AS, I);
|
2017-02-08 19:25:23 +00:00
|
|
|
|
|
|
|
// Get the overhead of the extractelement and insertelement instructions
|
|
|
|
// we might create due to scalarization.
|
|
|
|
Cost += getScalarizationOverhead(I, VF, TTI);
|
|
|
|
|
|
|
|
// If we have a predicated store, it may not be executed for each vector
|
|
|
|
// lane. Scale the cost by the probability of executing the predicated
|
|
|
|
// block.
|
2018-02-26 11:06:36 +00:00
|
|
|
if (isScalarWithPredication(I)) {
|
2017-02-08 19:25:23 +00:00
|
|
|
Cost /= getReciprocalPredBlockProb();
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
if (useEmulatedMaskMemRefHack(I))
|
|
|
|
// Artificially set the cost to a value high enough to practically disable
|
|
|
|
// vectorization with such operations.
|
|
|
|
Cost = 3000000;
|
|
|
|
}
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
return Cost;
|
|
|
|
}
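A rough worked example of how this cost composes; all of the TTI numbers below are hypothetical and only show the shape of the formula:

// Predicated store, VF = 4, assumed per-element costs of 1:
//   address computation:     4 * 1 = 4
//   scalar stores:           4 * 1 = 4
//   insert/extract overhead:         4
//   subtotal                         12
// The store is predicated, so the subtotal is divided by the reciprocal block
// probability (e.g. 2), giving 6. If useEmulatedMaskMemRefHack() applies, the
// cost is instead pinned at 3000000 to effectively forbid vectorization.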
|
|
|
|
|
|
|
|
unsigned LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
|
|
|
|
unsigned VF) {
|
|
|
|
Type *ValTy = getMemInstValueType(I);
|
|
|
|
Type *VectorTy = ToVectorTy(ValTy, VF);
|
|
|
|
unsigned Alignment = getMemInstAlignment(I);
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(I);
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned AS = getMemInstAddressSpace(I);
|
|
|
|
int ConsecutiveStride = Legal->isConsecutivePtr(Ptr);
|
|
|
|
|
|
|
|
assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
|
|
|
|
"Stride should be 1 or -1 for consecutive memory access");
|
|
|
|
unsigned Cost = 0;
|
|
|
|
if (Legal->isMaskRequired(I))
|
|
|
|
Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS);
|
|
|
|
else
|
2017-04-12 11:49:08 +00:00
|
|
|
Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, I);
|
2017-02-08 19:25:23 +00:00
|
|
|
|
|
|
|
bool Reverse = ConsecutiveStride < 0;
|
|
|
|
if (Reverse)
|
|
|
|
Cost += TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
|
|
|
|
return Cost;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
|
|
|
|
unsigned VF) {
|
|
|
|
LoadInst *LI = cast<LoadInst>(I);
|
|
|
|
Type *ValTy = LI->getType();
|
|
|
|
Type *VectorTy = ToVectorTy(ValTy, VF);
|
|
|
|
unsigned Alignment = LI->getAlignment();
|
|
|
|
unsigned AS = LI->getPointerAddressSpace();
|
|
|
|
|
|
|
|
return TTI.getAddressComputationCost(ValTy) +
|
|
|
|
TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS) +
|
|
|
|
TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
|
|
|
|
unsigned VF) {
|
|
|
|
Type *ValTy = getMemInstValueType(I);
|
|
|
|
Type *VectorTy = ToVectorTy(ValTy, VF);
|
|
|
|
unsigned Alignment = getMemInstAlignment(I);
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(I);
|
2017-02-08 19:25:23 +00:00
|
|
|
|
|
|
|
return TTI.getAddressComputationCost(VectorTy) +
|
|
|
|
TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
|
|
|
|
Legal->isMaskRequired(I), Alignment);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
|
|
|
|
unsigned VF) {
|
|
|
|
Type *ValTy = getMemInstValueType(I);
|
|
|
|
Type *VectorTy = ToVectorTy(ValTy, VF);
|
|
|
|
unsigned AS = getMemInstAddressSpace(I);
|
|
|
|
|
|
|
|
auto Group = Legal->getInterleavedAccessGroup(I);
|
|
|
|
assert(Group && "Fail to get an interleaved access group.");
|
|
|
|
|
|
|
|
unsigned InterleaveFactor = Group->getFactor();
|
|
|
|
Type *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
|
|
|
|
|
|
|
|
// Holds the indices of existing members in an interleaved load group.
|
|
|
|
// An interleaved store group doesn't need this as it doesn't allow gaps.
|
|
|
|
SmallVector<unsigned, 4> Indices;
|
|
|
|
if (isa<LoadInst>(I)) {
|
|
|
|
for (unsigned i = 0; i < InterleaveFactor; i++)
|
|
|
|
if (Group->getMember(i))
|
|
|
|
Indices.push_back(i);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Calculate the cost of the whole interleaved group.
|
|
|
|
unsigned Cost = TTI.getInterleavedMemoryOpCost(I->getOpcode(), WideVecTy,
|
|
|
|
Group->getFactor(), Indices,
|
|
|
|
Group->getAlignment(), AS);
|
|
|
|
|
|
|
|
if (Group->isReverse())
|
|
|
|
Cost += Group->getNumMembers() *
|
|
|
|
TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, 0);
|
|
|
|
return Cost;
|
|
|
|
}
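A small hedged example of how the group cost above handles gaps and reversal:

// For an interleaved load group with factor 3 where only members 0 and 2 are
// present (member 1 is a gap), Indices is {0, 2}; store groups never have
// gaps, so no indices are collected for them. If the group is reversed, one
// SK_Reverse shuffle per present member is added on top of the cost returned
// by getInterleavedMemoryOpCost for the wide vector access.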
|
|
|
|
|
|
|
|
unsigned LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
|
|
|
|
unsigned VF) {
|
|
|
|
// Calculate scalar cost only. Vectorization cost should be ready at this
|
|
|
|
// moment.
|
|
|
|
if (VF == 1) {
|
|
|
|
Type *ValTy = getMemInstValueType(I);
|
|
|
|
unsigned Alignment = getMemInstAlignment(I);
|
2017-05-04 05:31:56 +00:00
|
|
|
unsigned AS = getMemInstAddressSpace(I);
|
2017-02-08 19:25:23 +00:00
|
|
|
|
|
|
|
return TTI.getAddressComputationCost(ValTy) +
|
2017-04-12 11:49:08 +00:00
|
|
|
TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, I);
|
2017-02-08 19:25:23 +00:00
|
|
|
}
|
|
|
|
return getWideningCost(I, VF);
|
|
|
|
}
|
|
|
|
|
2016-03-30 19:37:08 +00:00
|
|
|
LoopVectorizationCostModel::VectorizationCostTy
|
2012-10-24 20:36:32 +00:00
|
|
|
LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
|
2012-10-26 23:49:28 +00:00
|
|
|
// If we know that this instruction will remain uniform, check the cost of
|
|
|
|
// the scalar version.
|
2017-02-08 19:25:23 +00:00
|
|
|
if (isUniformAfterVectorization(I, VF))
|
2012-10-26 23:49:28 +00:00
|
|
|
VF = 1;
|
|
|
|
|
2016-12-07 15:03:32 +00:00
|
|
|
if (VF > 1 && isProfitableToScalarize(I, VF))
|
|
|
|
return VectorizationCostTy(InstsToScalarize[VF][I], false);
|
|
|
|
|
2017-05-24 13:42:56 +00:00
|
|
|
// Forced scalars do not have any scalarization overhead.
|
|
|
|
if (VF > 1 && ForcedScalars.count(VF) &&
|
|
|
|
ForcedScalars.find(VF)->second.count(I))
|
|
|
|
return VectorizationCostTy((getInstructionCost(I, 1).first * VF), false);
|
|
|
|
|
2016-03-30 19:37:08 +00:00
|
|
|
Type *VectorTy;
|
|
|
|
unsigned C = getInstructionCost(I, VF, VectorTy);
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
bool TypeNotScalarized =
|
2017-05-30 19:55:57 +00:00
|
|
|
VF > 1 && VectorTy->isVectorTy() && TTI.getNumberOfParts(VectorTy) < VF;
|
2016-03-30 19:37:08 +00:00
|
|
|
return VectorizationCostTy(C, TypeNotScalarized);
|
|
|
|
}
|
|
|
|
|
2017-02-08 19:25:23 +00:00
|
|
|
void LoopVectorizationCostModel::setCostBasedWideningDecision(unsigned VF) {
|
|
|
|
if (VF == 1)
|
|
|
|
return;
|
2018-02-26 11:06:36 +00:00
|
|
|
NumPredStores = 0;
|
2017-02-08 19:25:23 +00:00
|
|
|
for (BasicBlock *BB : TheLoop->blocks()) {
|
|
|
|
// For each instruction in the old loop.
|
|
|
|
for (Instruction &I : *BB) {
|
2018-03-09 21:05:58 +00:00
|
|
|
Value *Ptr = getLoadStorePointerOperand(&I);
|
2017-02-08 19:25:23 +00:00
|
|
|
if (!Ptr)
|
|
|
|
continue;
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
|
|
|
|
NumPredStores++;
|
2017-02-08 19:25:23 +00:00
|
|
|
if (isa<LoadInst>(&I) && Legal->isUniform(Ptr)) {
|
|
|
|
// Scalar load + broadcast
|
|
|
|
unsigned Cost = getUniformMemOpCost(&I, VF);
|
|
|
|
setWideningDecision(&I, VF, CM_Scalarize, Cost);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We assume that widening is the best solution when possible.
|
2018-02-26 11:06:36 +00:00
|
|
|
if (memoryInstructionCanBeWidened(&I, VF)) {
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned Cost = getConsecutiveMemOpCost(&I, VF);
|
2018-03-09 21:05:58 +00:00
|
|
|
int ConsecutiveStride =
|
|
|
|
Legal->isConsecutivePtr(getLoadStorePointerOperand(&I));
|
2017-12-16 02:55:24 +00:00
|
|
|
assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
|
|
|
|
"Expected consecutive stride.");
|
|
|
|
InstWidening Decision =
|
|
|
|
ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
|
|
|
|
setWideningDecision(&I, VF, Decision, Cost);
|
2017-02-08 19:25:23 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Choose between Interleaving, Gather/Scatter or Scalarization.
|
2017-10-12 23:30:03 +00:00
|
|
|
unsigned InterleaveCost = std::numeric_limits<unsigned>::max();
|
2017-02-08 19:25:23 +00:00
|
|
|
unsigned NumAccesses = 1;
|
|
|
|
if (Legal->isAccessInterleaved(&I)) {
|
|
|
|
auto Group = Legal->getInterleavedAccessGroup(&I);
|
|
|
|
assert(Group && "Fail to get an interleaved access group.");
|
|
|
|
|
|
|
|
// Make one decision for the whole group.
|
|
|
|
if (getWideningDecision(&I, VF) != CM_Unknown)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
NumAccesses = Group->getNumMembers();
|
|
|
|
InterleaveCost = getInterleaveGroupCost(&I, VF);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned GatherScatterCost =
|
2018-02-26 11:06:36 +00:00
|
|
|
isLegalGatherOrScatter(&I)
|
2017-02-08 19:25:23 +00:00
|
|
|
? getGatherScatterCost(&I, VF) * NumAccesses
|
2017-10-12 23:30:03 +00:00
|
|
|
: std::numeric_limits<unsigned>::max();
|
2017-02-08 19:25:23 +00:00
|
|
|
|
|
|
|
unsigned ScalarizationCost =
|
|
|
|
getMemInstScalarizationCost(&I, VF) * NumAccesses;
|
|
|
|
|
|
|
|
// Choose the best solution for the current VF,
|
|
|
|
// write down this decision and use it during vectorization.
|
|
|
|
unsigned Cost;
|
|
|
|
InstWidening Decision;
|
|
|
|
if (InterleaveCost <= GatherScatterCost &&
|
|
|
|
InterleaveCost < ScalarizationCost) {
|
|
|
|
Decision = CM_Interleave;
|
|
|
|
Cost = InterleaveCost;
|
|
|
|
} else if (GatherScatterCost < ScalarizationCost) {
|
|
|
|
Decision = CM_GatherScatter;
|
|
|
|
Cost = GatherScatterCost;
|
|
|
|
} else {
|
|
|
|
Decision = CM_Scalarize;
|
|
|
|
Cost = ScalarizationCost;
|
|
|
|
}
|
|
|
|
// If the instruction belongs to an interleave group, the whole group
|
|
|
|
// receives the same decision. The whole group receives the cost, but
|
|
|
|
// the cost will actually be assigned to one instruction.
|
|
|
|
if (auto Group = Legal->getInterleavedAccessGroup(&I))
|
|
|
|
setWideningDecision(Group, VF, Decision, Cost);
|
|
|
|
else
|
|
|
|
setWideningDecision(&I, VF, Decision, Cost);
|
|
|
|
}
|
|
|
|
}
|
2017-05-24 13:42:56 +00:00
|
|
|
|
|
|
|
// Make sure that any load of an address and any other address computation
|
|
|
|
// remain scalar unless there is gather/scatter support. This avoids
|
|
|
|
// inevitable extracts into address registers, and also has the benefit of
|
|
|
|
// activating LSR more, since that pass can't optimize vectorized
|
|
|
|
// addresses.
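// For instance, a load whose result is only used (through a GEP) to form
// the address of another memory access: widening it would force an
// extractelement per lane just to rebuild scalar addresses, so without
// gather/scatter support it is better kept scalar.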
|
|
|
|
if (TTI.prefersVectorizedAddressing())
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Start with all scalar pointer uses.
|
|
|
|
SmallPtrSet<Instruction *, 8> AddrDefs;
|
|
|
|
for (BasicBlock *BB : TheLoop->blocks())
|
|
|
|
for (Instruction &I : *BB) {
|
|
|
|
Instruction *PtrDef =
|
2018-03-09 21:05:58 +00:00
|
|
|
dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
|
2017-05-24 13:42:56 +00:00
|
|
|
if (PtrDef && TheLoop->contains(PtrDef) &&
|
|
|
|
getWideningDecision(&I, VF) != CM_GatherScatter)
|
|
|
|
AddrDefs.insert(PtrDef);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add all instructions used to generate the addresses.
|
|
|
|
SmallVector<Instruction *, 4> Worklist;
|
|
|
|
for (auto *I : AddrDefs)
|
|
|
|
Worklist.push_back(I);
|
|
|
|
while (!Worklist.empty()) {
|
|
|
|
Instruction *I = Worklist.pop_back_val();
|
|
|
|
for (auto &Op : I->operands())
|
|
|
|
if (auto *InstOp = dyn_cast<Instruction>(Op))
|
|
|
|
if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
|
2017-10-12 23:30:03 +00:00
|
|
|
AddrDefs.insert(InstOp).second)
|
2017-05-24 13:42:56 +00:00
|
|
|
Worklist.push_back(InstOp);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto *I : AddrDefs) {
|
|
|
|
if (isa<LoadInst>(I)) {
|
|
|
|
// Setting the desired widening decision should ideally be handled in
|
|
|
|
// by cost functions, but since this involves the task of finding out
|
|
|
|
// if the loaded register is involved in an address computation, it is
|
|
|
|
// instead changed here when we know this is the case.
|
2017-12-16 02:55:24 +00:00
|
|
|
InstWidening Decision = getWideningDecision(I, VF);
|
|
|
|
if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
|
2017-05-24 13:42:56 +00:00
|
|
|
// Scalarize a widened load of address.
|
|
|
|
setWideningDecision(I, VF, CM_Scalarize,
|
|
|
|
(VF * getMemoryInstructionCost(I, 1)));
|
|
|
|
else if (auto Group = Legal->getInterleavedAccessGroup(I)) {
|
|
|
|
// Scalarize an interleave group of address loads.
|
|
|
|
for (unsigned I = 0; I < Group->getFactor(); ++I) {
|
|
|
|
if (Instruction *Member = Group->getMember(I))
|
|
|
|
setWideningDecision(Member, VF, CM_Scalarize,
|
|
|
|
(VF * getMemoryInstructionCost(Member, 1)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
// Make sure I gets scalarized and receives a cost estimate without
|
|
|
|
// scalarization overhead.
|
|
|
|
ForcedScalars[VF].insert(I);
|
|
|
|
}
|
2017-02-08 19:25:23 +00:00
|
|
|
}
|
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
|
|
|
|
unsigned VF,
|
|
|
|
Type *&VectorTy) {
|
2012-10-25 21:03:48 +00:00
|
|
|
Type *RetTy = I->getType();
|
2016-12-16 16:52:35 +00:00
|
|
|
if (canTruncateToMinimalBitwidth(I, VF))
|
[LoopVectorize] Shrink integer operations into the smallest type possible
C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
type (e.g. i32) whenever arithmetic is performed on them.
For targets with native i8 or i16 operations, usually InstCombine can shrink
the arithmetic type down again. However InstCombine refuses to create illegal
types, so for targets without i8 or i16 registers, the lengthening and
shrinking remains.
Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
their scalar equivalents do not, so during vectorization it is important to
remove these lengthens and truncates when deciding the profitability of
vectorization.
The algorithm this uses starts at truncs and icmps, trawling their use-def
chains until they terminate or instructions outside the loop are found (or
unsafe instructions like inttoptr casts are found). If the use-def chains
starting from different root instructions (truncs/icmps) meet, they are
unioned. The demanded bits of each node in the graph are ORed together to form
an overall mask of the demanded bits in the entire graph. The minimum bitwidth
that graph can be truncated to is the bitwidth minus the number of leading
zeroes in the overall mask.
The intention is that this algorithm should "first do no harm", so it will
never insert extra cast instructions. This is why the use-def graphs are
unioned, so that subgraphs with different minimum bitwidths do not need casts
inserted between them.
This algorithm works hard to reduce compile time impact. DemandedBits are only
queried if there are extends of illegal types and if a truncate to an illegal
type is seen. In the general case, this results in a simple linear scan of the
instructions in the loop.
No non-noise compile time impact was seen on a clang bootstrap build.
llvm-svn: 250032
2015-10-12 12:34:45 +00:00
|
|
|
RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
|
2017-05-30 19:55:57 +00:00
|
|
|
VectorTy = isScalarAfterVectorization(I, VF) ? RetTy : ToVectorTy(RetTy, VF);
|
2016-05-19 20:38:03 +00:00
|
|
|
auto SE = PSE.getSE();
|
2012-10-25 21:03:48 +00:00
|
|
|
|
|
|
|
// TODO: We need to estimate the cost of intrinsic calls.
|
2012-10-24 20:36:32 +00:00
|
|
|
switch (I->getOpcode()) {
|
2012-12-10 21:39:02 +00:00
|
|
|
case Instruction::GetElementPtr:
|
2013-02-08 14:50:48 +00:00
|
|
|
// We mark this instruction as zero-cost because the cost of GEPs in
|
|
|
|
// vectorized code depends on whether the corresponding memory instruction
|
|
|
|
// is scalarized or not. Therefore, we handle GEPs with the memory
|
|
|
|
// instruction cost.
|
2012-12-10 21:39:02 +00:00
|
|
|
return 0;
|
|
|
|
case Instruction::Br: {
|
2017-04-12 13:13:15 +00:00
|
|
|
// In cases of scalarized and predicated instructions, there will be VF
|
|
|
|
// predicated blocks in the vectorized loop. Each branch around these
|
|
|
|
// blocks also requires an extract of its vector compare i1 element.
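// Illustrative cost shape (actual numbers come from TTI): for VF = 4 this
// amounts to the overhead of extracting four i1 lanes from the mask
// vector plus four scalar branches, i.e.
// getScalarizationOverhead(<4 x i1>) + 4 * getCFInstrCost(Br).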
|
|
|
|
bool ScalarPredicatedBB = false;
|
|
|
|
BranchInst *BI = cast<BranchInst>(I);
|
|
|
|
if (VF > 1 && BI->isConditional() &&
|
|
|
|
(PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
|
|
|
|
PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
|
|
|
|
ScalarPredicatedBB = true;
|
|
|
|
|
|
|
|
if (ScalarPredicatedBB) {
|
|
|
|
// Return cost for branches around scalarized and predicated blocks.
|
|
|
|
Type *Vec_i1Ty =
|
|
|
|
VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
|
|
|
|
return (TTI.getScalarizationOverhead(Vec_i1Ty, false, true) +
|
|
|
|
(TTI.getCFInstrCost(Instruction::Br) * VF));
|
|
|
|
} else if (I->getParent() == TheLoop->getLoopLatch() || VF == 1)
|
|
|
|
// The back-edge branch will remain, as will all scalar branches.
|
|
|
|
return TTI.getCFInstrCost(Instruction::Br);
|
|
|
|
else
|
|
|
|
// This branch will be eliminated by if-conversion.
|
|
|
|
return 0;
|
|
|
|
// Note: We currently assume zero cost for an unconditional branch inside
|
|
|
|
// a predicated block since it will become a fall-through, although we
|
|
|
|
// may decide in the future to call TTI for all branches.
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
2016-02-19 17:56:08 +00:00
|
|
|
case Instruction::PHI: {
|
|
|
|
auto *Phi = cast<PHINode>(I);
|
|
|
|
|
|
|
|
// First-order recurrences are replaced by vector shuffles inside the loop.
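// Sketch (illustrative): for VF = 4 the recurrence value is formed as
//   %rec = shufflevector <4 x T> %prev, <4 x T> %cur,
//                        <4 x i32> <i32 3, i32 4, i32 5, i32 6>
// i.e. the last lane of the previous iteration followed by the first
// VF - 1 lanes of the current one, which the extract-subvector shuffle
// cost below approximates.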
|
|
|
|
if (VF > 1 && Legal->isFirstOrderRecurrence(Phi))
|
|
|
|
return TTI.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
|
|
|
|
VectorTy, VF - 1, VectorTy);
|
|
|
|
|
2017-04-21 14:14:54 +00:00
|
|
|
// Phi nodes in non-header blocks (not inductions, reductions, etc.) are
|
|
|
|
// converted into select instructions. We require N - 1 selects per phi
|
|
|
|
// node, where N is the number of incoming values.
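// For example (illustrative), an if-converted phi with three incoming
// values a, b and c lowers to two selects chained on the edge masks:
//   %s1 = select %m.b, %b, %a
//   %s2 = select %m.c, %c, %s1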
|
|
|
|
if (VF > 1 && Phi->getParent() != TheLoop->getHeader())
|
|
|
|
return (Phi->getNumIncomingValues() - 1) *
|
|
|
|
TTI.getCmpSelInstrCost(
|
|
|
|
Instruction::Select, ToVectorTy(Phi->getType(), VF),
|
|
|
|
ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF));
|
|
|
|
|
|
|
|
return TTI.getCFInstrCost(Instruction::PHI);
|
2016-02-19 17:56:08 +00:00
|
|
|
}
|
2016-08-24 11:37:57 +00:00
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::URem:
|
|
|
|
case Instruction::SRem:
|
2016-10-05 18:30:36 +00:00
|
|
|
// If we have a predicated instruction, it may not be executed for each
|
|
|
|
// vector lane. Get the scalarization cost and scale this amount by the
|
|
|
|
// probability of executing the predicated block. If the instruction is not
|
|
|
|
// predicated, we fall through to the next case.
|
2018-02-26 11:06:36 +00:00
|
|
|
if (VF > 1 && isScalarWithPredication(I)) {
|
2016-10-13 14:19:48 +00:00
|
|
|
unsigned Cost = 0;
|
|
|
|
|
|
|
|
// These instructions have a non-void type, so account for the phi nodes
|
|
|
|
// that we will create. This cost is likely to be zero. The phi node
|
|
|
|
// cost, if any, should be scaled by the block probability because it
|
|
|
|
// models a copy at the end of each predicated block.
|
|
|
|
Cost += VF * TTI.getCFInstrCost(Instruction::PHI);
|
|
|
|
|
|
|
|
// The cost of the non-predicated instruction.
|
|
|
|
Cost += VF * TTI.getArithmeticInstrCost(I->getOpcode(), RetTy);
|
|
|
|
|
|
|
|
// The cost of insertelement and extractelement instructions needed for
|
|
|
|
// scalarization.
|
|
|
|
Cost += getScalarizationOverhead(I, VF, TTI);
|
|
|
|
|
|
|
|
// Scale the cost by the probability of executing the predicated blocks.
|
|
|
|
// This assumes the predicated block for each vector lane is equally
|
|
|
|
// likely.
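// For example, with a reciprocal block probability of 2 (i.e. the
// predicated block is assumed to execute for about half of the lanes),
// the accumulated per-lane cost computed above is simply halved.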
|
|
|
|
return Cost / getReciprocalPredBlockProb();
|
|
|
|
}
|
2017-06-03 05:18:46 +00:00
|
|
|
LLVM_FALLTHROUGH;
|
2012-12-10 21:39:02 +00:00
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::FRem:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
2013-04-04 23:26:27 +00:00
|
|
|
case Instruction::Xor: {
|
2014-01-10 18:20:32 +00:00
|
|
|
// Since we will replace the stride by 1, the multiplication should go away.
|
|
|
|
if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
|
|
|
|
return 0;
|
2013-04-04 23:26:27 +00:00
|
|
|
// Certain instructions can be cheaper to vectorize if they have a constant
|
|
|
|
// second vector operand. One example of this is shifts on x86.
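// For instance (illustrative), a vector shift by the uniform constant 8
// can usually be selected as an immediate-count shift, whereas a shift by
// a per-lane variable amount may require a much more expensive sequence
// on targets without variable vector shifts; the operand-kind and
// power-of-two hints below let TTI model that difference.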
|
|
|
|
TargetTransformInfo::OperandValueKind Op1VK =
|
2016-05-05 00:54:54 +00:00
|
|
|
TargetTransformInfo::OK_AnyValue;
|
2013-04-04 23:26:27 +00:00
|
|
|
TargetTransformInfo::OperandValueKind Op2VK =
|
2016-05-05 00:54:54 +00:00
|
|
|
TargetTransformInfo::OK_AnyValue;
|
2014-08-25 04:56:54 +00:00
|
|
|
TargetTransformInfo::OperandValueProperties Op1VP =
|
|
|
|
TargetTransformInfo::OP_None;
|
|
|
|
TargetTransformInfo::OperandValueProperties Op2VP =
|
|
|
|
TargetTransformInfo::OP_None;
|
2014-02-12 23:43:47 +00:00
|
|
|
Value *Op2 = I->getOperand(1);
|
2013-04-04 23:26:27 +00:00
|
|
|
|
2016-08-04 22:48:03 +00:00
|
|
|
// Check for a splat or for a non-uniform vector of constants.
|
2014-08-25 04:56:54 +00:00
|
|
|
if (isa<ConstantInt>(Op2)) {
|
|
|
|
ConstantInt *CInt = cast<ConstantInt>(Op2);
|
|
|
|
if (CInt && CInt->getValue().isPowerOf2())
|
|
|
|
Op2VP = TargetTransformInfo::OP_PowerOf2;
|
2013-04-04 23:26:27 +00:00
|
|
|
Op2VK = TargetTransformInfo::OK_UniformConstantValue;
|
2014-08-25 04:56:54 +00:00
|
|
|
} else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
|
2014-02-12 23:43:47 +00:00
|
|
|
Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
|
2014-08-25 04:56:54 +00:00
|
|
|
Constant *SplatValue = cast<Constant>(Op2)->getSplatValue();
|
|
|
|
if (SplatValue) {
|
|
|
|
ConstantInt *CInt = dyn_cast<ConstantInt>(SplatValue);
|
|
|
|
if (CInt && CInt->getValue().isPowerOf2())
|
|
|
|
Op2VP = TargetTransformInfo::OP_PowerOf2;
|
2014-02-12 23:43:47 +00:00
|
|
|
Op2VK = TargetTransformInfo::OK_UniformConstantValue;
|
2014-08-25 04:56:54 +00:00
|
|
|
}
|
2016-08-04 22:48:03 +00:00
|
|
|
} else if (Legal->isUniform(Op2)) {
|
|
|
|
Op2VK = TargetTransformInfo::OK_UniformValue;
|
2014-02-12 23:43:47 +00:00
|
|
|
}
|
2017-05-30 19:55:57 +00:00
|
|
|
SmallVector<const Value *, 4> Operands(I->operand_values());
|
|
|
|
unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
|
|
|
|
return N * TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy, Op1VK,
|
|
|
|
Op2VK, Op1VP, Op2VP, Operands);
|
2013-04-04 23:26:27 +00:00
|
|
|
}
|
2012-12-10 21:39:02 +00:00
|
|
|
case Instruction::Select: {
|
|
|
|
SelectInst *SI = cast<SelectInst>(I);
|
|
|
|
const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
|
|
|
|
bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
|
|
|
|
Type *CondTy = SI->getCondition()->getType();
|
2013-03-14 18:54:36 +00:00
|
|
|
if (!ScalarCond)
|
2012-12-10 21:39:02 +00:00
|
|
|
CondTy = VectorType::get(CondTy, VF);
|
|
|
|
|
2017-04-12 11:49:08 +00:00
|
|
|
return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, I);
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
|
|
|
case Instruction::ICmp:
|
|
|
|
case Instruction::FCmp: {
|
|
|
|
Type *ValTy = I->getOperand(0)->getType();
|
2015-11-09 14:32:05 +00:00
|
|
|
Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
|
2016-12-16 16:52:35 +00:00
|
|
|
if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
|
|
|
|
ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
|
2012-12-10 21:39:02 +00:00
|
|
|
VectorTy = ToVectorTy(ValTy, VF);
|
2017-04-12 11:49:08 +00:00
|
|
|
return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, I);
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
2013-02-07 19:05:21 +00:00
|
|
|
case Instruction::Store:
|
|
|
|
case Instruction::Load: {
|
2017-05-30 19:55:57 +00:00
|
|
|
unsigned Width = VF;
|
|
|
|
if (Width > 1) {
|
|
|
|
InstWidening Decision = getWideningDecision(I, Width);
|
|
|
|
assert(Decision != CM_Unknown &&
|
|
|
|
"CM decision should be taken at this point");
|
|
|
|
if (Decision == CM_Scalarize)
|
|
|
|
Width = 1;
|
|
|
|
}
|
|
|
|
VectorTy = ToVectorTy(getMemInstValueType(I), Width);
|
2017-02-08 19:25:23 +00:00
|
|
|
return getMemoryInstructionCost(I, VF);
|
2013-02-07 19:05:21 +00:00
|
|
|
}
|
2012-12-10 21:39:02 +00:00
|
|
|
case Instruction::ZExt:
|
|
|
|
case Instruction::SExt:
|
|
|
|
case Instruction::FPToUI:
|
|
|
|
case Instruction::FPToSI:
|
|
|
|
case Instruction::FPExt:
|
|
|
|
case Instruction::PtrToInt:
|
|
|
|
case Instruction::IntToPtr:
|
|
|
|
case Instruction::SIToFP:
|
|
|
|
case Instruction::UIToFP:
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
case Instruction::BitCast: {
|
2017-02-14 16:28:32 +00:00
|
|
|
// We optimize the truncation of induction variables having constant
|
|
|
|
// integer steps. The cost of these truncations is the same as the scalar
|
|
|
|
// operation.
|
|
|
|
if (isOptimizableIVTruncate(I, VF)) {
|
|
|
|
auto *Trunc = cast<TruncInst>(I);
|
|
|
|
return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
|
2017-04-12 11:49:08 +00:00
|
|
|
Trunc->getSrcTy(), Trunc);
|
2017-02-14 16:28:32 +00:00
|
|
|
}
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2015-10-12 12:34:45 +00:00
|
|
|
Type *SrcScalarTy = I->getOperand(0)->getType();
|
2017-05-30 19:55:57 +00:00
|
|
|
Type *SrcVecTy =
|
|
|
|
VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
|
2016-12-16 16:52:35 +00:00
|
|
|
if (canTruncateToMinimalBitwidth(I, VF)) {
|
2015-10-12 12:34:45 +00:00
|
|
|
// This cast is going to be shrunk. This may remove the cast or it might
|
|
|
|
// turn it into a slightly different cast. For example, if MinBW == 16,
|
|
|
|
// "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
|
|
|
|
//
|
|
|
|
// Calculate the modified src and dest types.
|
|
|
|
Type *MinVecTy = VectorTy;
|
|
|
|
if (I->getOpcode() == Instruction::Trunc) {
|
|
|
|
SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
|
2016-05-05 00:54:54 +00:00
|
|
|
VectorTy =
|
|
|
|
largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
|
2015-10-12 12:34:45 +00:00
|
|
|
} else if (I->getOpcode() == Instruction::ZExt ||
|
|
|
|
I->getOpcode() == Instruction::SExt) {
|
|
|
|
SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
|
2016-05-05 00:54:54 +00:00
|
|
|
VectorTy =
|
|
|
|
smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
|
2015-10-12 12:34:45 +00:00
|
|
|
}
|
|
|
|
}
|
2016-02-03 23:16:39 +00:00
|
|
|
|
2017-05-30 19:55:57 +00:00
|
|
|
unsigned N = isScalarAfterVectorization(I, VF) ? VF : 1;
|
|
|
|
return N * TTI.getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy, I);
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
|
|
|
case Instruction::Call: {
|
2015-03-17 19:46:50 +00:00
|
|
|
bool NeedToScalarize;
|
2013-02-27 15:24:19 +00:00
|
|
|
CallInst *CI = cast<CallInst>(I);
|
2015-03-17 19:46:50 +00:00
|
|
|
unsigned CallCost = getVectorCallCost(CI, VF, TTI, TLI, NeedToScalarize);
|
2016-04-19 19:10:21 +00:00
|
|
|
if (getVectorIntrinsicIDForCall(CI, TLI))
|
2015-03-17 19:46:50 +00:00
|
|
|
return std::min(CallCost, getVectorIntrinsicCost(CI, VF, TTI, TLI));
|
|
|
|
return CallCost;
|
2012-12-10 21:39:02 +00:00
|
|
|
}
|
2016-08-24 11:37:57 +00:00
|
|
|
default:
|
2012-12-23 07:23:55 +00:00
|
|
|
// The cost of executing VF copies of the scalar instruction. This opcode
|
|
|
|
// is unknown. Assume that it is the same as 'mul'.
|
2016-08-24 11:37:57 +00:00
|
|
|
return VF * TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy) +
|
2016-10-13 14:19:48 +00:00
|
|
|
getScalarizationOverhead(I, VF, TTI);
|
2016-05-05 00:54:54 +00:00
|
|
|
} // end of switch.
|
2012-10-24 20:36:32 +00:00
|
|
|
}
|
|
|
|
|
2012-10-17 18:25:06 +00:00
|
|
|
char LoopVectorize::ID = 0;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2012-10-17 18:25:06 +00:00
|
|
|
static const char lv_name[] = "Loop Vectorization";
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2012-10-17 18:25:06 +00:00
|
|
|
INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
|
2015-01-31 03:43:40 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
|
2015-09-09 17:55:00 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
|
2016-12-19 08:22:17 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
|
2015-07-14 23:40:50 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
|
2014-01-13 13:07:17 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
|
2015-08-17 02:08:17 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
|
2015-01-17 14:16:18 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
|
2016-07-08 20:55:26 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
|
2016-04-18 23:55:01 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
|
2016-07-20 04:03:43 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
|
2012-10-17 18:25:06 +00:00
|
|
|
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
|
|
|
|
|
|
|
|
namespace llvm {
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
Pass *createLoopVectorizePass(bool NoUnrolling, bool AlwaysVectorize) {
|
|
|
|
return new LoopVectorize(NoUnrolling, AlwaysVectorize);
|
|
|
|
}
|
2012-12-10 21:39:02 +00:00
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
} // end namespace llvm
|
2013-02-05 15:08:02 +00:00
|
|
|
|
2017-10-12 23:30:03 +00:00
|
|
|
bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
|
2016-08-01 20:08:09 +00:00
|
|
|
// Check if the pointer operand of a load or store instruction is
|
|
|
|
// consecutive.
|
2018-03-09 21:05:58 +00:00
|
|
|
if (auto *Ptr = getLoadStorePointerOperand(Inst))
|
2016-08-01 20:08:09 +00:00
|
|
|
return Legal->isConsecutivePtr(Ptr);
|
2013-02-05 15:08:02 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2016-05-19 20:38:03 +00:00
|
|
|
void LoopVectorizationCostModel::collectValuesToIgnore() {
|
|
|
|
// Ignore ephemeral values.
|
2016-12-19 08:22:17 +00:00
|
|
|
CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
|
2016-05-19 20:38:03 +00:00
|
|
|
|
|
|
|
// Ignore type-promoting instructions we identified during reduction
|
|
|
|
// detection.
|
|
|
|
for (auto &Reduction : *Legal->getReductionVars()) {
|
|
|
|
RecurrenceDescriptor &RedDes = Reduction.second;
|
|
|
|
SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
|
|
|
|
VecValuesToIgnore.insert(Casts.begin(), Casts.end());
|
|
|
|
}
|
[LV] Support efficient vectorization of an induction with redundant casts
D30041 extended SCEVPredicateRewriter to improve handling of Phi nodes whose
update chain involves casts; PSCEV can now build an AddRecurrence for some
forms of such phi nodes, under the proper runtime overflow test. This means
that we can identify such phi nodes as an induction, and the loop-vectorizer
can now vectorize such inductions, however inefficiently. The vectorizer
doesn't know that it can ignore the casts, and so it vectorizes them.
This patch records the casts in the InductionDescriptor, so that they could
be marked to be ignored for cost calculation (we use VecValuesToIgnore for
that) and ignored for vectorization/widening/scalarization (i.e. treated as
TriviallyDead).
In addition to marking all these casts to be ignored, we also need to make
sure that each cast is mapped to the right vector value in the vector loop body
(be it a widened, vectorized, or scalarized induction). So whenever an
induction phi is mapped to a vector value (during vectorization/widening/
scalarization), we also map the respective cast instruction (if exists) to that
vector value. (If the phi-update sequence of an induction involves more than one
cast, then the above mapping to vector value is relevant only for the last cast
of the sequence as we allow only the "last cast" to be used outside the
induction update chain itself).
This is the last step in addressing PR30654.
llvm-svn: 320672
2017-12-14 07:56:31 +00:00
|
|
|
// Ignore type-casting instructions we identified during induction
|
|
|
|
// detection.
|
|
|
|
for (auto &Induction : *Legal->getInductionVars()) {
|
|
|
|
InductionDescriptor &IndDes = Induction.second;
|
|
|
|
const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
|
|
|
|
VecValuesToIgnore.insert(Casts.begin(), Casts.end());
|
|
|
|
}
|
2016-05-19 20:38:03 +00:00
|
|
|
}
|
|
|
|
|
2018-01-07 16:02:58 +00:00
|
|
|
VectorizationFactor
|
2017-03-14 13:07:04 +00:00
|
|
|
LoopVectorizationPlanner::plan(bool OptForSize, unsigned UserVF) {
|
|
|
|
// Width 1 means no vectorization, cost 0 means uncomputed cost.
|
2018-01-07 16:02:58 +00:00
|
|
|
const VectorizationFactor NoVectorization = {1U, 0U};
|
2017-03-14 13:07:04 +00:00
|
|
|
Optional<unsigned> MaybeMaxVF = CM.computeMaxVF(OptForSize);
|
|
|
|
if (!MaybeMaxVF.hasValue()) // Cases considered too costly to vectorize.
|
|
|
|
return NoVectorization;
|
|
|
|
|
|
|
|
if (UserVF) {
|
|
|
|
DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
|
|
|
|
assert(isPowerOf2_32(UserVF) && "VF needs to be a power of two");
|
|
|
|
// Collect the instructions (and their associated costs) that will be more
|
|
|
|
// profitable to scalarize.
|
|
|
|
CM.selectUserVectorizationFactor(UserVF);
|
2017-08-27 12:55:46 +00:00
|
|
|
buildVPlans(UserVF, UserVF);
|
|
|
|
DEBUG(printPlans(dbgs()));
|
2017-03-14 13:07:04 +00:00
|
|
|
return {UserVF, 0};
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned MaxVF = MaybeMaxVF.getValue();
|
|
|
|
assert(MaxVF != 0 && "MaxVF is zero.");
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
for (unsigned VF = 1; VF <= MaxVF; VF *= 2) {
|
|
|
|
// Collect Uniform and Scalar instructions after vectorization with VF.
|
|
|
|
CM.collectUniformsAndScalars(VF);
|
|
|
|
|
|
|
|
// Collect the instructions (and their associated costs) that will be more
|
|
|
|
// profitable to scalarize.
|
|
|
|
if (VF > 1)
|
|
|
|
CM.collectInstsToScalarize(VF);
|
|
|
|
}
|
|
|
|
|
|
|
|
buildVPlans(1, MaxVF);
|
|
|
|
DEBUG(printPlans(dbgs()));
|
2017-03-14 13:07:04 +00:00
|
|
|
if (MaxVF == 1)
|
|
|
|
return NoVectorization;
|
|
|
|
|
|
|
|
// Select the optimal vectorization factor.
|
|
|
|
return CM.selectVectorizationFactor(MaxVF);
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
void LoopVectorizationPlanner::setBestPlan(unsigned VF, unsigned UF) {
|
|
|
|
DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF << '\n');
|
|
|
|
BestVF = VF;
|
|
|
|
BestUF = UF;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
erase_if(VPlans, [VF](const VPlanPtr &Plan) {
|
2017-10-31 14:58:22 +00:00
|
|
|
return !Plan->hasVF(VF);
|
|
|
|
});
|
2017-08-27 12:55:46 +00:00
|
|
|
assert(VPlans.size() == 1 && "Best VF has not a single VPlan.");
|
|
|
|
}
|
|
|
|
|
|
|
|
void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV,
|
|
|
|
DominatorTree *DT) {
|
2017-05-11 11:36:33 +00:00
|
|
|
// Perform the actual loop transformation.
|
|
|
|
|
|
|
|
// 1. Create a new empty loop. Unlink the old loop and connect the new one.
|
2017-11-20 12:01:47 +00:00
|
|
|
VPCallbackILV CallbackILV(ILV);
|
|
|
|
|
|
|
|
VPTransformState State{BestVF, BestUF, LI,
|
|
|
|
DT, ILV.Builder, ILV.VectorLoopValueMap,
|
|
|
|
&ILV, CallbackILV};
|
2017-08-27 12:55:46 +00:00
|
|
|
State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
|
2017-05-11 11:36:33 +00:00
|
|
|
|
|
|
|
//===------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Notice: any optimization or new instruction that goes
|
|
|
|
// into the code below should also be implemented in
|
|
|
|
// the cost-model.
|
|
|
|
//
|
|
|
|
//===------------------------------------------------===//
|
|
|
|
|
|
|
|
// 2. Copy and widen instructions from the old loop into the new loop.
|
2017-08-27 12:55:46 +00:00
|
|
|
assert(VPlans.size() == 1 && "Not a single VPlan to execute.");
|
2017-10-31 14:58:22 +00:00
|
|
|
VPlans.front()->execute(&State);
|
2017-05-11 11:36:33 +00:00
|
|
|
|
|
|
|
// 3. Fix the vectorized code: take care of header phi's, live-outs,
|
|
|
|
// predication, updating analyses.
|
|
|
|
ILV.fixVectorizedLoop();
|
|
|
|
}
|
|
|
|
|
|
|
|
void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
|
|
|
|
SmallPtrSetImpl<Instruction *> &DeadInstructions) {
|
|
|
|
BasicBlock *Latch = OrigLoop->getLoopLatch();
|
|
|
|
|
|
|
|
// We create new control-flow for the vectorized loop, so the original
|
|
|
|
// condition will be dead after vectorization if it's only used by the
|
|
|
|
// branch.
|
|
|
|
auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
|
|
|
|
if (Cmp && Cmp->hasOneUse())
|
|
|
|
DeadInstructions.insert(Cmp);
|
|
|
|
|
|
|
|
// We create new "steps" for induction variable updates to which the original
|
|
|
|
// induction variables map. An original update instruction will be dead if
|
|
|
|
// all its users except the induction variable are dead.
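// For example, the canonical update %i.next = add i64 %i, 1 is added here
// when its only users are the induction phi itself and the latch compare
// that was already recorded as dead above.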
|
|
|
|
for (auto &Induction : *Legal->getInductionVars()) {
|
|
|
|
PHINode *Ind = Induction.first;
|
|
|
|
auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
|
2017-10-12 23:30:03 +00:00
|
|
|
if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
|
2017-05-11 11:36:33 +00:00
|
|
|
return U == Ind || DeadInstructions.count(cast<Instruction>(U));
|
|
|
|
}))
|
|
|
|
DeadInstructions.insert(IndUpdate);
|
2017-12-14 07:56:31 +00:00
|
|
|
|
|
|
|
// We record as "Dead" also the type-casting instructions we had identified
|
|
|
|
// during induction analysis. We don't need any handling for them in the
|
|
|
|
// vectorized loop because we have proven that, under a proper runtime
|
|
|
|
// test guarding the vectorized loop, the value of the phi, and the casted
|
|
|
|
// value of the phi, are the same. The last instruction in this casting chain
|
|
|
|
// will get its scalar/vector/widened def from the scalar/vector/widened def
|
|
|
|
// of the respective phi node. Any other casts in the induction def-use chain
|
|
|
|
// have no other uses outside the phi update chain, and will be ignored.
|
|
|
|
InductionDescriptor &IndDes = Induction.second;
|
|
|
|
const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
|
|
|
|
DeadInstructions.insert(Casts.begin(), Casts.end());
|
2017-05-11 11:36:33 +00:00
|
|
|
}
|
|
|
|
}
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2016-05-05 00:54:54 +00:00
|
|
|
Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
|
2013-08-26 22:33:26 +00:00
|
|
|
|
2016-07-24 07:24:54 +00:00
|
|
|
Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step,
|
|
|
|
Instruction::BinaryOps BinOp) {
|
2013-08-26 22:33:26 +00:00
|
|
|
// When unrolling and the VF is 1, we only need to add a simple scalar.
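// For example (unroll-only, VF = 1): for unroll part 2 with a step of 1,
// the value produced below is simply Val + 2.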
|
2016-07-24 07:24:54 +00:00
|
|
|
Type *Ty = Val->getType();
|
|
|
|
assert(!Ty->isVectorTy() && "Val must be a scalar");
|
|
|
|
|
|
|
|
if (Ty->isFloatingPointTy()) {
|
|
|
|
Constant *C = ConstantFP::get(Ty, (double)StartIdx);
|
|
|
|
|
|
|
|
// Floating point operations had to be 'fast' to enable the unrolling.
|
|
|
|
Value *MulOp = addFastMathFlag(Builder.CreateFMul(C, Step));
|
|
|
|
return addFastMathFlag(Builder.CreateBinOp(BinOp, Val, MulOp));
|
|
|
|
}
|
|
|
|
Constant *C = ConstantInt::get(Ty, StartIdx);
|
2015-01-30 05:02:21 +00:00
|
|
|
return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction");
|
2013-08-26 22:33:26 +00:00
|
|
|
}
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
static void AddRuntimeUnrollDisableMetaData(Loop *L) {
|
|
|
|
SmallVector<Metadata *, 4> MDs;
|
|
|
|
// Reserve the first location for a self-reference to the LoopID metadata node.
|
|
|
|
MDs.push_back(nullptr);
|
|
|
|
bool IsUnrollMetadata = false;
|
|
|
|
MDNode *LoopID = L->getLoopID();
|
|
|
|
if (LoopID) {
|
|
|
|
// First find existing loop unrolling disable metadata.
|
|
|
|
for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
|
2016-07-12 19:35:15 +00:00
|
|
|
auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
|
2016-07-09 22:56:50 +00:00
|
|
|
if (MD) {
|
2016-07-12 19:35:15 +00:00
|
|
|
const auto *S = dyn_cast<MDString>(MD->getOperand(0));
|
2016-07-09 22:56:50 +00:00
|
|
|
IsUnrollMetadata =
|
|
|
|
S && S->getString().startswith("llvm.loop.unroll.disable");
|
|
|
|
}
|
|
|
|
MDs.push_back(LoopID->getOperand(i));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!IsUnrollMetadata) {
|
|
|
|
// Add runtime unroll disable metadata.
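// The resulting loop metadata looks roughly like:
//   br ... !llvm.loop !0
//   !0 = distinct !{!0, ..., !1}
//   !1 = !{!"llvm.loop.unroll.runtime.disable"}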
|
|
|
|
LLVMContext &Context = L->getHeader()->getContext();
|
|
|
|
SmallVector<Metadata *, 1> DisableOperands;
|
|
|
|
DisableOperands.push_back(
|
|
|
|
MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
|
|
|
|
MDNode *DisableNode = MDNode::get(Context, DisableOperands);
|
|
|
|
MDs.push_back(DisableNode);
|
|
|
|
MDNode *NewLoopID = MDNode::get(Context, MDs);
|
|
|
|
// Set operand 0 to refer to the loop id itself.
|
|
|
|
NewLoopID->replaceOperandWith(0, NewLoopID);
|
|
|
|
L->setLoopID(NewLoopID);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
bool LoopVectorizationPlanner::getDecisionAndClampRange(
|
|
|
|
const std::function<bool(unsigned)> &Predicate, VFRange &Range) {
|
|
|
|
assert(Range.End > Range.Start && "Trying to test an empty VF range.");
|
|
|
|
bool PredicateAtRangeStart = Predicate(Range.Start);
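// Worked example: for Range = {2, 16}, if Predicate(2) and Predicate(4)
// hold but Predicate(8) does not, Range.End is clamped to 8 and we return
// true, so the caller's decision applies to VFs 2 and 4 only.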
|
|
|
|
|
|
|
|
for (unsigned TmpVF = Range.Start * 2; TmpVF < Range.End; TmpVF *= 2)
|
|
|
|
if (Predicate(TmpVF) != PredicateAtRangeStart) {
|
|
|
|
Range.End = TmpVF;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return PredicateAtRangeStart;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
|
|
|
|
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
|
|
|
|
/// of VF's starting at a given VF and extending it as much as possible. Each
|
|
|
|
/// vectorization decision can potentially shorten this sub-range during
|
|
|
|
/// buildVPlan().
|
|
|
|
void LoopVectorizationPlanner::buildVPlans(unsigned MinVF, unsigned MaxVF) {
|
2017-11-20 12:01:47 +00:00
|
|
|
|
|
|
|
// Collect conditions feeding internal conditional branches; they need to be
|
|
|
|
// represented in VPlan for it to model masking.
|
|
|
|
SmallPtrSet<Value *, 1> NeedDef;
|
|
|
|
|
|
|
|
auto *Latch = OrigLoop->getLoopLatch();
|
|
|
|
for (BasicBlock *BB : OrigLoop->blocks()) {
|
|
|
|
if (BB == Latch)
|
|
|
|
continue;
|
|
|
|
BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
|
|
|
|
if (Branch && Branch->isConditional())
|
|
|
|
NeedDef.insert(Branch->getCondition());
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
for (unsigned VF = MinVF; VF < MaxVF + 1;) {
|
|
|
|
VFRange SubRange = {VF, MaxVF + 1};
|
2017-11-20 12:01:47 +00:00
|
|
|
VPlans.push_back(buildVPlan(SubRange, NeedDef));
|
2017-08-27 12:55:46 +00:00
|
|
|
VF = SubRange.End;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *LoopVectorizationPlanner::createEdgeMask(BasicBlock *Src,
|
|
|
|
BasicBlock *Dst,
|
|
|
|
VPlanPtr &Plan) {
|
2017-11-14 12:09:30 +00:00
|
|
|
assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
|
|
|
|
|
|
|
|
// Look for cached value.
|
|
|
|
std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
|
|
|
|
EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
|
|
|
|
if (ECEntryIt != EdgeMaskCache.end())
|
|
|
|
return ECEntryIt->second;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *SrcMask = createBlockInMask(Src, Plan);
|
2017-11-14 12:09:30 +00:00
|
|
|
|
|
|
|
// The terminator has to be a branch inst!
|
|
|
|
BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
|
|
|
|
assert(BI && "Unexpected terminator found");
|
|
|
|
|
|
|
|
if (!BI->isConditional())
|
|
|
|
return EdgeMaskCache[Edge] = SrcMask;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *EdgeMask = Plan->getVPValue(BI->getCondition());
|
|
|
|
assert(EdgeMask && "No Edge Mask found for condition");
|
2017-11-14 12:09:30 +00:00
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
if (BI->getSuccessor(0) != Dst)
|
|
|
|
EdgeMask = Builder.createNot(EdgeMask);
|
2017-11-14 12:09:30 +00:00
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
if (SrcMask) // Otherwise block in-mask is all-one, no need to AND.
|
|
|
|
EdgeMask = Builder.createAnd(EdgeMask, SrcMask);
|
2017-11-14 12:09:30 +00:00
|
|
|
|
|
|
|
return EdgeMaskCache[Edge] = EdgeMask;
|
|
|
|
}
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *LoopVectorizationPlanner::createBlockInMask(BasicBlock *BB,
|
|
|
|
VPlanPtr &Plan) {
|
2017-11-14 12:09:30 +00:00
|
|
|
assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
|
|
|
|
|
|
|
|
// Look for cached value.
|
|
|
|
BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
|
|
|
|
if (BCEntryIt != BlockMaskCache.end())
|
|
|
|
return BCEntryIt->second;
|
|
|
|
|
|
|
|
// All-one mask is modelled as no-mask following the convention for masked
|
|
|
|
// load/store/gather/scatter. Initialize BlockMask to no-mask.
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *BlockMask = nullptr;
|
2017-11-14 12:09:30 +00:00
|
|
|
|
|
|
|
// Loop incoming mask is all-one.
|
|
|
|
if (OrigLoop->getHeader() == BB)
|
|
|
|
return BlockMaskCache[BB] = BlockMask;
|
|
|
|
|
|
|
|
// This is the block mask. We OR all incoming edges.
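// For example, if BB has two predecessors P1 and P2, the block mask is
//   mask(BB) = edgeMask(P1 -> BB) | edgeMask(P2 -> BB)
// where each edge mask is the predecessor's block mask AND'ed with (the
// negation of) its branch condition; a null mask stands for all-ones.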
|
|
|
|
for (auto *Predecessor : predecessors(BB)) {
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
|
|
|
|
if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
|
2017-11-14 12:09:30 +00:00
|
|
|
return BlockMaskCache[BB] = EdgeMask;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
if (!BlockMask) { // BlockMask has its initialized nullptr value.
|
2017-11-14 12:09:30 +00:00
|
|
|
BlockMask = EdgeMask;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
BlockMask = Builder.createOr(BlockMask, EdgeMask);
|
2017-11-14 12:09:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return BlockMaskCache[BB] = BlockMask;
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
VPInterleaveRecipe *
|
|
|
|
LoopVectorizationPlanner::tryToInterleaveMemory(Instruction *I,
|
|
|
|
VFRange &Range) {
|
|
|
|
const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(I);
|
|
|
|
if (!IG)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
// Now check if IG is relevant for VF's in the given range.
|
|
|
|
auto isIGMember = [&](Instruction *I) -> std::function<bool(unsigned)> {
|
|
|
|
return [=](unsigned VF) -> bool {
|
|
|
|
return (VF >= 2 && // Query is illegal for VF == 1
|
|
|
|
CM.getWideningDecision(I, VF) ==
|
|
|
|
LoopVectorizationCostModel::CM_Interleave);
|
|
|
|
};
|
|
|
|
};
|
|
|
|
if (!getDecisionAndClampRange(isIGMember(I), Range))
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
// I is a member of an InterleaveGroup for VF's in the (possibly trimmed)
|
|
|
|
// range. If it's the primary member of the IG, construct a VPInterleaveRecipe.
|
|
|
|
// Otherwise, it's an adjunct member of the IG, do not construct any Recipe.
|
|
|
|
assert(I == IG->getInsertPos() &&
|
|
|
|
"Generating a recipe for an adjunct member of an interleave group");
|
|
|
|
|
|
|
|
return new VPInterleaveRecipe(IG);
|
|
|
|
}
|
|
|
|
|
2017-11-14 12:09:30 +00:00
|
|
|
VPWidenMemoryInstructionRecipe *
|
2017-11-20 12:01:47 +00:00
|
|
|
LoopVectorizationPlanner::tryToWidenMemory(Instruction *I, VFRange &Range,
|
|
|
|
VPlanPtr &Plan) {
|
2017-11-14 12:09:30 +00:00
|
|
|
if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
auto willWiden = [&](unsigned VF) -> bool {
|
|
|
|
if (VF == 1)
|
|
|
|
return false;
|
|
|
|
if (CM.isScalarAfterVectorization(I, VF) ||
|
|
|
|
CM.isProfitableToScalarize(I, VF))
|
|
|
|
return false;
|
|
|
|
LoopVectorizationCostModel::InstWidening Decision =
|
|
|
|
CM.getWideningDecision(I, VF);
|
|
|
|
assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
|
|
|
|
"CM decision should be taken at this point.");
|
|
|
|
assert(Decision != LoopVectorizationCostModel::CM_Interleave &&
|
|
|
|
"Interleave memory opportunity should be caught earlier.");
|
|
|
|
return Decision != LoopVectorizationCostModel::CM_Scalarize;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!getDecisionAndClampRange(willWiden, Range))
|
|
|
|
return nullptr;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VPValue *Mask = nullptr;
|
|
|
|
if (Legal->isMaskRequired(I))
|
|
|
|
Mask = createBlockInMask(I->getParent(), Plan);
|
|
|
|
|
|
|
|
return new VPWidenMemoryInstructionRecipe(*I, Mask);
|
2017-11-14 12:09:30 +00:00
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
VPWidenIntOrFpInductionRecipe *
|
|
|
|
LoopVectorizationPlanner::tryToOptimizeInduction(Instruction *I,
|
|
|
|
VFRange &Range) {
|
|
|
|
if (PHINode *Phi = dyn_cast<PHINode>(I)) {
|
|
|
|
// Check if this is an integer or fp induction. If so, build the recipe that
|
|
|
|
// produces its scalar and vector values.
|
|
|
|
InductionDescriptor II = Legal->getInductionVars()->lookup(Phi);
|
|
|
|
if (II.getKind() == InductionDescriptor::IK_IntInduction ||
|
|
|
|
II.getKind() == InductionDescriptor::IK_FpInduction)
|
|
|
|
return new VPWidenIntOrFpInductionRecipe(Phi);
|
|
|
|
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Optimize the special case where the source is a constant integer
|
|
|
|
// induction variable. Notice that we can only optimize the 'trunc' case
|
|
|
|
// because (a) FP conversions lose precision, (b) sext/zext may wrap, and
|
|
|
|
// (c) other casts depend on pointer size.
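// For example (illustrative), a truncation of the canonical induction
//   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %loop ]
//   %t  = trunc i64 %iv to i32
// can instead be widened directly as an i32 induction <0, 1, 2, ...>.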
|
|
|
|
|
|
|
|
// Determine whether \p K is a truncation based on an induction variable that
|
|
|
|
// can be optimized.
|
|
|
|
auto isOptimizableIVTruncate =
|
|
|
|
[&](Instruction *K) -> std::function<bool(unsigned)> {
|
|
|
|
return
|
|
|
|
[=](unsigned VF) -> bool { return CM.isOptimizableIVTruncate(K, VF); };
|
|
|
|
};
|
|
|
|
|
|
|
|
if (isa<TruncInst>(I) &&
|
|
|
|
getDecisionAndClampRange(isOptimizableIVTruncate(I), Range))
|
|
|
|
return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
|
|
|
|
cast<TruncInst>(I));
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
VPBlendRecipe *
|
|
|
|
LoopVectorizationPlanner::tryToBlend(Instruction *I, VPlanPtr &Plan) {
|
2017-11-14 12:09:30 +00:00
|
|
|
PHINode *Phi = dyn_cast<PHINode>(I);
|
|
|
|
if (!Phi || Phi->getParent() == OrigLoop->getHeader())
|
|
|
|
return nullptr;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
// We know that all PHIs in non-header blocks are converted into selects, so
|
|
|
|
// we don't have to worry about the insertion order and we can just use the
|
|
|
|
// builder. At this point we generate the predication tree. There may be
|
|
|
|
// duplications since this is a simple recursive scan, but future
|
|
|
|
// optimizations will clean it up.
|
|
|
|
|
|
|
|
SmallVector<VPValue *, 2> Masks;
|
|
|
|
unsigned NumIncoming = Phi->getNumIncomingValues();
|
|
|
|
for (unsigned In = 0; In < NumIncoming; In++) {
|
|
|
|
VPValue *EdgeMask =
|
|
|
|
createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
|
|
|
|
assert((EdgeMask || NumIncoming == 1) &&
|
|
|
|
"Multiple predecessors with one having a full mask");
|
|
|
|
if (EdgeMask)
|
|
|
|
Masks.push_back(EdgeMask);
|
|
|
|
}
|
|
|
|
return new VPBlendRecipe(Phi, Masks);
|
2017-11-14 12:09:30 +00:00
|
|
|
}
|
|
|
|
|
2017-10-05 12:41:49 +00:00
|
|
|
bool LoopVectorizationPlanner::tryToWiden(Instruction *I, VPBasicBlock *VPBB,
|
|
|
|
VFRange &Range) {
|
2018-02-26 11:06:36 +00:00
|
|
|
if (CM.isScalarWithPredication(I))
|
2017-10-05 12:41:49 +00:00
|
|
|
return false;
|
2017-08-27 12:55:46 +00:00
|
|
|
|
2017-09-02 16:41:55 +00:00
|
|
|
auto IsVectorizableOpcode = [](unsigned Opcode) {
|
|
|
|
switch (Opcode) {
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::BitCast:
|
|
|
|
case Instruction::Br:
|
|
|
|
case Instruction::Call:
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::FCmp:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::FPExt:
|
|
|
|
case Instruction::FPToSI:
|
|
|
|
case Instruction::FPToUI:
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
case Instruction::FRem:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::GetElementPtr:
|
|
|
|
case Instruction::ICmp:
|
|
|
|
case Instruction::IntToPtr:
|
|
|
|
case Instruction::Load:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::PHI:
|
|
|
|
case Instruction::PtrToInt:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::Select:
|
|
|
|
case Instruction::SExt:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::SIToFP:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::Store:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::UIToFP:
|
|
|
|
case Instruction::URem:
|
|
|
|
case Instruction::Xor:
|
|
|
|
case Instruction::ZExt:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!IsVectorizableOpcode(I->getOpcode()))
|
2017-10-05 12:41:49 +00:00
|
|
|
return false;
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
if (CallInst *CI = dyn_cast<CallInst>(I)) {
|
|
|
|
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
|
|
|
|
if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
|
Add an @llvm.sideeffect intrinsic
This patch implements Chandler's idea [0] for supporting languages that
require support for infinite loops with side effects, such as Rust, providing
part of a solution to bug 965 [1].
Specifically, it adds an `llvm.sideeffect()` intrinsic, which has no actual
effect, but which appears to optimization passes to have obscure side effects,
such that they don't optimize away loops containing it. It also teaches
several optimization passes to ignore this intrinsic, so that it doesn't
significantly impact optimization in most cases.
As discussed on llvm-dev [2], this patch is the first of two major parts.
The second part, to change LLVM's semantics to have defined behavior
on infinite loops by default, with a function attribute for opting into
potential-undefined-behavior, will be implemented and posted for review in
a separate patch.
[0] http://lists.llvm.org/pipermail/llvm-dev/2015-July/088103.html
[1] https://bugs.llvm.org/show_bug.cgi?id=965
[2] http://lists.llvm.org/pipermail/llvm-dev/2017-October/118632.html
Differential Revision: https://reviews.llvm.org/D38336
llvm-svn: 317729
2017-11-08 21:59:51 +00:00
|
|
|
ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect))
|
2017-10-05 12:41:49 +00:00
|
|
|
return false;
|
2017-08-27 12:55:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
auto willWiden = [&](unsigned VF) -> bool {
|
|
|
|
if (!isa<PHINode>(I) && (CM.isScalarAfterVectorization(I, VF) ||
|
|
|
|
CM.isProfitableToScalarize(I, VF)))
|
|
|
|
return false;
|
|
|
|
if (CallInst *CI = dyn_cast<CallInst>(I)) {
|
|
|
|
Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
|
|
|
|
// The following case may be scalarized depending on the VF.
|
|
|
|
// The flag shows whether we use an intrinsic or an ordinary call for the vectorized
|
|
|
|
// version of the instruction.
|
|
|
|
// Is it beneficial to perform intrinsic call compared to lib call?
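// For example (illustrative), a call to sqrtf may be vectorized either via
// the llvm.sqrt intrinsic or via a vector library routine, whichever the
// cost model finds cheaper.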
|
|
|
|
bool NeedToScalarize;
|
|
|
|
unsigned CallCost = getVectorCallCost(CI, VF, *TTI, TLI, NeedToScalarize);
|
|
|
|
bool UseVectorIntrinsic =
|
|
|
|
ID && getVectorIntrinsicCost(CI, VF, *TTI, TLI) <= CallCost;
|
|
|
|
return UseVectorIntrinsic || !NeedToScalarize;
|
|
|
|
}
|
|
|
|
if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
|
2017-11-20 12:01:47 +00:00
|
|
|
assert(CM.getWideningDecision(I, VF) ==
|
|
|
|
LoopVectorizationCostModel::CM_Scalarize &&
|
|
|
|
"Memory widening decisions should have been taken care by now");
|
|
|
|
return false;
|
2017-08-27 12:55:46 +00:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!getDecisionAndClampRange(willWiden, Range))
|
2017-10-05 12:41:49 +00:00
|
|
|
return false;
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
// Success: widen this instruction. We optimize the common case where
|
|
|
|
// consecutive instructions can be represented by a single recipe.
|
2017-10-05 12:41:49 +00:00
|
|
|
if (!VPBB->empty()) {
|
|
|
|
VPWidenRecipe *LastWidenRecipe = dyn_cast<VPWidenRecipe>(&VPBB->back());
|
|
|
|
if (LastWidenRecipe && LastWidenRecipe->appendInstruction(I))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
VPBB->appendRecipe(new VPWidenRecipe(I));
|
|
|
|
return true;
|
2017-08-27 12:55:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VPBasicBlock *LoopVectorizationPlanner::handleReplication(
|
|
|
|
Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
|
2017-11-20 12:01:47 +00:00
|
|
|
DenseMap<Instruction *, VPReplicateRecipe *> &PredInst2Recipe,
|
|
|
|
VPlanPtr &Plan) {
|
2017-08-27 12:55:46 +00:00
|
|
|
bool IsUniform = getDecisionAndClampRange(
|
|
|
|
[&](unsigned VF) { return CM.isUniformAfterVectorization(I, VF); },
|
|
|
|
Range);
|
|
|
|
|
2018-02-26 11:06:36 +00:00
|
|
|
bool IsPredicated = CM.isScalarWithPredication(I);
|
2017-08-27 12:55:46 +00:00
|
|
|
auto *Recipe = new VPReplicateRecipe(I, IsUniform, IsPredicated);
|
|
|
|
|
|
|
|
// Find if I uses a predicated instruction. If so, it will use its scalar
|
|
|
|
// value. Avoid hoisting the insert-element which packs the scalar value into
|
|
|
|
// a vector value, as that happens iff all users use the vector value.
|
|
|
|
for (auto &Op : I->operands())
|
|
|
|
if (auto *PredInst = dyn_cast<Instruction>(Op))
|
|
|
|
if (PredInst2Recipe.find(PredInst) != PredInst2Recipe.end())
|
|
|
|
PredInst2Recipe[PredInst]->setAlsoPack(false);
|
|
|
|
|
|
|
|
// Finalize the recipe for Instr, first if it is not predicated.
|
|
|
|
if (!IsPredicated) {
|
|
|
|
DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
|
|
|
|
VPBB->appendRecipe(Recipe);
|
|
|
|
return VPBB;
|
|
|
|
}
|
|
|
|
DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
|
|
|
|
assert(VPBB->getSuccessors().empty() &&
|
|
|
|
"VPBB has successors when handling predicated replication.");
|
|
|
|
// Record predicated instructions for above packing optimizations.
|
|
|
|
PredInst2Recipe[I] = Recipe;
|
2017-11-20 12:01:47 +00:00
|
|
|
VPBlockBase *Region =
|
|
|
|
VPBB->setOneSuccessor(createReplicateRegion(I, Recipe, Plan));
|
2017-08-27 12:55:46 +00:00
|
|
|
return cast<VPBasicBlock>(Region->setOneSuccessor(new VPBasicBlock()));
|
|
|
|
}
|
|
|
|
|
|
|
|
VPRegionBlock *
|
|
|
|
LoopVectorizationPlanner::createReplicateRegion(Instruction *Instr,
|
2017-11-20 12:01:47 +00:00
|
|
|
VPRecipeBase *PredRecipe,
|
|
|
|
VPlanPtr &Plan) {
|
2017-08-27 12:55:46 +00:00
|
|
|
// Instructions marked for predication are replicated and placed under an
|
|
|
|
// if-then construct to prevent side-effects.
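// The generated region has a triangular shape (names are illustrative):
//   pred.<opcode>.entry -> pred.<opcode>.if -> pred.<opcode>.continue
//   pred.<opcode>.entry ------------------------^
// where the entry block branches on the mask and the continue block holds an
// optional phi for the predicated value.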
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
// Generate recipes to compute the block mask for this region.
|
|
|
|
VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// Build the triangular if-then region.
|
|
|
|
std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
|
|
|
|
assert(Instr->getParent() && "Predicated instruction not in any basic block");
|
2017-11-20 12:01:47 +00:00
|
|
|
auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
|
2017-08-27 12:55:46 +00:00
|
|
|
auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
|
|
|
|
auto *PHIRecipe =
|
|
|
|
Instr->getType()->isVoidTy() ? nullptr : new VPPredInstPHIRecipe(Instr);
|
|
|
|
auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
|
|
|
|
auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
|
|
|
|
VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
|
|
|
|
|
|
|
|
// Note: first set Entry as region entry and then connect successors starting
|
|
|
|
// from it in order, to propagate the "parent" of each VPBasicBlock.
|
|
|
|
Entry->setTwoSuccessors(Pred, Exit);
|
|
|
|
Pred->setOneSuccessor(Exit);
|
|
|
|
|
|
|
|
return Region;
|
|
|
|
}
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
LoopVectorizationPlanner::VPlanPtr
|
|
|
|
LoopVectorizationPlanner::buildVPlan(VFRange &Range,
|
|
|
|
const SmallPtrSetImpl<Value *> &NeedDef) {
|
|
|
|
EdgeMaskCache.clear();
|
|
|
|
BlockMaskCache.clear();
|
2017-08-27 12:55:46 +00:00
|
|
|
DenseMap<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
|
|
|
|
DenseMap<Instruction *, Instruction *> SinkAfterInverse;
|
|
|
|
|
|
|
|
// Collect instructions from the original loop that will become trivially dead
|
|
|
|
// in the vectorized loop. We don't need to vectorize these instructions. For
|
|
|
|
// example, original induction update instructions can become dead because we
|
|
|
|
// separately emit induction "steps" when generating code for the new loop.
|
|
|
|
// Similarly, we create a new latch condition when setting up the structure
|
|
|
|
// of the new loop, so the old one can become dead.
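// For example (illustrative), the original induction update i.next = i + 1
// that only feeds the old latch compare becomes dead once the vector loop
// emits its own induction steps and latch condition.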
|
|
|
|
SmallPtrSet<Instruction *, 4> DeadInstructions;
|
|
|
|
collectTriviallyDeadInstructions(DeadInstructions);
|
|
|
|
|
|
|
|
// Hold a mapping from predicated instructions to their recipes, in order to
|
|
|
|
// fix their AlsoPack behavior if a user is determined to replicate and use a
|
|
|
|
// scalar instead of vector value.
|
|
|
|
DenseMap<Instruction *, VPReplicateRecipe *> PredInst2Recipe;
|
|
|
|
|
|
|
|
// Create a dummy pre-entry VPBasicBlock to start building the VPlan.
|
|
|
|
VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
|
2017-10-31 14:58:22 +00:00
|
|
|
auto Plan = llvm::make_unique<VPlan>(VPBB);
|
2017-08-27 12:55:46 +00:00
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
// Represent values that will have defs inside VPlan.
|
|
|
|
for (Value *V : NeedDef)
|
|
|
|
Plan->addVPValue(V);
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// Scan the body of the loop in a topological order to visit each basic block
|
|
|
|
// after having visited its predecessor basic blocks.
|
|
|
|
LoopBlocksDFS DFS(OrigLoop);
|
|
|
|
DFS.perform(LI);
|
|
|
|
|
|
|
|
for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
|
|
|
|
// Relevant instructions from basic block BB will be grouped into VPRecipe
|
|
|
|
// ingredients and fill a new VPBasicBlock.
|
|
|
|
unsigned VPBBsForBB = 0;
|
|
|
|
auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
|
|
|
|
VPBB->setOneSuccessor(FirstVPBBForBB);
|
|
|
|
VPBB = FirstVPBBForBB;
|
2017-11-20 12:01:47 +00:00
|
|
|
Builder.setInsertPoint(VPBB);
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
std::vector<Instruction *> Ingredients;
|
|
|
|
|
|
|
|
// Organize the ingredients to vectorize from current basic block in the
|
|
|
|
// right order.
|
|
|
|
for (Instruction &I : *BB) {
|
|
|
|
Instruction *Instr = &I;
|
|
|
|
|
|
|
|
// First filter out irrelevant instructions, to ensure no recipes are
|
|
|
|
// built for them.
|
|
|
|
if (isa<BranchInst>(Instr) || isa<DbgInfoIntrinsic>(Instr) ||
|
|
|
|
DeadInstructions.count(Instr))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// I is a member of an InterleaveGroup for Range.Start. If it's an adjunct
|
|
|
|
// member of the IG, do not construct any Recipe for it.
|
|
|
|
const InterleaveGroup *IG = Legal->getInterleavedAccessGroup(Instr);
|
|
|
|
if (IG && Instr != IG->getInsertPos() &&
|
|
|
|
Range.Start >= 2 && // Query is illegal for VF == 1
|
|
|
|
CM.getWideningDecision(Instr, Range.Start) ==
|
2017-10-05 15:45:14 +00:00
|
|
|
LoopVectorizationCostModel::CM_Interleave) {
|
|
|
|
if (SinkAfterInverse.count(Instr))
|
|
|
|
Ingredients.push_back(SinkAfterInverse.find(Instr)->second);
|
2017-08-27 12:55:46 +00:00
|
|
|
continue;
|
2017-10-05 15:45:14 +00:00
|
|
|
}
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
// Move instructions to handle first-order recurrences, step 1: avoid
|
|
|
|
// handling this instruction until after we've handled the instruction it
|
|
|
|
// should follow.
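// For example (illustrative), when vectorizing a first-order recurrence such
// as b[i] = a[i] + a[i-1], an instruction recorded in SinkAfter is held back
// here and re-inserted right after its target in step 2 below.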
|
|
|
|
auto SAIt = SinkAfter.find(Instr);
|
|
|
|
if (SAIt != SinkAfter.end()) {
|
|
|
|
DEBUG(dbgs() << "Sinking" << *SAIt->first << " after" << *SAIt->second
|
|
|
|
<< " to vectorize a 1st order recurrence.\n");
|
|
|
|
SinkAfterInverse[SAIt->second] = Instr;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ingredients.push_back(Instr);
|
|
|
|
|
|
|
|
// Move instructions to handle first-order recurrences, step 2: push the
|
|
|
|
// instruction to be sunk at its insertion point.
|
|
|
|
auto SAInvIt = SinkAfterInverse.find(Instr);
|
|
|
|
if (SAInvIt != SinkAfterInverse.end())
|
|
|
|
Ingredients.push_back(SAInvIt->second);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Introduce each ingredient into VPlan.
|
|
|
|
for (Instruction *Instr : Ingredients) {
|
|
|
|
VPRecipeBase *Recipe = nullptr;
|
|
|
|
|
|
|
|
// Check if Instr should belong to an interleave memory recipe, or already
|
|
|
|
// does. In the latter case Instr is irrelevant.
|
|
|
|
if ((Recipe = tryToInterleaveMemory(Instr, Range))) {
|
|
|
|
VPBB->appendRecipe(Recipe);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-11-14 12:09:30 +00:00
|
|
|
// Check if Instr is a memory operation that should be widened.
|
2017-11-20 12:01:47 +00:00
|
|
|
if ((Recipe = tryToWidenMemory(Instr, Range, Plan))) {
|
2017-11-14 12:09:30 +00:00
|
|
|
VPBB->appendRecipe(Recipe);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
// Check if Instr should form some PHI recipe.
|
|
|
|
if ((Recipe = tryToOptimizeInduction(Instr, Range))) {
|
|
|
|
VPBB->appendRecipe(Recipe);
|
|
|
|
continue;
|
|
|
|
}
|
2017-11-20 12:01:47 +00:00
|
|
|
if ((Recipe = tryToBlend(Instr, Plan))) {
|
2017-11-14 12:09:30 +00:00
|
|
|
VPBB->appendRecipe(Recipe);
|
|
|
|
continue;
|
|
|
|
}
|
2017-08-27 12:55:46 +00:00
|
|
|
if (PHINode *Phi = dyn_cast<PHINode>(Instr)) {
|
|
|
|
VPBB->appendRecipe(new VPWidenPHIRecipe(Phi));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if Instr is to be widened by a general VPWidenRecipe, after
|
|
|
|
// having first checked for specific widening recipes that deal with
|
|
|
|
// Interleave Groups, Inductions and Phi nodes.
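// To summarize, recipes are attempted in this order: interleave group,
// widened memory operation, optimized induction, blend, widened phi, general
// widening, and finally replication below.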
|
2017-10-05 12:41:49 +00:00
|
|
|
if (tryToWiden(Instr, VPBB, Range))
|
2017-08-27 12:55:46 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// Otherwise, if all widening options failed, Instruction is to be
|
|
|
|
// replicated. This may create a successor for VPBB.
|
|
|
|
VPBasicBlock *NextVPBB =
|
2017-11-20 12:01:47 +00:00
|
|
|
handleReplication(Instr, Range, VPBB, PredInst2Recipe, Plan);
|
2017-08-27 12:55:46 +00:00
|
|
|
if (NextVPBB != VPBB) {
|
|
|
|
VPBB = NextVPBB;
|
|
|
|
VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
|
|
|
|
: "");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks
|
|
|
|
// may also be empty, such as the last one (VPBB), reflecting original
|
|
|
|
// basic-blocks with no recipes.
|
|
|
|
VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry());
|
|
|
|
assert(PreEntry->empty() && "Expecting empty pre-entry block.");
|
|
|
|
VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor());
|
|
|
|
PreEntry->disconnectSuccessor(Entry);
|
|
|
|
delete PreEntry;
|
|
|
|
|
|
|
|
std::string PlanName;
|
|
|
|
raw_string_ostream RSO(PlanName);
|
|
|
|
unsigned VF = Range.Start;
|
|
|
|
Plan->addVF(VF);
|
|
|
|
RSO << "Initial VPlan for VF={" << VF;
|
|
|
|
for (VF *= 2; VF < Range.End; VF *= 2) {
|
|
|
|
Plan->addVF(VF);
|
|
|
|
RSO << "," << VF;
|
|
|
|
}
|
|
|
|
RSO << "},UF>=1";
|
|
|
|
RSO.flush();
|
|
|
|
Plan->setName(PlanName);
|
|
|
|
|
|
|
|
return Plan;
|
|
|
|
}
|
|
|
|
|
2018-01-07 16:02:58 +00:00
|
|
|
Value* LoopVectorizationPlanner::VPCallbackILV::
|
|
|
|
getOrCreateVectorValues(Value *V, unsigned Part) {
|
|
|
|
return ILV.getOrCreateVectorValue(V, Part);
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent) const {
|
|
|
|
O << " +\n"
|
|
|
|
<< Indent << "\"INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
|
|
|
|
IG->getInsertPos()->printAsOperand(O, false);
|
|
|
|
O << "\\l\"";
|
|
|
|
for (unsigned i = 0; i < IG->getFactor(); ++i)
|
|
|
|
if (Instruction *I = IG->getMember(i))
|
|
|
|
O << " +\n"
|
|
|
|
<< Indent << "\" " << VPlanIngredient(I) << " " << i << "\\l\"";
|
|
|
|
}
|
|
|
|
|
2017-12-16 01:12:50 +00:00
|
|
|
void VPWidenRecipe::execute(VPTransformState &State) {
|
|
|
|
for (auto &Instr : make_range(Begin, End))
|
|
|
|
State.ILV->widenInstruction(Instr);
|
|
|
|
}
|
|
|
|
|
|
|
|
void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
|
|
|
|
assert(!State.Instance && "Int or FP induction being replicated.");
|
|
|
|
State.ILV->widenIntOrFpInduction(IV, Trunc);
|
|
|
|
}
|
|
|
|
|
|
|
|
void VPWidenPHIRecipe::execute(VPTransformState &State) {
|
|
|
|
State.ILV->widenPHIInstruction(Phi, State.UF, State.VF);
|
|
|
|
}
|
|
|
|
|
|
|
|
void VPBlendRecipe::execute(VPTransformState &State) {
|
|
|
|
State.ILV->setDebugLocFromInst(State.Builder, Phi);
|
|
|
|
// We know that all PHIs in non-header blocks are converted into
|
|
|
|
// selects, so we don't have to worry about the insertion order and we
|
|
|
|
// can just use the builder.
|
|
|
|
// At this point we generate the predication tree. There may be
|
|
|
|
// duplications since this is a simple recursive scan, but future
|
|
|
|
// optimizations will clean it up.
|
|
|
|
|
|
|
|
unsigned NumIncoming = Phi->getNumIncomingValues();
|
|
|
|
|
|
|
|
assert((User || NumIncoming == 1) &&
|
|
|
|
"Multiple predecessors with predecessors having a full mask");
|
|
|
|
// Generate a sequence of selects of the form:
|
|
|
|
// SELECT(Mask3, In3,
|
|
|
|
// SELECT(Mask2, In2,
|
|
|
|
// ( ...)))
|
|
|
|
InnerLoopVectorizer::VectorParts Entry(State.UF);
|
|
|
|
for (unsigned In = 0; In < NumIncoming; ++In) {
|
|
|
|
for (unsigned Part = 0; Part < State.UF; ++Part) {
|
|
|
|
// We might have single edge PHIs (blocks) - use an identity
|
|
|
|
// 'select' for the first PHI operand.
|
|
|
|
Value *In0 =
|
|
|
|
State.ILV->getOrCreateVectorValue(Phi->getIncomingValue(In), Part);
|
|
|
|
if (In == 0)
|
|
|
|
Entry[Part] = In0; // Initialize with the first incoming value.
|
|
|
|
else {
|
|
|
|
// Select between the current value and the previous incoming edge
|
|
|
|
// based on the incoming mask.
|
|
|
|
Value *Cond = State.get(User->getOperand(In), Part);
|
|
|
|
Entry[Part] =
|
|
|
|
State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (unsigned Part = 0; Part < State.UF; ++Part)
|
|
|
|
State.ValueMap.setVectorValue(Phi, Part, Entry[Part]);
|
|
|
|
}
|
|
|
|
|
|
|
|
void VPInterleaveRecipe::execute(VPTransformState &State) {
|
|
|
|
assert(!State.Instance && "Interleave group being replicated.");
|
|
|
|
State.ILV->vectorizeInterleaveGroup(IG->getInsertPos());
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
void VPReplicateRecipe::execute(VPTransformState &State) {
|
|
|
|
if (State.Instance) { // Generate a single instance.
|
|
|
|
State.ILV->scalarizeInstruction(Ingredient, *State.Instance, IsPredicated);
|
|
|
|
// Insert scalar instance packing it into a vector.
|
|
|
|
if (AlsoPack && State.VF > 1) {
|
|
|
|
// If we're constructing lane 0, initialize to start from undef.
|
|
|
|
if (State.Instance->Lane == 0) {
|
|
|
|
Value *Undef =
|
|
|
|
UndefValue::get(VectorType::get(Ingredient->getType(), State.VF));
|
|
|
|
State.ValueMap.setVectorValue(Ingredient, State.Instance->Part, Undef);
|
|
|
|
}
|
|
|
|
State.ILV->packScalarIntoVectorValue(Ingredient, *State.Instance);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate scalar instances for all VF lanes of all UF parts, unless the
|
|
|
|
// instruction is uniform, in which case generate only the first lane for each
|
|
|
|
// of the UF parts.
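// For example (illustrative), with UF = 2 and VF = 4 a non-uniform
// instruction is scalarized into 8 copies, while a uniform one gets only 2
// (lane 0 of each part).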
|
|
|
|
unsigned EndLane = IsUniform ? 1 : State.VF;
|
|
|
|
for (unsigned Part = 0; Part < State.UF; ++Part)
|
|
|
|
for (unsigned Lane = 0; Lane < EndLane; ++Lane)
|
|
|
|
State.ILV->scalarizeInstruction(Ingredient, {Part, Lane}, IsPredicated);
|
|
|
|
}
|
|
|
|
|
|
|
|
void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
|
|
|
|
assert(State.Instance && "Branch on Mask works only on single instance.");
|
|
|
|
|
|
|
|
unsigned Part = State.Instance->Part;
|
|
|
|
unsigned Lane = State.Instance->Lane;
|
|
|
|
|
2017-11-20 12:01:47 +00:00
|
|
|
Value *ConditionBit = nullptr;
|
|
|
|
if (!User) // Block in mask is all-one.
|
2017-08-27 12:55:46 +00:00
|
|
|
ConditionBit = State.Builder.getTrue();
|
2017-11-20 12:01:47 +00:00
|
|
|
else {
|
|
|
|
VPValue *BlockInMask = User->getOperand(0);
|
|
|
|
ConditionBit = State.get(BlockInMask, Part);
|
|
|
|
if (ConditionBit->getType()->isVectorTy())
|
|
|
|
ConditionBit = State.Builder.CreateExtractElement(
|
|
|
|
ConditionBit, State.Builder.getInt32(Lane));
|
|
|
|
}
|
2017-08-27 12:55:46 +00:00
|
|
|
|
|
|
|
// Replace the temporary unreachable terminator with a new conditional branch,
|
|
|
|
// whose two destinations will be set later when they are created.
|
|
|
|
auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
|
|
|
|
assert(isa<UnreachableInst>(CurrentTerminator) &&
|
|
|
|
"Expected to replace unreachable terminator with conditional branch.");
|
|
|
|
auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
|
|
|
|
CondBr->setSuccessor(0, nullptr);
|
|
|
|
ReplaceInstWithInst(CurrentTerminator, CondBr);
|
|
|
|
}
|
|
|
|
|
|
|
|
void VPPredInstPHIRecipe::execute(VPTransformState &State) {
|
|
|
|
assert(State.Instance && "Predicated instruction PHI works per instance.");
|
|
|
|
Instruction *ScalarPredInst = cast<Instruction>(
|
|
|
|
State.ValueMap.getScalarValue(PredInst, *State.Instance));
|
|
|
|
BasicBlock *PredicatedBB = ScalarPredInst->getParent();
|
|
|
|
BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
|
|
|
|
assert(PredicatingBB && "Predicated block has no single predecessor.");
|
|
|
|
|
|
|
|
// By current pack/unpack logic we need to generate only a single phi node: if
|
|
|
|
// a vector value for the predicated instruction exists at this point it means
|
|
|
|
// the instruction has vector users only, and a phi for the vector value is
|
|
|
|
// needed. In this case the recipe of the predicated instruction is marked to
|
|
|
|
// also do that packing, thereby "hoisting" the insert-element sequence.
|
|
|
|
// Otherwise, a phi node for the scalar value is needed.
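// Illustrative shape of the phi generated for the vector case:
//   %vphi = phi <VF x Ty> [ %vec, %predicating.bb ],
//                         [ %vec.with.inserted.lane, %predicated.bb ]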
|
|
|
|
unsigned Part = State.Instance->Part;
|
|
|
|
if (State.ValueMap.hasVectorValue(PredInst, Part)) {
|
|
|
|
Value *VectorValue = State.ValueMap.getVectorValue(PredInst, Part);
|
|
|
|
InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
|
|
|
|
PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
|
|
|
|
VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
|
|
|
|
VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
|
|
|
|
State.ValueMap.resetVectorValue(PredInst, Part, VPhi); // Update cache.
|
|
|
|
} else {
|
|
|
|
Type *PredInstType = PredInst->getType();
|
|
|
|
PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
|
|
|
|
Phi->addIncoming(UndefValue::get(ScalarPredInst->getType()), PredicatingBB);
|
|
|
|
Phi->addIncoming(ScalarPredInst, PredicatedBB);
|
|
|
|
State.ValueMap.resetScalarValue(PredInst, *State.Instance, Phi);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-16 01:12:50 +00:00
|
|
|
void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
|
|
|
|
if (!User)
|
|
|
|
return State.ILV->vectorizeMemoryInstruction(&Instr);
|
|
|
|
|
|
|
|
// Last (and currently only) operand is a mask.
|
|
|
|
InnerLoopVectorizer::VectorParts MaskValues(State.UF);
|
|
|
|
VPValue *Mask = User->getOperand(User->getNumOperands() - 1);
|
|
|
|
for (unsigned Part = 0; Part < State.UF; ++Part)
|
|
|
|
MaskValues[Part] = State.get(Mask, Part);
|
|
|
|
State.ILV->vectorizeMemoryInstruction(&Instr, &MaskValues);
|
|
|
|
}
|
|
|
|
|
2016-07-09 22:56:50 +00:00
|
|
|
bool LoopVectorizePass::processLoop(Loop *L) {
|
|
|
|
assert(L->empty() && "Only process inner loops.");
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
const std::string DebugLocStr = getDebugLocString(L);
|
|
|
|
#endif /* NDEBUG */
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "\nLV: Checking a loop in \""
|
|
|
|
<< L->getHeader()->getParent()->getName() << "\" from "
|
|
|
|
<< DebugLocStr << "\n");
|
|
|
|
|
2016-07-20 21:44:26 +00:00
|
|
|
LoopVectorizeHints Hints(L, DisableUnrolling, *ORE);
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
DEBUG(dbgs() << "LV: Loop hints:"
|
|
|
|
<< " force="
|
|
|
|
<< (Hints.getForce() == LoopVectorizeHints::FK_Disabled
|
|
|
|
? "disabled"
|
|
|
|
: (Hints.getForce() == LoopVectorizeHints::FK_Enabled
|
|
|
|
? "enabled"
|
|
|
|
: "?"))
|
|
|
|
<< " width=" << Hints.getWidth()
|
|
|
|
<< " unroll=" << Hints.getInterleave() << "\n");
|
|
|
|
|
|
|
|
// Function containing loop
|
|
|
|
Function *F = L->getHeader()->getParent();
|
|
|
|
|
|
|
|
// Looking at the diagnostic output is the only way to determine if a loop
|
|
|
|
// was vectorized (other than looking at the IR or machine code), so it
|
|
|
|
// is important to generate an optimization remark for each loop. Most of
|
2016-09-30 00:29:30 +00:00
|
|
|
// these messages are generated as OptimizationRemarkAnalysis. Remarks
|
|
|
|
// generated as OptimizationRemark and OptimizationRemarkMissed are
|
2016-07-09 22:56:50 +00:00
|
|
|
// less verbose, reporting vectorized loops and unvectorized loops that may
|
|
|
|
// benefit from vectorization, respectively.
|
|
|
|
|
|
|
|
if (!Hints.allowVectorization(F, L, AlwaysVectorize)) {
|
|
|
|
DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-06-30 08:02:35 +00:00
|
|
|
PredicatedScalarEvolution PSE(*SE, *L);
|
|
|
|
|
|
|
|
// Check if it is legal to vectorize the loop.
|
|
|
|
LoopVectorizationRequirements Requirements(*ORE);
|
|
|
|
LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, GetLAA, LI, ORE,
|
2018-02-04 15:42:24 +00:00
|
|
|
&Requirements, &Hints, DB, AC);
|
2017-06-30 08:02:35 +00:00
|
|
|
if (!LVL.canVectorize()) {
|
|
|
|
DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
|
|
|
|
emitMissedWarning(F, L, Hints, ORE);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the function attributes to find out if this function should be
|
|
|
|
// optimized for size.
|
|
|
|
bool OptForSize =
|
|
|
|
Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
|
|
|
|
|
|
|
|
// Check the loop for a trip count threshold: vectorize loops with a tiny trip
|
|
|
|
// count by optimizing for size, to minimize overheads.
|
Verify profile data confirms large loop trip counts.
Summary:
Loops with inequality comparers, such as:
// unsigned bound
for (unsigned i = 1; i < bound; ++i) {...}
have getSmallConstantMaxTripCount report a large maximum static
trip count - in this case, 0xffff fffe. However, profiling info
may show that the trip count is much smaller, and thus
counter-recommend vectorization.
This change:
- flips loop-vectorize-with-block-frequency on by default.
- validates profiled loop frequency data supports vectorization,
when static info appears to not counter-recommend it. Absence
of profile data means we rely on static data, just as we've
done so far.
Reviewers: twoh, mkuper, davidxl, tejohnson, Ayal
Reviewed By: davidxl
Subscribers: bkramer, llvm-commits
Differential Revision: https://reviews.llvm.org/D42946
llvm-svn: 324543
2018-02-07 23:29:52 +00:00
|
|
|
// Prefer constant trip counts over profile data, over upper bound estimate.
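// For example (illustrative): a backedge-taken count of 2 gives an expected
// trip count of 3; absent a constant count, profile metadata (branch
// weights) may provide an estimate; failing both, the SCEV maximum trip
// count is used.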
|
|
|
|
unsigned ExpectedTC = 0;
|
|
|
|
bool HasExpectedTC = false;
|
|
|
|
if (const SCEVConstant *ConstExits =
|
|
|
|
dyn_cast<SCEVConstant>(SE->getBackedgeTakenCount(L))) {
|
|
|
|
const APInt &ExitsCount = ConstExits->getAPInt();
|
|
|
|
// We are interested in small values for ExpectedTC. Skip over those that
|
|
|
|
// can't fit an unsigned.
|
|
|
|
if (ExitsCount.ult(std::numeric_limits<unsigned>::max())) {
|
|
|
|
ExpectedTC = static_cast<unsigned>(ExitsCount.getZExtValue()) + 1;
|
|
|
|
HasExpectedTC = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// ExpectedTC may be large because it's bound by a variable. Check
|
|
|
|
// profiling information to validate we should vectorize.
|
Improve profile-guided heuristics to use estimated trip count.
Summary:
Existing heuristic uses the ratio between the function entry
frequency and the loop invocation frequency to find cold loops. However,
even if the loop executes frequently, if it has a small trip count per
each invocation, vectorization is not beneficial. On the other hand,
even if the loop invocation frequency is much smaller than the function
invocation frequency, if the trip count is high it is still beneficial
to vectorize the loop.
This patch uses estimated trip count computed from the profile metadata
as a primary metric to determine coldness of the loop. If the estimated
trip count cannot be computed, it falls back to the original heuristics.
Reviewers: Ayal, mssimpso, mkuper, danielcdh, wmi, tejohnson
Reviewed By: tejohnson
Subscribers: tejohnson, mzolotukhin, llvm-commits
Differential Revision: https://reviews.llvm.org/D32451
llvm-svn: 305729
2017-06-19 18:48:58 +00:00
|
|
|
if (!HasExpectedTC && LoopVectorizeWithBlockFrequency) {
|
|
|
|
auto EstimatedTC = getLoopEstimatedTripCount(L);
|
|
|
|
if (EstimatedTC) {
|
|
|
|
ExpectedTC = *EstimatedTC;
|
|
|
|
HasExpectedTC = true;
|
|
|
|
}
|
|
|
|
}
|
2018-02-07 23:29:52 +00:00
|
|
|
if (!HasExpectedTC) {
|
|
|
|
ExpectedTC = SE->getSmallConstantMaxTripCount(L);
|
|
|
|
HasExpectedTC = (ExpectedTC > 0);
|
|
|
|
}
|
2017-06-19 18:48:58 +00:00
|
|
|
|
|
|
|
if (HasExpectedTC && ExpectedTC < TinyTripCountVectorThreshold) {
|
2016-07-09 22:56:50 +00:00
|
|
|
DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
|
2017-06-30 08:02:35 +00:00
|
|
|
<< "This loop is worth vectorizing only if no scalar "
|
|
|
|
<< "iteration overheads are incurred.");
|
2016-07-09 22:56:50 +00:00
|
|
|
if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
|
|
|
|
DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
|
|
|
|
else {
|
|
|
|
DEBUG(dbgs() << "\n");
|
2017-06-30 08:02:35 +00:00
|
|
|
// Loops with a very small trip count are considered for vectorization
|
|
|
|
// under OptForSize, thereby making sure the cost of their loop body is
|
|
|
|
// dominant, free of runtime guards and scalar iteration overheads.
|
|
|
|
OptForSize = true;
|
2016-07-09 22:56:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the function attributes to see if implicit floats are allowed.
|
|
|
|
// FIXME: This check doesn't seem possibly correct -- what if the loop is
|
|
|
|
// an integer loop and the vector instructions selected are purely integer
|
|
|
|
// vector instructions?
|
|
|
|
if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
|
|
|
|
DEBUG(dbgs() << "LV: Can't vectorize when the NoImplicitFloat"
|
|
|
|
"attribute is used.\n");
|
2016-09-29 17:55:13 +00:00
|
|
|
ORE->emit(createMissedAnalysis(Hints.vectorizeAnalysisPassName(),
|
|
|
|
"NoImplicitFloat", L)
|
|
|
|
<< "loop not vectorized due to NoImplicitFloat attribute");
|
2016-07-20 04:03:43 +00:00
|
|
|
emitMissedWarning(F, L, Hints, ORE);
|
2016-07-09 22:56:50 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if the target supports potentially unsafe FP vectorization.
|
|
|
|
// FIXME: Add a check for the type of safety issue (denormal, signaling)
|
|
|
|
// for the target we're vectorizing for, to make sure none of the
|
|
|
|
// additional fp-math flags can help.
|
|
|
|
if (Hints.isPotentiallyUnsafe() &&
|
|
|
|
TTI->isFPVectorizationPotentiallyUnsafe()) {
|
|
|
|
DEBUG(dbgs() << "LV: Potentially unsafe FP op prevents vectorization.\n");
|
2016-09-29 17:55:13 +00:00
|
|
|
ORE->emit(
|
|
|
|
createMissedAnalysis(Hints.vectorizeAnalysisPassName(), "UnsafeFP", L)
|
|
|
|
<< "loop not vectorized due to unsafe FP support.");
|
2016-07-20 04:03:43 +00:00
|
|
|
emitMissedWarning(F, L, Hints, ORE);
|
2016-07-09 22:56:50 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-03-14 13:07:04 +00:00
|
|
|
// Use the cost model.
|
|
|
|
LoopVectorizationCostModel CM(L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, F,
|
|
|
|
&Hints);
|
|
|
|
CM.collectValuesToIgnore();
|
|
|
|
|
|
|
|
// Use the planner for vectorization.
|
2017-08-27 12:55:46 +00:00
|
|
|
LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM);
|
2017-03-14 13:07:04 +00:00
|
|
|
|
|
|
|
// Get user vectorization factor.
|
|
|
|
unsigned UserVF = Hints.getWidth();
|
|
|
|
|
|
|
|
// Plan how to best vectorize, return the best VF and its cost.
|
2018-01-07 16:02:58 +00:00
|
|
|
VectorizationFactor VF = LVP.plan(OptForSize, UserVF);
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
// Select the interleave count.
|
|
|
|
unsigned IC = CM.selectInterleaveCount(OptForSize, VF.Width, VF.Cost);
|
|
|
|
|
|
|
|
// Get user interleave count.
|
|
|
|
unsigned UserIC = Hints.getInterleave();
|
|
|
|
|
|
|
|
// Identify the diagnostic messages that should be produced.
|
2016-09-30 00:29:30 +00:00
|
|
|
std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
|
2016-07-09 22:56:50 +00:00
|
|
|
bool VectorizeLoop = true, InterleaveLoop = true;
|
|
|
|
if (Requirements.doesNotMeet(F, L, Hints)) {
|
|
|
|
DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
|
|
|
|
"requirements.\n");
|
2016-07-20 04:03:43 +00:00
|
|
|
emitMissedWarning(F, L, Hints, ORE);
|
2016-07-09 22:56:50 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (VF.Width == 1) {
|
|
|
|
DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
|
2016-09-30 00:29:30 +00:00
|
|
|
VecDiagMsg = std::make_pair(
|
|
|
|
"VectorizationNotBeneficial",
|
|
|
|
"the cost-model indicates that vectorization is not beneficial");
|
2016-07-09 22:56:50 +00:00
|
|
|
VectorizeLoop = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IC == 1 && UserIC <= 1) {
|
|
|
|
// Tell the user interleaving is not beneficial.
|
|
|
|
DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
|
2016-09-30 00:29:30 +00:00
|
|
|
IntDiagMsg = std::make_pair(
|
|
|
|
"InterleavingNotBeneficial",
|
|
|
|
"the cost-model indicates that interleaving is not beneficial");
|
2016-07-09 22:56:50 +00:00
|
|
|
InterleaveLoop = false;
|
2016-09-30 00:29:30 +00:00
|
|
|
if (UserIC == 1) {
|
|
|
|
IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
|
|
|
|
IntDiagMsg.second +=
|
2016-07-09 22:56:50 +00:00
|
|
|
" and is explicitly disabled or interleave count is set to 1";
|
2016-09-30 00:29:30 +00:00
|
|
|
}
|
2016-07-09 22:56:50 +00:00
|
|
|
} else if (IC > 1 && UserIC == 1) {
|
|
|
|
// Tell the user interleaving is beneficial, but it is explicitly disabled.
|
|
|
|
DEBUG(dbgs()
|
|
|
|
<< "LV: Interleaving is beneficial but is explicitly disabled.");
|
2016-09-30 00:29:30 +00:00
|
|
|
IntDiagMsg = std::make_pair(
|
|
|
|
"InterleavingBeneficialButDisabled",
|
|
|
|
"the cost-model indicates that interleaving is beneficial "
|
|
|
|
"but is explicitly disabled or interleave count is set to 1");
|
2016-07-09 22:56:50 +00:00
|
|
|
InterleaveLoop = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Override IC if user provided an interleave count.
|
|
|
|
IC = UserIC > 0 ? UserIC : IC;
|
|
|
|
|
|
|
|
// Emit diagnostic messages, if any.
|
|
|
|
const char *VAPassName = Hints.vectorizeAnalysisPassName();
|
|
|
|
if (!VectorizeLoop && !InterleaveLoop) {
|
|
|
|
// Do not vectorize or interleave the loop.
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
|
|
|
|
L->getStartLoc(), L->getHeader())
|
|
|
|
<< VecDiagMsg.second;
|
|
|
|
});
|
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
|
|
|
|
L->getStartLoc(), L->getHeader())
|
|
|
|
<< IntDiagMsg.second;
|
|
|
|
});
|
2016-07-09 22:56:50 +00:00
|
|
|
return false;
|
|
|
|
} else if (!VectorizeLoop && InterleaveLoop) {
|
|
|
|
DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
|
|
|
|
L->getStartLoc(), L->getHeader())
|
|
|
|
<< VecDiagMsg.second;
|
|
|
|
});
|
2016-07-09 22:56:50 +00:00
|
|
|
} else if (VectorizeLoop && !InterleaveLoop) {
|
|
|
|
DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
|
|
|
|
<< DebugLocStr << '\n');
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
|
|
|
|
L->getStartLoc(), L->getHeader())
|
|
|
|
<< IntDiagMsg.second;
|
|
|
|
});
|
2016-07-09 22:56:50 +00:00
|
|
|
} else if (VectorizeLoop && InterleaveLoop) {
|
|
|
|
DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width << ") in "
|
|
|
|
<< DebugLocStr << '\n');
|
|
|
|
DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
|
|
|
|
}
|
|
|
|
|
2017-08-27 12:55:46 +00:00
|
|
|
LVP.setBestPlan(VF.Width, IC);
|
|
|
|
|
2016-09-30 00:29:30 +00:00
|
|
|
using namespace ore;
|
2017-10-12 23:30:03 +00:00
|
|
|
|
2016-07-09 22:56:50 +00:00
|
|
|
if (!VectorizeLoop) {
|
|
|
|
assert(IC > 1 && "interleave count should not be 1 or 0");
|
|
|
|
// If we decided that it is not legal to vectorize the loop, then
|
|
|
|
// interleave it.
|
2016-12-19 08:22:17 +00:00
|
|
|
InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
|
|
|
|
&CM);
|
2017-08-27 12:55:46 +00:00
|
|
|
LVP.executePlan(Unroller, DT);
|
2016-07-09 22:56:50 +00:00
|
|
|
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
|
|
|
|
L->getHeader())
|
|
|
|
<< "interleaved loop (interleaved count: "
|
|
|
|
<< NV("InterleaveCount", IC) << ")";
|
|
|
|
});
|
2016-07-09 22:56:50 +00:00
|
|
|
} else {
|
|
|
|
// If we decided that it is *legal* to vectorize the loop, then do it.
|
2016-12-19 08:22:17 +00:00
|
|
|
InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
|
|
|
|
&LVL, &CM);
|
2017-08-27 12:55:46 +00:00
|
|
|
LVP.executePlan(LB, DT);
|
2016-07-09 22:56:50 +00:00
|
|
|
++LoopsVectorized;
|
|
|
|
|
|
|
|
// Add metadata to disable runtime unrolling of the scalar loop when there are
|
|
|
|
// no runtime checks about strides and memory. A scalar loop that is
|
|
|
|
// rarely used is not worth unrolling.
|
|
|
|
if (!LB.areSafetyChecksAdded())
|
|
|
|
AddRuntimeUnrollDisableMetaData(L);
|
|
|
|
|
|
|
|
// Report the vectorization decision.
|
2017-10-11 17:12:59 +00:00
|
|
|
ORE->emit([&]() {
|
|
|
|
return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
|
|
|
|
L->getHeader())
|
|
|
|
<< "vectorized loop (vectorization width: "
|
|
|
|
<< NV("VectorizationFactor", VF.Width)
|
|
|
|
<< ", interleaved count: " << NV("InterleaveCount", IC) << ")";
|
|
|
|
});
|
2016-07-09 22:56:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Mark the loop as already vectorized to avoid vectorizing again.
|
|
|
|
Hints.setAlreadyVectorized();
|
|
|
|
|
|
|
|
DEBUG(verifyFunction(*L->getHeader()->getParent()));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool LoopVectorizePass::runImpl(
|
|
|
|
Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
|
|
|
|
DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
|
2016-12-19 08:22:17 +00:00
|
|
|
DemandedBits &DB_, AliasAnalysis &AA_, AssumptionCache &AC_,
|
2016-07-20 04:03:43 +00:00
|
|
|
std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
|
|
|
|
OptimizationRemarkEmitter &ORE_) {
|
2016-07-09 22:56:50 +00:00
|
|
|
SE = &SE_;
|
|
|
|
LI = &LI_;
|
|
|
|
TTI = &TTI_;
|
|
|
|
DT = &DT_;
|
|
|
|
BFI = &BFI_;
|
|
|
|
TLI = TLI_;
|
|
|
|
AA = &AA_;
|
2016-12-19 08:22:17 +00:00
|
|
|
AC = &AC_;
|
2016-07-09 22:56:50 +00:00
|
|
|
GetLAA = &GetLAA_;
|
|
|
|
DB = &DB_;
|
2016-07-20 04:03:43 +00:00
|
|
|
ORE = &ORE_;
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
// Don't attempt if
|
|
|
|
// 1. the target claims to have no vector registers, and
|
|
|
|
// 2. interleaving won't help ILP.
|
|
|
|
//
|
|
|
|
// The second condition is necessary because, even if the target has no
|
|
|
|
// vector registers, loop vectorization may still enable scalar
|
|
|
|
// interleaving.
|
|
|
|
if (!TTI->getNumberOfRegisters(true) && TTI->getMaxInterleaveFactor(1) < 2)
|
|
|
|
return false;
|
|
|
|
|
2017-01-19 00:42:28 +00:00
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
// The vectorizer requires loops to be in simplified form.
|
|
|
|
// Since simplification may add new inner loops, it has to run before the
|
|
|
|
// legality and profitability checks. This means running the loop vectorizer
|
|
|
|
// will simplify all loops, regardless of whether anything ends up being
|
|
|
|
// vectorized.
|
|
|
|
for (auto &L : *LI)
|
|
|
|
Changed |= simplifyLoop(L, DT, LI, SE, AC, false /* PreserveLCSSA */);
|
|
|
|
|
2016-07-09 22:56:50 +00:00
|
|
|
// Build up a worklist of inner-loops to vectorize. This is necessary as
|
|
|
|
// the act of vectorizing or partially unrolling a loop creates new loops
|
|
|
|
// and can invalidate iterators across the loops.
|
|
|
|
SmallVector<Loop *, 8> Worklist;
|
|
|
|
|
|
|
|
for (Loop *L : *LI)
|
2018-03-02 12:24:25 +00:00
|
|
|
addAcyclicInnerLoop(*L, *LI, Worklist);
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
LoopsAnalyzed += Worklist.size();
|
|
|
|
|
|
|
|
// Now walk the identified inner loops.
|
2017-01-26 10:41:09 +00:00
|
|
|
while (!Worklist.empty()) {
|
|
|
|
Loop *L = Worklist.pop_back_val();
|
|
|
|
|
|
|
|
// For the inner loops we actually process, form LCSSA to simplify the
|
|
|
|
// transform.
|
|
|
|
Changed |= formLCSSARecursively(*L, *DT, LI, SE);
|
|
|
|
|
|
|
|
Changed |= processLoop(L);
|
|
|
|
}
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
// Process each loop nest in the function.
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
|
|
|
PreservedAnalyses LoopVectorizePass::run(Function &F,
|
|
|
|
FunctionAnalysisManager &AM) {
|
|
|
|
auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
|
|
|
|
auto &LI = AM.getResult<LoopAnalysis>(F);
|
|
|
|
auto &TTI = AM.getResult<TargetIRAnalysis>(F);
|
|
|
|
auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
|
|
|
|
auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
|
2017-01-11 06:23:21 +00:00
|
|
|
auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
|
2016-07-09 22:56:50 +00:00
|
|
|
auto &AA = AM.getResult<AAManager>(F);
|
2016-12-19 08:22:17 +00:00
|
|
|
auto &AC = AM.getResult<AssumptionAnalysis>(F);
|
2016-07-09 22:56:50 +00:00
|
|
|
auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
|
2016-07-20 04:03:43 +00:00
|
|
|
auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
|
2016-07-09 22:56:50 +00:00
|
|
|
|
|
|
|
auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
|
|
|
|
std::function<const LoopAccessInfo &(Loop &)> GetLAA =
|
|
|
|
[&](Loop &L) -> const LoopAccessInfo & {
|
2017-11-21 15:45:46 +00:00
|
|
|
LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, nullptr};
|
2017-01-11 06:23:21 +00:00
|
|
|
return LAM.getResult<LoopAccessAnalysis>(L, AR);
|
2016-07-09 22:56:50 +00:00
|
|
|
};
|
2016-07-20 04:03:43 +00:00
|
|
|
bool Changed =
|
2017-01-11 06:23:21 +00:00
|
|
|
runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE);
|
2016-07-09 22:56:50 +00:00
|
|
|
if (!Changed)
|
|
|
|
return PreservedAnalyses::all();
|
|
|
|
PreservedAnalyses PA;
|
|
|
|
PA.preserve<LoopAnalysis>();
|
|
|
|
PA.preserve<DominatorTreeAnalysis>();
|
|
|
|
PA.preserve<BasicAA>();
|
|
|
|
PA.preserve<GlobalsAA>();
|
|
|
|
return PA;
|
|
|
|
}
|