//===- LoopAnalysis.cpp - Misc loop analysis routines //-------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements miscellaneous loop analysis routines.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/LoopAnalysis.h"

#include "mlir/AffineOps/AffineOps.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/StandardOps/Ops.h"
#include "mlir/SuperVectorOps/SuperVectorOps.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/MathExtras.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/MathExtras.h"

#include <algorithm>
#include <functional>
#include <limits>
#include <type_traits>

using namespace mlir;

/// Returns the trip count of the loop as an affine expression if the latter is
/// expressible as an affine expression, and a null AffineMap otherwise. The
/// trip count expression is simplified before returning. This method only
/// utilizes map composition to construct lower and upper bounds before
/// computing the trip count expressions.
// TODO(mlir-team): this should be moved into 'Transforms/' and be replaced by a
// pure analysis method relying on FlatAffineConstraints; the latter will also
// be more powerful (since both inequalities and equalities will be considered).
void mlir::buildTripCountMapAndOperands(
    AffineForOp forOp, AffineMap *map,
    SmallVectorImpl<Value *> *tripCountOperands) {
  int64_t loopSpan;

  int64_t step = forOp.getStep();
  FuncBuilder b(forOp.getOperation());

  if (forOp.hasConstantBounds()) {
    int64_t lb = forOp.getConstantLowerBound();
    int64_t ub = forOp.getConstantUpperBound();
    loopSpan = ub - lb;
    if (loopSpan < 0)
      loopSpan = 0;
    *map = b.getConstantAffineMap(ceilDiv(loopSpan, step));
    tripCountOperands->clear();
    return;
  }
  auto lbMap = forOp.getLowerBoundMap();
  auto ubMap = forOp.getUpperBoundMap();
  if (lbMap.getNumResults() != 1) {
    *map = AffineMap();
    return;
  }
  SmallVector<Value *, 4> lbOperands(forOp.getLowerBoundOperands());
  SmallVector<Value *, 4> ubOperands(forOp.getUpperBoundOperands());
  auto lb = b.create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
  SmallVector<Value *, 4> ubs;
  ubs.reserve(ubMap.getNumResults());
  for (auto ubExpr : ubMap.getResults())
    ubs.push_back(b.create<AffineApplyOp>(
        forOp.getLoc(),
        b.getAffineMap(ubMap.getNumDims(), ubMap.getNumSymbols(), {ubExpr}, {}),
        ubOperands));

  tripCountOperands->clear();
  tripCountOperands->reserve(1 + ubs.size());
  tripCountOperands->push_back(lb);
  tripCountOperands->append(ubs.begin(), ubs.end());

  SmallVector<AffineExpr, 4> tripCountExprs(ubs.size());
  for (unsigned i = 0, e = ubs.size(); i < e; i++)
    tripCountExprs[i] =
        (b.getAffineDimExpr(1 + i) - b.getAffineDimExpr(0)).ceilDiv(step);
  *map = b.getAffineMap(1 + ubs.size(), 0, tripCountExprs, {});

  fullyComposeAffineMapAndOperands(map, tripCountOperands);
  *map = simplifyAffineMap(*map);
  canonicalizeMapAndOperands(map, tripCountOperands);
  // Remove any affine.apply's that became dead as a result of composition,
  // simplification, and canonicalization above.
  for (auto *v : ubs)
    if (v->use_empty())
      v->getDefiningOp()->erase();
  if (lb.use_empty())
    lb.erase();
}
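
// For illustration (hypothetical loop, not from a test): `for %i = 0 to 1024
// step 128` takes the constant-bounds path above and yields the constant map
// `() -> (8)`, since ceilDiv(1024 - 0, 128) == 8. With a symbolic bound as in
// `for %i = 0 to %N step 4`, each trip count expression has the form
// `(d0, d1) -> ((d1 - d0) ceildiv 4)` over the materialized lower/upper bound
// values, before composition, simplification, and canonicalization.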

/// Returns the trip count of the loop if it's a constant, None otherwise. This
/// method uses affine expression analysis (in turn using getTripCount) and is
/// able to determine constant trip count in non-trivial cases.
// FIXME(mlir-team): this is really relying on buildTripCountMapAndOperands;
// being an analysis utility, it shouldn't. Replace with a version that just
// works with analysis structures (FlatAffineConstraints) and thus doesn't
// update the IR.
llvm::Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
  SmallVector<Value *, 4> operands;
  AffineMap map;
  buildTripCountMapAndOperands(forOp, &map, &operands);

  if (!map)
    return None;

  // Take the min if all trip counts are constant.
  Optional<uint64_t> tripCount;
  for (auto resultExpr : map.getResults()) {
    if (auto constExpr = resultExpr.dyn_cast<AffineConstantExpr>()) {
      if (tripCount.hasValue())
        tripCount = std::min(tripCount.getValue(),
                             static_cast<uint64_t>(constExpr.getValue()));
      else
        tripCount = constExpr.getValue();
    } else
      return None;
  }
  return tripCount;
}
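
// Hypothetical example: for a loop whose upper bound map has two results and
// whose trip count expressions fold to the constants 128 and 96, the constant
// trip count is min(128, 96) == 96; if any expression stays non-constant,
// None is returned.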

/// Returns the greatest known integral divisor of the trip count. Affine
/// expression analysis is used (indirectly through getTripCount), and
/// this method is thus able to determine non-trivial divisors.
uint64_t mlir::getLargestDivisorOfTripCount(AffineForOp forOp) {
  SmallVector<Value *, 4> operands;
  AffineMap map;
  buildTripCountMapAndOperands(forOp, &map, &operands);

  if (!map)
    return 1;

  // The largest divisor of the trip count is the GCD of the individual largest
  // divisors.
  assert(map.getNumResults() >= 1 && "expected one or more results");
  Optional<uint64_t> gcd;
  for (auto resultExpr : map.getResults()) {
    uint64_t thisGcd;
    if (auto constExpr = resultExpr.dyn_cast<AffineConstantExpr>()) {
      uint64_t tripCount = constExpr.getValue();
      // 0 iteration loops (greatest divisor is 2^64 - 1).
      if (tripCount == 0)
        thisGcd = std::numeric_limits<uint64_t>::max();
      else
        // The greatest divisor is the trip count.
        thisGcd = tripCount;
    } else {
      // Trip count is not a known constant; use its largest known divisor.
      thisGcd = resultExpr.getLargestKnownDivisor();
    }
    if (gcd.hasValue())
      gcd = llvm::GreatestCommonDivisor64(gcd.getValue(), thisGcd);
    else
      gcd = thisGcd;
  }
  assert(gcd.hasValue() && "value expected per above logic");
  return gcd.getValue();
}
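
// Illustrative reasoning (hypothetical values): if the trip count expressions
// are `128` and `d0 * 4`, the largest known divisors are 128 and 4, so the
// trip count is known to be divisible by GCD(128, 4) == 4; that suffices to
// validate, for instance, an unroll factor of 4 without a known exact count.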

bool mlir::isAccessInvariant(Value &iv, Value &index) {
  assert(isForInductionVar(&iv) &&
         "iv must be an AffineForOp induction variable");
  assert(index.getType().isa<IndexType>() && "index must be of IndexType");
  SmallVector<Instruction *, 4> affineApplyOps;
  getReachableAffineApplyOps({&index}, affineApplyOps);

  if (affineApplyOps.empty()) {
    // Pointer equality test because of Value pointer semantics.
    return &index != &iv;
  }

  if (affineApplyOps.size() > 1) {
    affineApplyOps[0]->emitError(
        "CompositionAffineMapsPass must have been run: there should be at most "
        "one AffineApplyOp");
    return false;
  }

  auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
  // We need yet another level of indirection because the `dim` index of the
  // access may not correspond to the `dim` index of composeOp.
  return !(AffineValueMap(composeOp).isFunctionOf(0, &iv));
}
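
// For intuition (hypothetical IR): if `%idx = affine_apply (d0) -> (d0 + 1)(%j)`
// is used as an access index, then %idx is invariant with respect to the
// induction variable of any loop other than %j's, but varies with %j, since
// the composed AffineValueMap is a function of %j alone.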

llvm::DenseSet<Value *>
mlir::getInvariantAccesses(Value &iv, llvm::ArrayRef<Value *> indices) {
  llvm::DenseSet<Value *> res;
  for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) {
    auto *val = indices[idx];
    if (isAccessInvariant(iv, *val)) {
      res.insert(val);
    }
  }
  return res;
}
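
// Usage sketch (hypothetical names): for `load %A[%i, %j]` nested under
// `for %i`, getInvariantAccesses(*i, {%i, %j}) returns {%j}, the subset of
// access indices that do not vary with %i.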

/// Given:
///   1. an induction variable `iv` of type AffineForOp;
///   2. a `memoryOp` of type const LoadOp& or const StoreOp&;
///   3. the index of the `fastestVaryingDim` along which to check;
/// determines whether `memoryOp`[`fastestVaryingDim`] is a contiguous access
/// along `iv`.
/// Contiguous is defined as either invariant or varying only along
/// `fastestVaryingDim`.
///
/// Prerequisites:
///   1. `iv` is of the proper type;
///   2. the MemRef accessed by `memoryOp` has no layout map or at most an
///      identity layout map.
///
/// Currently only a missing or identity layout map in the MemRef is supported.
/// Returns false if the MemRef has a non-identity layout map or more than one
/// layout map. This is conservative.
///
// TODO(ntv): check strides.
template <typename LoadOrStoreOp>
static bool isContiguousAccess(Value &iv, LoadOrStoreOp memoryOp,
                               unsigned fastestVaryingDim) {
  static_assert(std::is_same<LoadOrStoreOp, LoadOp>::value ||
                    std::is_same<LoadOrStoreOp, StoreOp>::value,
                "Must be called on either const LoadOp & or const StoreOp &");
  auto memRefType = memoryOp.getMemRefType();
  if (fastestVaryingDim >= memRefType.getRank()) {
    memoryOp.emitError("fastest varying dim out of bounds");
    return false;
  }

  auto layoutMap = memRefType.getAffineMaps();
  // TODO(ntv): remove dependence on Builder once we support non-identity
  // layout map.
  Builder b(memoryOp.getContext());
  if (layoutMap.size() >= 2 ||
      (layoutMap.size() == 1 &&
       !(layoutMap[0] ==
         b.getMultiDimIdentityMap(layoutMap[0].getNumDims())))) {
    return memoryOp.emitError("NYI: non-trivial layoutMap"), false;
  }

  auto indices = memoryOp.getIndices();
  auto numIndices = llvm::size(indices);
  unsigned d = 0;
  for (auto index : indices) {
    if (fastestVaryingDim == (numIndices - 1) - d++) {
      continue;
    }
    if (!isAccessInvariant(iv, *index)) {
      return false;
    }
  }
  return true;
}
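
// For intuition (hypothetical access): checking `load %A[%i, %j]` against the
// induction variable %j with fastestVaryingDim == 0 exempts the last index
// (%j) from the check and requires %i to be invariant with respect to %j; if
// it is, the access is considered contiguous along %j.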

template <typename LoadOrStoreOpPointer>
static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
  auto memRefType = memoryOp.getMemRefType();
  return memRefType.getElementType().template isa<VectorType>();
}

static bool isVectorTransferReadOrWrite(Instruction &inst) {
  return inst.isa<VectorTransferReadOp>() || inst.isa<VectorTransferWriteOp>();
}

using VectorizableInstFun = std::function<bool(AffineForOp, Instruction &)>;

static bool isVectorizableLoopWithCond(AffineForOp loop,
                                       VectorizableInstFun isVectorizableInst) {
  auto *forInst = loop.getOperation();
  if (!matcher::isParallelLoop(*forInst) &&
      !matcher::isReductionLoop(*forInst)) {
    return false;
  }

  // No vectorization across conditionals for now.
  auto conditionals = matcher::If();
  SmallVector<NestedMatch, 8> conditionalsMatched;
  conditionals.match(forInst, &conditionalsMatched);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  // No vectorization across unknown regions.
  auto regions = matcher::Op([](Instruction &inst) -> bool {
    return inst.getNumRegions() != 0 &&
           !(inst.isa<AffineIfOp>() || inst.isa<AffineForOp>());
  });
  SmallVector<NestedMatch, 8> regionsMatched;
  regions.match(forInst, &regionsMatched);
  if (!regionsMatched.empty()) {
    return false;
  }

  auto vectorTransfers = matcher::Op(isVectorTransferReadOrWrite);
  SmallVector<NestedMatch, 8> vectorTransfersMatched;
  vectorTransfers.match(forInst, &vectorTransfersMatched);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
  SmallVector<NestedMatch, 8> loadAndStoresMatched;
  loadAndStores.match(forInst, &loadAndStoresMatched);
  for (auto ls : loadAndStoresMatched) {
    auto *op = ls.getMatchedInstruction();
    auto load = op->dyn_cast<LoadOp>();
    auto store = op->dyn_cast<StoreOp>();
    // Only scalar types are considered vectorizable; all loads/stores must be
    // vectorizable for a loop to qualify as vectorizable.
    // TODO(ntv): ponder whether we want to be more general here.
    bool vector = load ? isVectorElement(load) : isVectorElement(store);
    if (vector) {
      return false;
    }
    if (!isVectorizableInst(loop, *op)) {
      return false;
    }
  }
  return true;
}
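
// In summary, a loop qualifies only if it is parallel or a reduction, contains
// no conditionals, no unknown regions with non-affine control flow, no
// pre-existing vector transfers, and no loads/stores of vector elements, and
// every matched load/store additionally satisfies the supplied predicate.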

bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim(
    AffineForOp loop, unsigned fastestVaryingDim) {
  VectorizableInstFun fun(
      [fastestVaryingDim](AffineForOp loop, Instruction &op) {
        auto load = op.dyn_cast<LoadOp>();
        auto store = op.dyn_cast<StoreOp>();
        return load ? isContiguousAccess(*loop.getInductionVar(), load,
                                         fastestVaryingDim)
                    : isContiguousAccess(*loop.getInductionVar(), store,
                                         fastestVaryingDim);
      });
  return isVectorizableLoopWithCond(loop, fun);
}
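
// Hypothetical usage sketch: a vectorization pass would typically gate its
// rewrite on a query such as
//   isVectorizableLoopAlongFastestVaryingMemRefDim(forOp, /*fastestVaryingDim=*/0)
// before materializing vector_transfer ops along the innermost memref
// dimension of each access.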

bool mlir::isVectorizableLoop(AffineForOp loop) {
  VectorizableInstFun fun(
      // TODO: implement me
      [](AffineForOp loop, Instruction &op) { return true; });
  return isVectorizableLoopWithCond(loop, fun);
}
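
// Note that with the always-true predicate above, this overload only checks
// the structural conditions in isVectorizableLoopWithCond: parallel/reduction
// shape and the absence of conditionals, unknown regions, vector transfers,
// and vector-element memory ops.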

/// Checks whether SSA dominance would be violated if a for inst's body
/// instructions are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence
// violation when we have the support.
bool mlir::isInstwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts) {
  auto *forBody = forOp.getBody();
  assert(shifts.size() == forBody->getOperations().size());

  // Work backwards over the body of the block so that the shift of a use's
  // ancestor instruction in the block gets recorded before it's looked up.
  DenseMap<Instruction *, uint64_t> forBodyShift;
  for (auto it : llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
    auto &inst = it.value();

    // Get the index of the current instruction; note that since we iterate in
    // reverse, the index needs to be fixed up.
    size_t index = shifts.size() - it.index() - 1;

    // Remember the shift of this instruction.
    uint64_t shift = shifts[index];
    forBodyShift.try_emplace(&inst, shift);

    // Validate the results of this instruction if it were to be shifted.
    for (unsigned i = 0, e = inst.getNumResults(); i < e; ++i) {
      Value *result = inst.getResult(i);
      for (const InstOperand &use : result->getUses()) {
        // If an ancestor instruction doesn't lie in the block of forOp,
        // there is no shift to check.
        if (auto *ancInst = forBody->findAncestorInstInBlock(*use.getOwner())) {
          assert(forBodyShift.count(ancInst) > 0 && "ancestor expected in map");
          if (shift != forBodyShift[ancInst])
            return false;
        }
      }
    }
  }
  return true;
}
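
// Shift-validity intuition (hypothetical two-instruction body):
//   %v = "def"() : () -> f32
//   "use"(%v) : (f32) -> ()
// Shifts {0, 0} or {1, 1} keep the def and its use at the same shift factor
// and are valid; any mismatch, e.g. {1, 0}, is conservatively rejected because
// the def and a use would no longer share a shift factor.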