//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Arithmetic/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"

using namespace mlir;
using namespace mlir::memref;

namespace {
/// Idiomatic saturated operations on offsets, sizes and strides.
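///
/// The `Wrapper` below marks a value as "saturated" when it is the dynamic
/// sentinel; any arithmetic involving a saturated operand stays saturated.
/// For illustration (values chosen here): a static size 4 times a static
/// stride 2 yields 8, while a static size 4 times a dynamic stride stays
/// dynamic.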
namespace saturated_arith {
struct Wrapper {
  static Wrapper stride(int64_t v) {
    return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0}
                                                    : Wrapper{false, v};
  }
  static Wrapper offset(int64_t v) {
    return (ShapedType::isDynamicStrideOrOffset(v)) ? Wrapper{true, 0}
                                                    : Wrapper{false, v};
  }
  static Wrapper size(int64_t v) {
    return (ShapedType::isDynamic(v)) ? Wrapper{true, 0} : Wrapper{false, v};
  }
  int64_t asOffset() {
    return saturated ? ShapedType::kDynamicStrideOrOffset : v;
  }
  int64_t asSize() { return saturated ? ShapedType::kDynamicSize : v; }
  int64_t asStride() {
    return saturated ? ShapedType::kDynamicStrideOrOffset : v;
  }
  bool operator==(Wrapper other) {
    return (saturated && other.saturated) ||
           (!saturated && !other.saturated && v == other.v);
  }
  bool operator!=(Wrapper other) { return !(*this == other); }
  Wrapper operator+(Wrapper other) {
    if (saturated || other.saturated)
      return Wrapper{true, 0};
    return Wrapper{false, other.v + v};
  }
  Wrapper operator*(Wrapper other) {
    if (saturated || other.saturated)
      return Wrapper{true, 0};
    return Wrapper{false, other.v * v};
  }
  bool saturated;
  int64_t v;
};
} // namespace saturated_arith
} // namespace

/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
                                              Attribute value, Type type,
                                              Location loc) {
  if (arith::ConstantOp::isBuildableWith(value, type))
    return builder.create<arith::ConstantOp>(loc, value, type);
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Common canonicalization pattern support logic
//===----------------------------------------------------------------------===//

/// This is a common class used for patterns of the form
/// "someop(memrefcast) -> someop". It folds the source of any memref.cast
/// into the root operation directly.
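///
/// For illustration (value names invented here):
///
/// ```mlir
///   %0 = memref.cast %arg : memref<8xf32> to memref<?xf32>
///   memref.dealloc %0 : memref<?xf32>
/// ```
///
/// folds to `memref.dealloc %arg : memref<8xf32>`.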
LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) {
  bool folded = false;
  for (OpOperand &operand : op->getOpOperands()) {
    auto cast = operand.get().getDefiningOp<CastOp>();
    if (cast && operand.get() != inner &&
        !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
      operand.set(cast.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

/// Return an unranked/ranked tensor type for the given unranked/ranked memref
/// type.
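///
/// For illustration: `memref<4x?xf32>` maps to `tensor<4x?xf32>`, an unranked
/// `memref<*xf32>` maps to `tensor<*xf32>`, and any other type maps to `none`.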
Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
  if (auto memref = type.dyn_cast<MemRefType>())
    return RankedTensorType::get(memref.getShape(), memref.getElementType());
  if (auto memref = type.dyn_cast<UnrankedMemRefType>())
    return UnrankedTensorType::get(memref.getElementType());
  return NoneType::get(type.getContext());
}

//===----------------------------------------------------------------------===//
// AllocOp / AllocaOp
//===----------------------------------------------------------------------===//

template <typename AllocLikeOp>
static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
  static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
                "applies to only alloc or alloca");
  auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
  if (!memRefType)
    return op.emitOpError("result must be a memref");

  if (static_cast<int64_t>(op.getDynamicSizes().size()) !=
      memRefType.getNumDynamicDims())
    return op.emitOpError("dimension operand count does not equal memref "
                          "dynamic dimension count");

  unsigned numSymbols = 0;
  if (!memRefType.getLayout().isIdentity())
    numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols();
  if (op.getSymbolOperands().size() != numSymbols)
    return op.emitOpError("symbol operand count does not equal memref symbol "
                          "count: expected ")
           << numSymbols << ", got " << op.getSymbolOperands().size();

  return success();
}

LogicalResult AllocOp::verify() { return verifyAllocLikeOp(*this); }

LogicalResult AllocaOp::verify() {
  // An alloca op needs to have an ancestor with an allocation scope trait.
  if (!(*this)->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
    return emitOpError(
        "requires an ancestor op with AutomaticAllocationScope trait");

  return verifyAllocLikeOp(*this);
}

namespace {
/// Fold constant dimensions into an alloc-like operation.
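///
/// For illustration (shape and value names invented here):
///
/// ```mlir
///   %c8 = arith.constant 8 : index
///   %0 = memref.alloc(%c8, %d) : memref<?x?xf32>
/// ```
///
/// is rewritten to
///
/// ```mlir
///   %1 = memref.alloc(%d) : memref<8x?xf32>
///   %0 = memref.cast %1 : memref<8x?xf32> to memref<?x?xf32>
/// ```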
template <typename AllocLikeOp>
struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
  using OpRewritePattern<AllocLikeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocLikeOp alloc,
                                PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants. If so, we can
    // substitute and drop them.
    if (llvm::none_of(alloc.getDynamicSizes(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto memrefType = alloc.getType();

    // Ok, we have one or more constant operands. Collect the non-constant ones
    // and keep track of the resultant memref type to build.
    SmallVector<int64_t, 4> newShapeConstants;
    newShapeConstants.reserve(memrefType.getRank());
    SmallVector<Value, 4> dynamicSizes;

    unsigned dynamicDimPos = 0;
    for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
      if (dimSize != -1) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
      auto dynamicSize = alloc.getDynamicSizes()[dynamicDimPos];
      auto *defOp = dynamicSize.getDefiningOp();
      if (auto constantIndexOp =
              dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
        // Dynamic shape dimension will be folded.
        newShapeConstants.push_back(constantIndexOp.value());
      } else {
        // Dynamic shape dimension not folded; copy dynamicSize from old memref.
        newShapeConstants.push_back(-1);
        dynamicSizes.push_back(dynamicSize);
      }
      dynamicDimPos++;
    }

    // Create new memref type (which will have fewer dynamic dimensions).
    MemRefType newMemRefType =
        MemRefType::Builder(memrefType).setShape(newShapeConstants);
    assert(static_cast<int64_t>(dynamicSizes.size()) ==
           newMemRefType.getNumDynamicDims());

    // Create and insert the alloc op for the new memref.
    auto newAlloc = rewriter.create<AllocLikeOp>(
        alloc.getLoc(), newMemRefType, dynamicSizes, alloc.getSymbolOperands(),
        alloc.getAlignmentAttr());
    // Insert a cast so we have the same type as the old alloc.
    auto resultCast =
        rewriter.create<CastOp>(alloc.getLoc(), alloc.getType(), newAlloc);

    rewriter.replaceOp(alloc, {resultCast});
    return success();
  }
};

/// Fold alloc operations with no users or only store and dealloc uses.
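///
/// For illustration (an assumed snippet): an allocation such as
///
/// ```mlir
///   %0 = memref.alloc() : memref<4xf32>
///   memref.store %cst, %0[%i] : memref<4xf32>
///   memref.dealloc %0 : memref<4xf32>
/// ```
///
/// is never read, so the pattern erases all three operations.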
template <typename T>
struct SimplifyDeadAlloc : public OpRewritePattern<T> {
  using OpRewritePattern<T>::OpRewritePattern;

  LogicalResult matchAndRewrite(T alloc,
                                PatternRewriter &rewriter) const override {
    if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
          if (auto storeOp = dyn_cast<StoreOp>(op))
            return storeOp.getValue() == alloc;
          return !isa<DeallocOp>(op);
        }))
      return failure();

    for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
      rewriter.eraseOp(user);

    rewriter.eraseOp(alloc);
    return success();
  }
};
} // namespace

void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
}

void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
  results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
      context);
}

//===----------------------------------------------------------------------===//
// AllocaScopeOp
//===----------------------------------------------------------------------===//

void AllocaScopeOp::print(OpAsmPrinter &p) {
  bool printBlockTerminators = false;

  p << ' ';
  if (!getResults().empty()) {
    p << " -> (" << getResultTypes() << ")";
    printBlockTerminators = true;
  }
  p << ' ';
  p.printRegion(getBodyRegion(),
                /*printEntryBlockArgs=*/false,
                /*printBlockTerminators=*/printBlockTerminators);
  p.printOptionalAttrDict((*this)->getAttrs());
}

ParseResult AllocaScopeOp::parse(OpAsmParser &parser, OperationState &result) {
  // Create a region for the body.
  result.regions.reserve(1);
  Region *bodyRegion = result.addRegion();

  // Parse optional results type list.
  if (parser.parseOptionalArrowTypeList(result.types))
    return failure();

  // Parse the body region.
  if (parser.parseRegion(*bodyRegion, /*arguments=*/{}))
    return failure();
  AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
                                  result.location);

  // Parse the optional attribute list.
  if (parser.parseOptionalAttrDict(result.attributes))
    return failure();

  return success();
}

void AllocaScopeOp::getSuccessorRegions(
    Optional<unsigned> index, ArrayRef<Attribute> operands,
    SmallVectorImpl<RegionSuccessor> &regions) {
  if (index) {
    regions.push_back(RegionSuccessor(getResults()));
    return;
  }

  regions.push_back(RegionSuccessor(&getBodyRegion()));
}

/// Given an operation, return whether this op is guaranteed to
/// allocate an AutomaticAllocationScopeResource.
static bool isGuaranteedAutomaticAllocation(Operation *op) {
  MemoryEffectOpInterface interface = dyn_cast<MemoryEffectOpInterface>(op);
  if (!interface)
    return false;
  for (auto res : op->getResults()) {
    if (auto effect =
            interface.getEffectOnValue<MemoryEffects::Allocate>(res)) {
      if (isa<SideEffects::AutomaticAllocationScopeResource>(
              effect->getResource()))
        return true;
    }
  }
  return false;
}

/// Given an operation, return whether this op itself could
/// allocate an AutomaticAllocationScopeResource. Note that
/// this will not check whether an operation contained within
/// the op can allocate.
static bool isOpItselfPotentialAutomaticAllocation(Operation *op) {
  // This op itself doesn't create a stack allocation,
  // the inner allocation should be handled separately.
  if (op->hasTrait<OpTrait::HasRecursiveSideEffects>())
    return false;
  MemoryEffectOpInterface interface = dyn_cast<MemoryEffectOpInterface>(op);
  if (!interface)
    return true;
  for (auto res : op->getResults()) {
    if (auto effect =
            interface.getEffectOnValue<MemoryEffects::Allocate>(res)) {
      if (isa<SideEffects::AutomaticAllocationScopeResource>(
              effect->getResource()))
        return true;
    }
  }
  return false;
}

/// Return whether this op is the last non-terminating op
/// in a region. That is to say, it is in a one-block region
/// and is only followed by a terminator. This prevents
/// extending the lifetime of allocations.
static bool lastNonTerminatorInRegion(Operation *op) {
  return op->getNextNode() == op->getBlock()->getTerminator() &&
         op->getParentRegion()->getBlocks().size() == 1;
}

/// Inline an AllocaScopeOp if either the direct parent is an allocation scope
/// or it contains no allocation.
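///
/// For illustration (value names invented here): a scope such as
///
/// ```mlir
///   memref.alloca_scope {
///     %v = memref.load %m[%i] : memref<8xf32>
///   }
/// ```
///
/// contains no potential stack allocation, so it can be replaced by its body.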
struct AllocaScopeInliner : public OpRewritePattern<AllocaScopeOp> {
  using OpRewritePattern<AllocaScopeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocaScopeOp op,
                                PatternRewriter &rewriter) const override {
    bool hasPotentialAlloca =
        op->walk<WalkOrder::PreOrder>([&](Operation *alloc) {
            if (alloc == op)
              return WalkResult::advance();
            if (isOpItselfPotentialAutomaticAllocation(alloc))
              return WalkResult::interrupt();
            if (alloc->hasTrait<OpTrait::AutomaticAllocationScope>())
              return WalkResult::skip();
            return WalkResult::advance();
          }).wasInterrupted();

    // If this contains no potential allocation, it is always legal to
    // inline. Otherwise, consider two conditions:
    if (hasPotentialAlloca) {
      // If the parent isn't an allocation scope, or we are not the last
      // non-terminator op in the parent, we will extend the lifetime.
      if (!op->getParentOp()->hasTrait<OpTrait::AutomaticAllocationScope>())
        return failure();
      if (!lastNonTerminatorInRegion(op))
        return failure();
    }

    Block *block = &op.getRegion().front();
    Operation *terminator = block->getTerminator();
    ValueRange results = terminator->getOperands();
    rewriter.mergeBlockBefore(block, op);
    rewriter.replaceOp(op, results);
    rewriter.eraseOp(terminator);
    return success();
  }
};

/// Move allocations into an allocation scope, if it is legal to
/// move them (e.g. their operands are available at the location
/// the op would be moved to).
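///
/// For illustration (an assumed nesting): a guaranteed stack allocation such
/// as `%a = memref.alloca() : memref<4xf32>` inside a `memref.alloca_scope`
/// whose parent is not itself an allocation scope is cloned out to just
/// inside the closest enclosing automatic allocation scope (e.g. the function
/// body), provided each op on the way is the last non-terminator op of a
/// single-block region.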
struct AllocaScopeHoister : public OpRewritePattern<AllocaScopeOp> {
  using OpRewritePattern<AllocaScopeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocaScopeOp op,
                                PatternRewriter &rewriter) const override {

    if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
      return failure();

    Operation *lastParentWithoutScope = op->getParentOp();

    if (!lastParentWithoutScope ||
        lastParentWithoutScope->hasTrait<OpTrait::AutomaticAllocationScope>())
      return failure();

    // Only apply if this is the last non-terminator op in the block
    // (lest the lifetime be extended) of a one-block region.
    if (!lastNonTerminatorInRegion(op) ||
        !lastNonTerminatorInRegion(lastParentWithoutScope))
      return failure();

    while (!lastParentWithoutScope->getParentOp()
                ->hasTrait<OpTrait::AutomaticAllocationScope>()) {
      lastParentWithoutScope = lastParentWithoutScope->getParentOp();
      if (!lastParentWithoutScope ||
          !lastNonTerminatorInRegion(lastParentWithoutScope))
        return failure();
    }
    assert(lastParentWithoutScope->getParentOp()
               ->hasTrait<OpTrait::AutomaticAllocationScope>());

    Region *containingRegion = nullptr;
    for (auto &r : lastParentWithoutScope->getRegions()) {
      if (r.isAncestor(op->getParentRegion())) {
        assert(containingRegion == nullptr &&
               "only one region can contain the op");
        containingRegion = &r;
      }
    }
    assert(containingRegion && "op must be contained in a region");

    SmallVector<Operation *> toHoist;
    op->walk([&](Operation *alloc) {
      if (!isGuaranteedAutomaticAllocation(alloc))
        return WalkResult::skip();

      // If any operand is not defined before the location of
      // lastParentWithoutScope (i.e. where we would hoist to), skip.
      if (llvm::any_of(alloc->getOperands(), [&](Value v) {
            return containingRegion->isAncestor(v.getParentRegion());
          }))
        return WalkResult::skip();
      toHoist.push_back(alloc);
      return WalkResult::advance();
    });

    if (toHoist.empty())
      return failure();
    rewriter.setInsertionPoint(lastParentWithoutScope);
    for (auto *op : toHoist) {
      auto *cloned = rewriter.clone(*op);
      rewriter.replaceOp(op, cloned->getResults());
    }
    return success();
  }
};

void AllocaScopeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<AllocaScopeInliner, AllocaScopeHoister>(context);
}

//===----------------------------------------------------------------------===//
// AssumeAlignmentOp
//===----------------------------------------------------------------------===//

LogicalResult AssumeAlignmentOp::verify() {
  if (!llvm::isPowerOf2_32(getAlignment()))
    return emitOpError("alignment must be power of 2");
  return success();
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//

/// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
/// and implement canonicalization patterns for ops in different dialects that
/// may consume the results of memref.cast operations. Such foldable memref.cast
/// operations are typically inserted as `view` and `subview` ops are
/// canonicalized, to preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked memrefs with strided semantics and same
/// element type and rank.
/// 2. each of the source's size, offset or stride has more static information
/// than the corresponding result's size, offset or stride.
///
/// Example 1:
/// ```mlir
///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
///   %2 = consumer %1 ... : memref<?x?xf32> ...
/// ```
///
/// may fold into:
///
/// ```mlir
///   %2 = consumer %0 ... : memref<8x16xf32> ...
/// ```
///
/// Example 2:
/// ```
///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
///          to memref<?x?xf32>
///   consumer %1 : memref<?x?xf32> ...
/// ```
///
/// may fold into:
///
/// ```
///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
/// ```
bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
  MemRefType sourceType = castOp.getSource().getType().dyn_cast<MemRefType>();
  MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();

  // Requires ranked MemRefType.
  if (!sourceType || !resultType)
    return false;

  // Requires same elemental type.
  if (sourceType.getElementType() != resultType.getElementType())
    return false;

  // Requires same rank.
  if (sourceType.getRank() != resultType.getRank())
    return false;

  // Only fold casts between strided memref forms.
  int64_t sourceOffset, resultOffset;
  SmallVector<int64_t, 4> sourceStrides, resultStrides;
  if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
      failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
    return false;

  // If cast is towards more static sizes along any dimension, don't fold.
  for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
    auto ss = std::get<0>(it), st = std::get<1>(it);
    if (ss != st)
      if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st))
        return false;
  }

  // If cast is towards more static offset along any dimension, don't fold.
  if (sourceOffset != resultOffset)
    if (ShapedType::isDynamicStrideOrOffset(sourceOffset) &&
        !ShapedType::isDynamicStrideOrOffset(resultOffset))
      return false;

  // If cast is towards more static strides along any dimension, don't fold.
  for (auto it : llvm::zip(sourceStrides, resultStrides)) {
    auto ss = std::get<0>(it), st = std::get<1>(it);
    if (ss != st)
      if (ShapedType::isDynamicStrideOrOffset(ss) &&
          !ShapedType::isDynamicStrideOrOffset(st))
        return false;
  }

  return true;
}

bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
  if (inputs.size() != 1 || outputs.size() != 1)
    return false;
  Type a = inputs.front(), b = outputs.front();
  auto aT = a.dyn_cast<MemRefType>();
  auto bT = b.dyn_cast<MemRefType>();

  auto uaT = a.dyn_cast<UnrankedMemRefType>();
  auto ubT = b.dyn_cast<UnrankedMemRefType>();

  if (aT && bT) {
    if (aT.getElementType() != bT.getElementType())
      return false;
    if (aT.getLayout() != bT.getLayout()) {
      int64_t aOffset, bOffset;
      SmallVector<int64_t, 4> aStrides, bStrides;
      if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
          failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
          aStrides.size() != bStrides.size())
        return false;

      // Strides along a dimension/offset are compatible if the value in the
      // source memref is static and the value in the target memref is the
      // same. They are also compatible if either one is dynamic (see
      // description of MemRefCastOp for details).
      auto checkCompatible = [](int64_t a, int64_t b) {
        return (a == MemRefType::getDynamicStrideOrOffset() ||
                b == MemRefType::getDynamicStrideOrOffset() || a == b);
      };
      if (!checkCompatible(aOffset, bOffset))
        return false;
      for (const auto &aStride : enumerate(aStrides))
        if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
          return false;
    }
    if (aT.getMemorySpace() != bT.getMemorySpace())
      return false;

    // They must have the same rank, and any specified dimensions must match.
    if (aT.getRank() != bT.getRank())
      return false;

    for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
      int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
      if (aDim != -1 && bDim != -1 && aDim != bDim)
        return false;
    }
    return true;
  } else {
    if (!aT && !uaT)
      return false;
    if (!bT && !ubT)
      return false;
    // Unranked to unranked casting is unsupported.
    if (uaT && ubT)
      return false;

    auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
    auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
    if (aEltType != bEltType)
      return false;

    auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
    auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
    return aMemSpace == bMemSpace;
  }

  return false;
}

OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
  return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
}

//===----------------------------------------------------------------------===//
// CopyOp
//===----------------------------------------------------------------------===//

namespace {
/// If the source/target of a CopyOp is a CastOp that does not modify the shape
/// and element type, the cast can be skipped. Such CastOps only cast the layout
/// of the type.
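///
/// For illustration (types and value names invented here):
///
/// ```mlir
///   %0 = memref.cast %src
///       : memref<8x16xf32, affine_map<(d0, d1) -> (d0 * 16 + d1)>>
///         to memref<8x16xf32>
///   memref.copy %0, %dst : memref<8x16xf32> to memref<8x16xf32>
/// ```
///
/// can copy from `%src` directly, dropping the cast.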
struct FoldCopyOfCast : public OpRewritePattern<CopyOp> {
  using OpRewritePattern<CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CopyOp copyOp,
                                PatternRewriter &rewriter) const override {
    bool modified = false;

    // Check source.
    if (auto castOp = copyOp.getSource().getDefiningOp<CastOp>()) {
      auto fromType = castOp.getSource().getType().dyn_cast<MemRefType>();
      auto toType = castOp.getType().dyn_cast<MemRefType>();

      if (fromType && toType) {
        if (fromType.getShape() == toType.getShape() &&
            fromType.getElementType() == toType.getElementType()) {
          rewriter.updateRootInPlace(copyOp, [&] {
            copyOp.getSourceMutable().assign(castOp.getSource());
          });
          modified = true;
        }
      }
    }

    // Check target.
    if (auto castOp = copyOp.getTarget().getDefiningOp<CastOp>()) {
      auto fromType = castOp.getSource().getType().dyn_cast<MemRefType>();
      auto toType = castOp.getType().dyn_cast<MemRefType>();

      if (fromType && toType) {
        if (fromType.getShape() == toType.getShape() &&
            fromType.getElementType() == toType.getElementType()) {
          rewriter.updateRootInPlace(copyOp, [&] {
            copyOp.getTargetMutable().assign(castOp.getSource());
          });
          modified = true;
        }
      }
    }

    return success(modified);
  }
};

/// Fold memref.copy(%x, %x).
struct FoldSelfCopy : public OpRewritePattern<CopyOp> {
  using OpRewritePattern<CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CopyOp copyOp,
                                PatternRewriter &rewriter) const override {
    if (copyOp.getSource() != copyOp.getTarget())
      return failure();

    rewriter.eraseOp(copyOp);
    return success();
  }
};
} // namespace

void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
  results.add<FoldCopyOfCast, FoldSelfCopy>(context);
}

LogicalResult CopyOp::fold(ArrayRef<Attribute> cstOperands,
                           SmallVectorImpl<OpFoldResult> &results) {
  /// copy(memrefcast) -> copy
  bool folded = false;
  Operation *op = *this;
  for (OpOperand &operand : op->getOpOperands()) {
    auto castOp = operand.get().getDefiningOp<memref::CastOp>();
    if (castOp && memref::CastOp::canFoldIntoConsumerOp(castOp)) {
      operand.set(castOp.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//

LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dealloc(memrefcast) -> dealloc
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  int64_t index) {
  auto loc = result.location;
  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
  build(builder, result, source, indexValue);
}

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  Value index) {
  auto indexTy = builder.getIndexType();
  build(builder, result, indexTy, source, index);
}

Optional<int64_t> DimOp::getConstantIndex() {
  if (auto constantOp = getIndex().getDefiningOp<arith::ConstantOp>())
    return constantOp.getValue().cast<IntegerAttr>().getInt();
  return {};
}

LogicalResult DimOp::verify() {
  // Assume unknown index to be in range.
  Optional<int64_t> index = getConstantIndex();
  if (!index)
    return success();

  // Check that constant index is not knowingly out of range.
  auto type = getSource().getType();
  if (auto memrefType = type.dyn_cast<MemRefType>()) {
    if (*index >= memrefType.getRank())
      return emitOpError("index is out of range");
  } else if (type.isa<UnrankedMemRefType>()) {
    // Assume index to be in range.
  } else {
    llvm_unreachable("expected operand with memref type");
  }
  return success();
}

/// Return a map with key being elements in `vals` and data being number of
/// occurrences of it. Use std::map, since the `vals` here are strides and the
/// dynamic stride value is the same as the tombstone value for
/// `DenseMap<int64_t>`.
static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
  std::map<int64_t, unsigned> numOccurences;
  for (auto val : vals)
    numOccurences[val]++;
  return numOccurences;
}

/// Given the `originalType` and a `candidateReducedType` whose shape is assumed
/// to be a subset of `originalType` with some `1` entries erased, return the
/// set of indices that specifies which of the entries of `originalShape` are
/// dropped to obtain `reducedShape`.
/// This accounts for cases where there are multiple unit-dims, but only a
/// subset of those are dropped. For MemRefTypes these can be disambiguated
/// using the strides. If a dimension is dropped the stride must be dropped too.
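///
/// For illustration (an assumed rank-reducing subview): dropping one of the
/// two unit dimensions in
///
/// ```mlir
///   %0 = memref.subview %arg[0, 0, 0] [1, 1, 4] [1, 1, 1]
///       : memref<1x1x4xf32> to memref<1x4xf32>
/// ```
///
/// is ambiguous from the shapes alone; the mask is derived by matching stride
/// multiplicities between the original and the reduced type.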
static llvm::Optional<llvm::SmallBitVector>
computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
                               ArrayRef<OpFoldResult> sizes) {
  llvm::SmallBitVector unusedDims(originalType.getRank());
  if (originalType.getRank() == reducedType.getRank())
    return unusedDims;

  for (const auto &dim : llvm::enumerate(sizes))
    if (auto attr = dim.value().dyn_cast<Attribute>())
      if (attr.cast<IntegerAttr>().getInt() == 1)
        unusedDims.set(dim.index());

  // Early exit for the case where the number of unused dims matches the number
  // of ranks reduced.
  if (static_cast<int64_t>(unusedDims.count()) + reducedType.getRank() ==
      originalType.getRank())
    return unusedDims;

  SmallVector<int64_t> originalStrides, candidateStrides;
  int64_t originalOffset, candidateOffset;
  if (failed(
          getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
      failed(
          getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
    return llvm::None;

  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the dims
  // is 1. Track the number of occurrences of the strides in the original type
  // and the candidate type. For each unused dim that stride should not be
  // present in the candidate type. Note that there could be multiple dimensions
  // that have the same size. We don't need to exactly figure out which dim
  // corresponds to which stride, we just need to verify that the number of
  // repetitions of a stride in the original + number of unused dims with that
  // stride == number of repetitions of a stride in the candidate.
  std::map<int64_t, unsigned> currUnaccountedStrides =
      getNumOccurences(originalStrides);
  std::map<int64_t, unsigned> candidateStridesNumOccurences =
      getNumOccurences(candidateStrides);
  for (size_t dim = 0, e = unusedDims.size(); dim != e; ++dim) {
    if (!unusedDims.test(dim))
      continue;
    int64_t originalStride = originalStrides[dim];
    if (currUnaccountedStrides[originalStride] >
        candidateStridesNumOccurences[originalStride]) {
      // This dim can be treated as dropped.
      currUnaccountedStrides[originalStride]--;
      continue;
    }
    if (currUnaccountedStrides[originalStride] ==
        candidateStridesNumOccurences[originalStride]) {
      // The stride for this is not dropped. Keep as is.
      unusedDims.reset(dim);
      continue;
    }
    if (currUnaccountedStrides[originalStride] <
        candidateStridesNumOccurences[originalStride]) {
      // This should never happen. Can't have a stride in the reduced rank type
      // that wasn't in the original one.
      return llvm::None;
    }
  }

  if ((int64_t)unusedDims.count() + reducedType.getRank() !=
      originalType.getRank())
    return llvm::None;
  return unusedDims;
}

llvm::SmallBitVector SubViewOp::getDroppedDims() {
  MemRefType sourceType = getSourceType();
  MemRefType resultType = getType();
  llvm::Optional<llvm::SmallBitVector> unusedDims =
      computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
  assert(unusedDims && "unable to find unused dims of subview");
  return *unusedDims;
}

OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // All forms of folding require a known index.
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
  if (!index)
    return {};

  // Folding for unranked types (UnrankedMemRefType) is not supported.
  auto memrefType = getSource().getType().dyn_cast<MemRefType>();
  if (!memrefType)
    return {};

  // Fold if the shape extent along the given index is known.
  if (!memrefType.isDynamicDim(index.getInt())) {
    Builder builder(getContext());
    return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
  }

  // The size at the given index is now known to be a dynamic size.
  unsigned unsignedIndex = index.getValue().getZExtValue();

  // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
  Operation *definingOp = getSource().getDefiningOp();

  if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
    return *(alloc.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
    return *(alloca.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
    return *(view.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
    llvm::SmallBitVector unusedDims = subview.getDroppedDims();
    unsigned resultIndex = 0;
    unsigned sourceRank = subview.getSourceType().getRank();
    unsigned sourceIndex = 0;
    for (auto i : llvm::seq<unsigned>(0, sourceRank)) {
      if (unusedDims.test(i))
        continue;
      if (resultIndex == unsignedIndex) {
        sourceIndex = i;
        break;
      }
      resultIndex++;
    }
    assert(subview.isDynamicSize(sourceIndex) &&
           "expected dynamic subview size");
    return subview.getDynamicSize(sourceIndex);
  }

  if (auto sizeInterface =
          dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
    assert(sizeInterface.isDynamicSize(unsignedIndex) &&
           "Expected dynamic subview size");
    return sizeInterface.getDynamicSize(unsignedIndex);
  }

  // dim(memrefcast) -> dim
  if (succeeded(foldMemRefCast(*this)))
    return getResult();

  return {};
}

namespace {
/// Fold dim of a memref reshape operation to a load into the reshape's shape
/// operand.
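///
/// For illustration (value names invented here):
///
/// ```mlir
///   %r = memref.reshape %src(%shape)
///       : (memref<*xf32>, memref<2xindex>) -> memref<?x?xf32>
///   %d = memref.dim %r, %c0 : memref<?x?xf32>
/// ```
///
/// is rewritten to load the extent from the shape operand:
/// `%d = memref.load %shape[%c0] : memref<2xindex>`.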
|
|
|
|
struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
|
|
|
|
using OpRewritePattern<DimOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(DimOp dim,
|
|
|
|
PatternRewriter &rewriter) const override {
|
2022-07-10 21:19:11 -07:00
|
|
|
auto reshape = dim.getSource().getDefiningOp<ReshapeOp>();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
if (!reshape)
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
// Place the load directly after the reshape to ensure that the shape memref
|
|
|
|
// was not mutated.
|
|
|
|
rewriter.setInsertionPointAfter(reshape);
|
2021-06-15 12:32:16 +02:00
|
|
|
Location loc = dim.getLoc();
|
2022-07-10 21:19:11 -07:00
|
|
|
Value load =
|
|
|
|
rewriter.create<LoadOp>(loc, reshape.getShape(), dim.getIndex());
|
2021-06-15 12:32:16 +02:00
|
|
|
if (load.getType() != dim.getType())
|
2021-10-12 23:14:57 +00:00
|
|
|
load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
|
2021-06-15 12:32:16 +02:00
|
|
|
rewriter.replaceOp(dim, load);
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-12-07 18:27:58 +00:00
|
|
|
} // namespace
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2021-03-22 16:58:34 -07:00
|
|
|
void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
|
2021-02-10 13:53:11 +01:00
|
|
|
MLIRContext *context) {
|
2021-11-25 11:42:16 +01:00
|
|
|
results.add<DimOfMemRefReshape>(context);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// DmaStartOp
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
void DmaStartOp::build(OpBuilder &builder, OperationState &result,
|
|
|
|
Value srcMemRef, ValueRange srcIndices, Value destMemRef,
|
|
|
|
ValueRange destIndices, Value numElements,
|
|
|
|
Value tagMemRef, ValueRange tagIndices, Value stride,
|
|
|
|
Value elementsPerStride) {
|
|
|
|
result.addOperands(srcMemRef);
|
|
|
|
result.addOperands(srcIndices);
|
|
|
|
result.addOperands(destMemRef);
|
|
|
|
result.addOperands(destIndices);
|
|
|
|
result.addOperands({numElements, tagMemRef});
|
|
|
|
result.addOperands(tagIndices);
|
|
|
|
if (stride)
|
|
|
|
result.addOperands({stride, elementsPerStride});
|
|
|
|
}
|
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
void DmaStartOp::print(OpAsmPrinter &p) {
|
|
|
|
p << " " << getSrcMemRef() << '[' << getSrcIndices() << "], "
|
|
|
|
<< getDstMemRef() << '[' << getDstIndices() << "], " << getNumElements()
|
|
|
|
<< ", " << getTagMemRef() << '[' << getTagIndices() << ']';
|
|
|
|
if (isStrided())
|
|
|
|
p << ", " << getStride() << ", " << getNumElementsPerStride();
|
2021-09-24 19:32:23 +00:00
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
p.printOptionalAttrDict((*this)->getAttrs());
|
|
|
|
p << " : " << getSrcMemRef().getType() << ", " << getDstMemRef().getType()
|
|
|
|
<< ", " << getTagMemRef().getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Parse DmaStartOp.
|
|
|
|
// Ex:
|
|
|
|
// %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size,
|
|
|
|
// %tag[%index], %stride, %num_elt_per_stride :
|
|
|
|
// : memref<3076 x f32, 0>,
|
|
|
|
// memref<1024 x f32, 2>,
|
|
|
|
// memref<1 x i32>
|
|
|
|
//
|
2022-02-07 17:54:04 -08:00
|
|
|
ParseResult DmaStartOp::parse(OpAsmParser &parser, OperationState &result) {
|
2022-03-21 21:42:13 +01:00
|
|
|
OpAsmParser::UnresolvedOperand srcMemRefInfo;
|
|
|
|
SmallVector<OpAsmParser::UnresolvedOperand, 4> srcIndexInfos;
|
|
|
|
OpAsmParser::UnresolvedOperand dstMemRefInfo;
|
|
|
|
SmallVector<OpAsmParser::UnresolvedOperand, 4> dstIndexInfos;
|
|
|
|
OpAsmParser::UnresolvedOperand numElementsInfo;
|
|
|
|
OpAsmParser::UnresolvedOperand tagMemrefInfo;
|
|
|
|
SmallVector<OpAsmParser::UnresolvedOperand, 4> tagIndexInfos;
|
|
|
|
SmallVector<OpAsmParser::UnresolvedOperand, 2> strideInfo;
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
SmallVector<Type, 3> types;
|
|
|
|
auto indexType = parser.getBuilder().getIndexType();
|
|
|
|
|
|
|
|
// Parse and resolve the following list of operands:
|
|
|
|
// *) source memref followed by its indices (in square brackets).
|
|
|
|
// *) destination memref followed by its indices (in square brackets).
|
|
|
|
// *) dma size in KiB.
|
|
|
|
if (parser.parseOperand(srcMemRefInfo) ||
|
|
|
|
parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
|
|
|
|
parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
|
|
|
|
parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
|
|
|
|
parser.parseComma() || parser.parseOperand(numElementsInfo) ||
|
|
|
|
parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
|
|
|
|
parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
// Parse optional stride and elements per stride.
|
|
|
|
if (parser.parseTrailingOperandList(strideInfo))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
bool isStrided = strideInfo.size() == 2;
|
|
|
|
if (!strideInfo.empty() && !isStrided) {
|
|
|
|
return parser.emitError(parser.getNameLoc(),
|
|
|
|
"expected two stride related operands");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (parser.parseColonTypeList(types))
|
|
|
|
return failure();
|
|
|
|
if (types.size() != 3)
|
|
|
|
return parser.emitError(parser.getNameLoc(), "fewer/more types expected");
|
|
|
|
|
|
|
|
if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
|
|
|
|
parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
|
|
|
|
parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
|
|
|
|
parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
|
|
|
|
// size should be an index.
|
|
|
|
parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
|
|
|
|
parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
|
|
|
|
// tag indices should be index.
|
|
|
|
parser.resolveOperands(tagIndexInfos, indexType, result.operands))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
if (isStrided) {
|
|
|
|
if (parser.resolveOperands(strideInfo, indexType, result.operands))
|
|
|
|
return failure();
|
|
|
|
}
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult DmaStartOp::verify() {
|
|
|
|
unsigned numOperands = getNumOperands();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// Mandatory non-variadic operands are: src memref, dst memref, tag memref and
|
|
|
|
// the number of elements.
|
|
|
|
if (numOperands < 4)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected at least 4 operands");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// Check types of operands. The order of these calls is important: the later
|
|
|
|
// calls rely on some type properties to compute the operand position.
|
|
|
|
// 1. Source memref.
|
2022-02-02 10:18:06 -08:00
|
|
|
if (!getSrcMemRef().getType().isa<MemRefType>())
|
|
|
|
return emitOpError("expected source to be of memref type");
|
|
|
|
if (numOperands < getSrcMemRefRank() + 4)
|
|
|
|
return emitOpError() << "expected at least " << getSrcMemRefRank() + 4
|
|
|
|
<< " operands";
|
|
|
|
if (!getSrcIndices().empty() &&
|
|
|
|
!llvm::all_of(getSrcIndices().getTypes(),
|
2021-02-10 13:53:11 +01:00
|
|
|
[](Type t) { return t.isIndex(); }))
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected source indices to be of index type");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// 2. Destination memref.
|
2022-02-02 10:18:06 -08:00
|
|
|
if (!getDstMemRef().getType().isa<MemRefType>())
|
|
|
|
return emitOpError("expected destination to be of memref type");
|
|
|
|
unsigned numExpectedOperands = getSrcMemRefRank() + getDstMemRefRank() + 4;
|
2021-02-10 13:53:11 +01:00
|
|
|
if (numOperands < numExpectedOperands)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError() << "expected at least " << numExpectedOperands
|
|
|
|
<< " operands";
|
|
|
|
if (!getDstIndices().empty() &&
|
|
|
|
!llvm::all_of(getDstIndices().getTypes(),
|
2021-02-10 13:53:11 +01:00
|
|
|
[](Type t) { return t.isIndex(); }))
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected destination indices to be of index type");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// 3. Number of elements.
|
2022-02-02 10:18:06 -08:00
|
|
|
if (!getNumElements().getType().isIndex())
|
|
|
|
return emitOpError("expected num elements to be of index type");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// 4. Tag memref.
|
2022-02-02 10:18:06 -08:00
|
|
|
if (!getTagMemRef().getType().isa<MemRefType>())
|
|
|
|
return emitOpError("expected tag to be of memref type");
|
|
|
|
numExpectedOperands += getTagMemRefRank();
|
2021-02-10 13:53:11 +01:00
|
|
|
if (numOperands < numExpectedOperands)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError() << "expected at least " << numExpectedOperands
|
|
|
|
<< " operands";
|
|
|
|
if (!getTagIndices().empty() &&
|
|
|
|
!llvm::all_of(getTagIndices().getTypes(),
|
2021-02-10 13:53:11 +01:00
|
|
|
[](Type t) { return t.isIndex(); }))
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected tag indices to be of index type");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// Optional stride-related operands must be either both present or both
|
|
|
|
// absent.
|
|
|
|
if (numOperands != numExpectedOperands &&
|
|
|
|
numOperands != numExpectedOperands + 2)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("incorrect number of operands");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// 5. Strides.
|
2022-02-02 10:18:06 -08:00
|
|
|
if (isStrided()) {
|
|
|
|
if (!getStride().getType().isIndex() ||
|
|
|
|
!getNumElementsPerStride().getType().isIndex())
|
|
|
|
return emitOpError(
|
2021-02-10 13:53:11 +01:00
|
|
|
"expected stride and num elements per stride to be of type index");
|
|
|
|
}
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
|
|
|
|
SmallVectorImpl<OpFoldResult> &results) {
|
|
|
|
/// dma_start(memrefcast) -> dma_start
|
|
|
|
return foldMemRefCast(*this);
|
|
|
|
}
|
|
|
|
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// DmaWaitOp
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
|
|
|
|
SmallVectorImpl<OpFoldResult> &results) {
|
|
|
|
/// dma_wait(memrefcast) -> dma_wait
|
|
|
|
return foldMemRefCast(*this);
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult DmaWaitOp::verify() {
|
2021-09-24 19:32:23 +00:00
|
|
|
// Check that the number of tag indices matches the tagMemRef rank.
|
2022-07-10 21:19:11 -07:00
|
|
|
unsigned numTagIndices = getTagIndices().size();
|
2022-02-02 10:18:06 -08:00
|
|
|
unsigned tagMemRefRank = getTagMemRefRank();
|
2021-09-24 19:32:23 +00:00
|
|
|
if (numTagIndices != tagMemRefRank)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError() << "expected tagIndices to have the same number of "
|
|
|
|
"elements as the tagMemRef rank, expected "
|
|
|
|
<< tagMemRefRank << ", but got " << numTagIndices;
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-01-25 18:41:02 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// GenericAtomicRMWOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
void GenericAtomicRMWOp::build(OpBuilder &builder, OperationState &result,
|
|
|
|
Value memref, ValueRange ivs) {
|
|
|
|
result.addOperands(memref);
|
|
|
|
result.addOperands(ivs);
|
|
|
|
|
|
|
|
if (auto memrefType = memref.getType().dyn_cast<MemRefType>()) {
|
|
|
|
Type elementType = memrefType.getElementType();
|
|
|
|
result.addTypes(elementType);
|
|
|
|
|
|
|
|
Region *bodyRegion = result.addRegion();
|
|
|
|
bodyRegion->push_back(new Block());
|
|
|
|
bodyRegion->addArgument(elementType, memref.getLoc());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult GenericAtomicRMWOp::verify() {
|
|
|
|
auto &body = getRegion();
|
2022-01-25 18:41:02 -08:00
|
|
|
if (body.getNumArguments() != 1)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected single number of entry block arguments");
|
2022-01-25 18:41:02 -08:00
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
if (getResult().getType() != body.getArgument(0).getType())
|
|
|
|
return emitOpError("expected block argument of the same type result type");
|
2022-01-25 18:41:02 -08:00
|
|
|
|
|
|
|
bool hasSideEffects =
|
|
|
|
body.walk([&](Operation *nestedOp) {
|
|
|
|
if (MemoryEffectOpInterface::hasNoEffect(nestedOp))
|
|
|
|
return WalkResult::advance();
|
|
|
|
nestedOp->emitError(
|
|
|
|
"body of 'memref.generic_atomic_rmw' should contain "
|
|
|
|
"only operations with no side effects");
|
|
|
|
return WalkResult::interrupt();
|
|
|
|
})
|
|
|
|
.wasInterrupted();
|
|
|
|
return hasSideEffects ? failure() : success();
|
|
|
|
}
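// Illustrative (assumed, non-normative) custom form round-tripped by the
// parse/print methods below:
//
//   %x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
//   ^bb0(%current_value : f32):
//     %c1 = arith.constant 1.0 : f32
//     %inc = arith.addf %c1, %current_value : f32
//     memref.atomic_yield %inc : f32
//   }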
|
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
ParseResult GenericAtomicRMWOp::parse(OpAsmParser &parser,
|
|
|
|
OperationState &result) {
|
2022-03-21 21:42:13 +01:00
|
|
|
OpAsmParser::UnresolvedOperand memref;
|
2022-01-25 18:41:02 -08:00
|
|
|
Type memrefType;
|
2022-03-21 21:42:13 +01:00
|
|
|
SmallVector<OpAsmParser::UnresolvedOperand, 4> ivs;
|
2022-01-25 18:41:02 -08:00
|
|
|
|
|
|
|
Type indexType = parser.getBuilder().getIndexType();
|
|
|
|
if (parser.parseOperand(memref) ||
|
|
|
|
parser.parseOperandList(ivs, OpAsmParser::Delimiter::Square) ||
|
|
|
|
parser.parseColonType(memrefType) ||
|
|
|
|
parser.resolveOperand(memref, memrefType, result.operands) ||
|
|
|
|
parser.resolveOperands(ivs, indexType, result.operands))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
Region *body = result.addRegion();
|
2022-04-28 17:26:43 -07:00
|
|
|
if (parser.parseRegion(*body, {}) ||
|
2022-01-25 18:41:02 -08:00
|
|
|
parser.parseOptionalAttrDict(result.attributes))
|
|
|
|
return failure();
|
|
|
|
result.types.push_back(memrefType.cast<MemRefType>().getElementType());
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
void GenericAtomicRMWOp::print(OpAsmPrinter &p) {
|
2022-07-10 21:19:11 -07:00
|
|
|
p << ' ' << getMemref() << "[" << getIndices()
|
|
|
|
<< "] : " << getMemref().getType() << ' ';
|
2022-02-07 17:54:04 -08:00
|
|
|
p.printRegion(getRegion());
|
|
|
|
p.printOptionalAttrDict((*this)->getAttrs());
|
2022-01-25 18:41:02 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AtomicYieldOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult AtomicYieldOp::verify() {
|
|
|
|
Type parentType = (*this)->getParentOp()->getResultTypes().front();
|
2022-07-10 21:19:11 -07:00
|
|
|
Type resultType = getResult().getType();
|
2022-01-25 18:41:02 -08:00
|
|
|
if (parentType != resultType)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError() << "types mismatch between yield op: " << resultType
|
|
|
|
<< " and its parent: " << parentType;
|
2022-01-25 18:41:02 -08:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// GlobalOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
|
|
|
|
TypeAttr type,
|
|
|
|
Attribute initialValue) {
|
|
|
|
p << type;
|
|
|
|
if (!op.isExternal()) {
|
|
|
|
p << " = ";
|
|
|
|
if (op.isUninitialized())
|
|
|
|
p << "uninitialized";
|
|
|
|
else
|
|
|
|
p.printAttributeWithoutType(initialValue);
|
|
|
|
}
|
|
|
|
}
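// Illustrative (assumed) forms handled by the printer above and the parser
// below, shown in the context of the full op:
//
//   memref.global @x : memref<2xf32> = dense<[0.0, 1.0]>   // initialized
//   memref.global @y : memref<4xi32> = uninitialized       // uninitialized
//   memref.global @z : memref<8xf32>                        // external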
|
|
|
|
|
|
|
|
static ParseResult
|
|
|
|
parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
|
|
|
|
Attribute &initialValue) {
|
|
|
|
Type type;
|
|
|
|
if (parser.parseType(type))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
auto memrefType = type.dyn_cast<MemRefType>();
|
|
|
|
if (!memrefType || !memrefType.hasStaticShape())
|
|
|
|
return parser.emitError(parser.getNameLoc())
|
|
|
|
<< "type should be static shaped memref, but got " << type;
|
|
|
|
typeAttr = TypeAttr::get(type);
|
|
|
|
|
|
|
|
if (parser.parseOptionalEqual())
|
|
|
|
return success();
|
|
|
|
|
|
|
|
if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
|
2021-09-29 17:47:08 -07:00
|
|
|
initialValue = UnitAttr::get(parser.getContext());
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
Type tensorType = getTensorTypeFromMemRefType(memrefType);
|
|
|
|
if (parser.parseAttribute(initialValue, tensorType))
|
|
|
|
return failure();
|
|
|
|
if (!initialValue.isa<ElementsAttr>())
|
|
|
|
return parser.emitError(parser.getNameLoc())
|
|
|
|
<< "initial value should be a unit or elements attribute";
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult GlobalOp::verify() {
|
2022-07-10 21:19:11 -07:00
|
|
|
auto memrefType = getType().dyn_cast<MemRefType>();
|
2021-02-10 13:53:11 +01:00
|
|
|
if (!memrefType || !memrefType.hasStaticShape())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("type should be static shaped memref, but got ")
|
2022-07-10 21:19:11 -07:00
|
|
|
<< getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// Verify that the initial value, if present, is either a unit attribute or
|
|
|
|
// an elements attribute.
|
2022-07-13 00:57:02 -07:00
|
|
|
if (getInitialValue().has_value()) {
|
2022-07-14 00:19:59 -07:00
|
|
|
Attribute initValue = getInitialValue().value();
|
2021-02-10 13:53:11 +01:00
|
|
|
if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("initial value should be a unit or elements "
|
|
|
|
"attribute, but got ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< initValue;
|
|
|
|
|
|
|
|
// Check that the type of the initial value is compatible with the type of
|
|
|
|
// the global variable.
|
2022-07-18 21:32:38 -07:00
|
|
|
if (auto elementsAttr = initValue.dyn_cast<ElementsAttr>()) {
|
|
|
|
Type initType = elementsAttr.getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
Type tensorType = getTensorTypeFromMemRefType(memrefType);
|
|
|
|
if (initType != tensorType)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("initial value expected to be of type ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< tensorType << ", but was of type " << initType;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-10 21:19:11 -07:00
|
|
|
if (Optional<uint64_t> alignAttr = getAlignment()) {
|
2022-06-20 23:20:25 -07:00
|
|
|
uint64_t alignment = *alignAttr;
|
2021-10-07 05:49:59 -07:00
|
|
|
|
|
|
|
if (!llvm::isPowerOf2_64(alignment))
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError() << "alignment attribute value " << alignment
|
|
|
|
<< " is not a power of 2";
|
2021-10-07 05:49:59 -07:00
|
|
|
}
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
// TODO: verify visibility for declarations.
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-03-17 23:01:54 +05:30
|
|
|
ElementsAttr GlobalOp::getConstantInitValue() {
|
2022-07-10 21:19:11 -07:00
|
|
|
auto initVal = getInitialValue();
|
2022-07-13 00:57:02 -07:00
|
|
|
if (getConstant() && initVal.has_value())
|
2022-07-14 00:19:59 -07:00
|
|
|
return initVal.value().cast<ElementsAttr>();
|
2022-03-17 23:01:54 +05:30
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// GetGlobalOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
LogicalResult
|
|
|
|
GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
|
|
|
|
// Verify that the result type is same as the type of the referenced
|
|
|
|
// memref.global op.
|
|
|
|
auto global =
|
2022-07-10 21:19:11 -07:00
|
|
|
symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getNameAttr());
|
2021-02-10 13:53:11 +01:00
|
|
|
if (!global)
|
|
|
|
return emitOpError("'")
|
2022-07-10 21:19:11 -07:00
|
|
|
<< getName() << "' does not reference a valid global memref";
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2022-07-10 21:19:11 -07:00
|
|
|
Type resultType = getResult().getType();
|
|
|
|
if (global.getType() != resultType)
|
2021-02-10 13:53:11 +01:00
|
|
|
return emitOpError("result type ")
|
2022-07-10 21:19:11 -07:00
|
|
|
<< resultType << " does not match type " << global.getType()
|
|
|
|
<< " of the global memref @" << getName();
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// LoadOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult LoadOp::verify() {
|
|
|
|
if (getNumOperands() != 1 + getMemRefType().getRank())
|
|
|
|
return emitOpError("incorrect number of indices for load");
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
|
|
|
|
/// load(memrefcast) -> load
|
|
|
|
if (succeeded(foldMemRefCast(*this)))
|
|
|
|
return getResult();
|
|
|
|
return OpFoldResult();
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// PrefetchOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
void PrefetchOp::print(OpAsmPrinter &p) {
|
2022-07-10 21:19:11 -07:00
|
|
|
p << " " << getMemref() << '[';
|
|
|
|
p.printOperands(getIndices());
|
|
|
|
p << ']' << ", " << (getIsWrite() ? "write" : "read");
|
|
|
|
p << ", locality<" << getLocalityHint();
|
|
|
|
p << ">, " << (getIsDataCache() ? "data" : "instr");
|
2021-02-10 13:53:11 +01:00
|
|
|
p.printOptionalAttrDict(
|
2022-02-07 17:54:04 -08:00
|
|
|
(*this)->getAttrs(),
|
2021-02-10 13:53:11 +01:00
|
|
|
/*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
|
2022-02-07 17:54:04 -08:00
|
|
|
p << " : " << getMemRefType();
|
2021-02-10 13:53:11 +01:00
|
|
|
}
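// Illustrative (assumed) custom form round-tripped by the print/parse methods:
//
//   memref.prefetch %0[%i, %j], read, locality<3>, data : memref<400x400xf32>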
|
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
ParseResult PrefetchOp::parse(OpAsmParser &parser, OperationState &result) {
|
2022-03-21 21:42:13 +01:00
|
|
|
OpAsmParser::UnresolvedOperand memrefInfo;
|
|
|
|
SmallVector<OpAsmParser::UnresolvedOperand, 4> indexInfo;
|
2021-02-10 13:53:11 +01:00
|
|
|
IntegerAttr localityHint;
|
|
|
|
MemRefType type;
|
|
|
|
StringRef readOrWrite, cacheType;
|
|
|
|
|
|
|
|
auto indexTy = parser.getBuilder().getIndexType();
|
|
|
|
auto i32Type = parser.getBuilder().getIntegerType(32);
|
|
|
|
if (parser.parseOperand(memrefInfo) ||
|
|
|
|
parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
|
|
|
|
parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
|
|
|
|
parser.parseComma() || parser.parseKeyword("locality") ||
|
|
|
|
parser.parseLess() ||
|
|
|
|
parser.parseAttribute(localityHint, i32Type, "localityHint",
|
|
|
|
result.attributes) ||
|
|
|
|
parser.parseGreater() || parser.parseComma() ||
|
|
|
|
parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
|
|
|
|
parser.resolveOperand(memrefInfo, type, result.operands) ||
|
|
|
|
parser.resolveOperands(indexInfo, indexTy, result.operands))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
|
|
|
|
return parser.emitError(parser.getNameLoc(),
|
|
|
|
"rw specifier has to be 'read' or 'write'");
|
|
|
|
result.addAttribute(
|
2022-06-26 20:45:25 -07:00
|
|
|
PrefetchOp::getIsWriteAttrStrName(),
|
2021-02-10 13:53:11 +01:00
|
|
|
parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));
|
|
|
|
|
|
|
|
if (!cacheType.equals("data") && !cacheType.equals("instr"))
|
|
|
|
return parser.emitError(parser.getNameLoc(),
|
|
|
|
"cache type has to be 'data' or 'instr'");
|
|
|
|
|
|
|
|
result.addAttribute(
|
2022-06-26 20:45:25 -07:00
|
|
|
PrefetchOp::getIsDataCacheAttrStrName(),
|
2021-02-10 13:53:11 +01:00
|
|
|
parser.getBuilder().getBoolAttr(cacheType.equals("data")));
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult PrefetchOp::verify() {
|
|
|
|
if (getNumOperands() != 1 + getMemRefType().getRank())
|
|
|
|
return emitOpError("too few indices");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
|
|
|
|
SmallVectorImpl<OpFoldResult> &results) {
|
|
|
|
// prefetch(memrefcast) -> prefetch
|
|
|
|
return foldMemRefCast(*this);
|
|
|
|
}
|
|
|
|
|
2021-12-14 09:35:14 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// RankOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
// Constant fold rank when the rank of the operand is known.
|
|
|
|
auto type = getOperand().getType();
|
|
|
|
auto shapedType = type.dyn_cast<ShapedType>();
|
|
|
|
if (shapedType && shapedType.hasRank())
|
|
|
|
return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
|
|
|
|
return IntegerAttr();
|
|
|
|
}
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ReinterpretCastOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
|
|
|
|
/// `staticSizes` and `staticStrides` are automatically filled with
|
|
|
|
/// source-memref-rank sentinel values that encode dynamic entries.
|
|
|
|
void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
MemRefType resultType, Value source,
|
|
|
|
OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
|
|
|
|
ArrayRef<OpFoldResult> strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
|
|
|
|
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
|
|
|
|
dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
|
|
|
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
|
|
|
|
ShapedType::kDynamicSize);
|
|
|
|
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
|
|
|
build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
|
|
|
|
dynamicStrides, b.getI64ArrayAttr(staticOffsets),
|
|
|
|
b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
|
|
|
|
result.addAttributes(attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
MemRefType resultType, Value source,
|
|
|
|
int64_t offset, ArrayRef<int64_t> sizes,
|
|
|
|
ArrayRef<int64_t> strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<OpFoldResult> sizeValues =
|
|
|
|
llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
|
|
|
|
strideValues, attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
MemRefType resultType, Value source, Value offset,
|
|
|
|
ValueRange sizes, ValueRange strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
|
|
|
|
SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
|
|
|
|
build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: ponder whether we want to allow missing trailing sizes/strides that are
|
2021-06-22 16:49:08 +09:00
|
|
|
// completed automatically, like we have for subview and extract_slice.
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult ReinterpretCastOp::verify() {
|
2021-02-10 13:53:11 +01:00
|
|
|
// The source and result memrefs should be in the same memory space.
|
2022-07-10 21:19:11 -07:00
|
|
|
auto srcType = getSource().getType().cast<BaseMemRefType>();
|
2022-02-02 10:18:06 -08:00
|
|
|
auto resultType = getType().cast<MemRefType>();
|
2021-03-23 11:45:24 +03:00
|
|
|
if (srcType.getMemorySpace() != resultType.getMemorySpace())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("different memory spaces specified for source type ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< srcType << " and result memref type " << resultType;
|
|
|
|
if (srcType.getElementType() != resultType.getElementType())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("different element types specified for source type ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< srcType << " and result memref type " << resultType;
|
|
|
|
|
|
|
|
// Match sizes in result memref type and in static_sizes attribute.
|
2022-02-02 10:18:06 -08:00
|
|
|
for (auto &en : llvm::enumerate(llvm::zip(
|
2022-07-10 21:19:11 -07:00
|
|
|
resultType.getShape(), extractFromI64ArrayAttr(getStaticSizes())))) {
|
2021-02-10 13:53:11 +01:00
|
|
|
int64_t resultSize = std::get<0>(en.value());
|
|
|
|
int64_t expectedSize = std::get<1>(en.value());
|
2022-01-07 09:53:14 +01:00
|
|
|
if (!ShapedType::isDynamic(resultSize) &&
|
|
|
|
!ShapedType::isDynamic(expectedSize) && resultSize != expectedSize)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("expected result type with size = ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< expectedSize << " instead of " << resultSize
|
|
|
|
<< " in dim = " << en.index();
|
|
|
|
}
|
|
|
|
|
2022-01-07 09:53:14 +01:00
|
|
|
// Match offset and strides in static_offset and static_strides attributes. If
|
|
|
|
// result memref type has no affine map specified, this will assume an
|
|
|
|
// identity layout.
|
|
|
|
int64_t resultOffset;
|
|
|
|
SmallVector<int64_t, 4> resultStrides;
|
|
|
|
if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("expected result type to have strided layout but found ")
|
2022-01-07 09:53:14 +01:00
|
|
|
<< resultType;
|
|
|
|
|
|
|
|
// Match offset in result memref type and in static_offsets attribute.
|
2022-07-10 21:19:11 -07:00
|
|
|
int64_t expectedOffset = extractFromI64ArrayAttr(getStaticOffsets()).front();
|
2022-01-07 09:53:14 +01:00
|
|
|
if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
|
|
|
|
!ShapedType::isDynamicStrideOrOffset(expectedOffset) &&
|
|
|
|
resultOffset != expectedOffset)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("expected result type with offset = ")
|
2022-01-07 09:53:14 +01:00
|
|
|
<< resultOffset << " instead of " << expectedOffset;
|
|
|
|
|
|
|
|
// Match strides in result memref type and in static_strides attribute.
|
|
|
|
for (auto &en : llvm::enumerate(llvm::zip(
|
2022-07-10 21:19:11 -07:00
|
|
|
resultStrides, extractFromI64ArrayAttr(getStaticStrides())))) {
|
2022-01-07 09:53:14 +01:00
|
|
|
int64_t resultStride = std::get<0>(en.value());
|
|
|
|
int64_t expectedStride = std::get<1>(en.value());
|
|
|
|
if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
|
|
|
|
!ShapedType::isDynamicStrideOrOffset(expectedStride) &&
|
|
|
|
resultStride != expectedStride)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("expected result type with stride = ")
|
2022-01-07 09:53:14 +01:00
|
|
|
<< expectedStride << " instead of " << resultStride
|
|
|
|
<< " in dim = " << en.index();
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
2022-01-07 09:53:14 +01:00
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-02-21 15:16:34 +03:00
|
|
|
OpFoldResult ReinterpretCastOp::fold(ArrayRef<Attribute> /*operands*/) {
|
2022-07-10 21:19:11 -07:00
|
|
|
Value src = getSource();
|
2022-02-21 15:16:34 +03:00
|
|
|
auto getPrevSrc = [&]() -> Value {
|
|
|
|
// reinterpret_cast(reinterpret_cast(x)) -> reinterpret_cast(x).
|
|
|
|
if (auto prev = src.getDefiningOp<ReinterpretCastOp>())
|
2022-07-10 21:19:11 -07:00
|
|
|
return prev.getSource();
|
2022-02-21 15:16:34 +03:00
|
|
|
|
|
|
|
// reinterpret_cast(cast(x)) -> reinterpret_cast(x).
|
|
|
|
if (auto prev = src.getDefiningOp<CastOp>())
|
2022-07-10 21:19:11 -07:00
|
|
|
return prev.getSource();
|
2022-02-21 15:16:34 +03:00
|
|
|
|
|
|
|
// reinterpret_cast(subview(x)) -> reinterpret_cast(x) if subview offsets
|
|
|
|
// are 0.
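// Hypothetical illustration: a subview such as
//   %v = memref.subview %x[0, 0] [4, 4] [1, 1] : ...
// has all-zero offsets, so a reinterpret_cast of %v can be rewritten to take
// %x as its source without changing the computed base offset.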
|
|
|
|
if (auto prev = src.getDefiningOp<SubViewOp>())
|
|
|
|
if (llvm::all_of(prev.getMixedOffsets(), [](OpFoldResult val) {
|
|
|
|
return isConstantIntValue(val, 0);
|
|
|
|
}))
|
2022-07-10 21:19:11 -07:00
|
|
|
return prev.getSource();
|
2022-02-21 15:16:34 +03:00
|
|
|
|
|
|
|
return nullptr;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (auto prevSrc = getPrevSrc()) {
|
2022-07-10 21:19:11 -07:00
|
|
|
getSourceMutable().assign(prevSrc);
|
2022-02-21 15:16:34 +03:00
|
|
|
return getResult();
|
|
|
|
}
|
|
|
|
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Reassociative reshape ops
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-03-31 17:03:41 +09:00
|
|
|
/// Helper function for verifying the shape of ExpandShapeOp and CollapseShapeOp
|
|
|
|
/// result and operand. Layout maps are verified separately.
|
|
|
|
///
|
|
|
|
/// If `allowMultipleDynamicDimsPerGroup`, multiple dynamic dimensions are
|
|
|
|
/// allowed in a reassociation group.
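///
/// Illustrative sketch with assumed shapes: collapsedShape = [6, ?],
/// expandedShape = [2, 3, ?] and reassociation [[0, 1], [2]] verify
/// successfully, since the first group is fully static with 2 * 3 == 6 and
/// the dynamic collapsed dim corresponds to the group holding the dynamic
/// expanded dim.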
|
|
|
|
static LogicalResult
|
|
|
|
verifyCollapsedShape(Operation *op, ArrayRef<int64_t> collapsedShape,
|
|
|
|
ArrayRef<int64_t> expandedShape,
|
|
|
|
ArrayRef<ReassociationIndices> reassociation,
|
|
|
|
bool allowMultipleDynamicDimsPerGroup) {
|
|
|
|
// There must be one reassociation group per collapsed dimension.
|
|
|
|
if (collapsedShape.size() != reassociation.size())
|
|
|
|
return op->emitOpError("invalid number of reassociation groups: found ")
|
|
|
|
<< reassociation.size() << ", expected " << collapsedShape.size();
|
|
|
|
|
|
|
|
// The next expected expanded dimension index (while iterating over
|
|
|
|
// reassociation indices).
|
|
|
|
int64_t nextDim = 0;
|
|
|
|
for (const auto &it : llvm::enumerate(reassociation)) {
|
|
|
|
ReassociationIndices group = it.value();
|
|
|
|
int64_t collapsedDim = it.index();
|
|
|
|
|
|
|
|
bool foundDynamic = false;
|
|
|
|
for (int64_t expandedDim : group) {
|
|
|
|
if (expandedDim != nextDim++)
|
|
|
|
return op->emitOpError("reassociation indices must be contiguous");
|
|
|
|
|
|
|
|
if (expandedDim >= static_cast<int64_t>(expandedShape.size()))
|
|
|
|
return op->emitOpError("reassociation index ")
|
|
|
|
<< expandedDim << " is out of bounds";
|
|
|
|
|
|
|
|
// Check if there are multiple dynamic dims in a reassociation group.
|
|
|
|
if (ShapedType::isDynamic(expandedShape[expandedDim])) {
|
|
|
|
if (foundDynamic && !allowMultipleDynamicDimsPerGroup)
|
|
|
|
return op->emitOpError(
|
|
|
|
"at most one dimension in a reassociation group may be dynamic");
|
|
|
|
foundDynamic = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ExpandShapeOp/CollapseShapeOp may not be used to cast dynamicity.
|
|
|
|
if (ShapedType::isDynamic(collapsedShape[collapsedDim]) != foundDynamic)
|
|
|
|
return op->emitOpError("collapsed dim (")
|
|
|
|
<< collapsedDim
|
|
|
|
<< ") must be dynamic if and only if reassociation group is "
|
|
|
|
"dynamic";
|
|
|
|
|
|
|
|
// If all dims in the reassociation group are static, the size of the
|
|
|
|
// collapsed dim can be verified.
|
|
|
|
if (!foundDynamic) {
|
|
|
|
int64_t groupSize = 1;
|
|
|
|
for (int64_t expandedDim : group)
|
|
|
|
groupSize *= expandedShape[expandedDim];
|
|
|
|
if (groupSize != collapsedShape[collapsedDim])
|
|
|
|
return op->emitOpError("collapsed dim size (")
|
|
|
|
<< collapsedShape[collapsedDim]
|
|
|
|
<< ") must equal reassociation group size (" << groupSize << ")";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (collapsedShape.empty()) {
|
|
|
|
// Rank 0: All expanded dimensions must be 1.
|
|
|
|
for (int64_t d : expandedShape)
|
|
|
|
if (d != 1)
|
|
|
|
return op->emitOpError(
|
|
|
|
"rank 0 memrefs can only be extended/collapsed with/from ones");
|
|
|
|
} else if (nextDim != static_cast<int64_t>(expandedShape.size())) {
|
|
|
|
// Rank >= 1: Number of dimensions among all reassociation groups must match
|
|
|
|
// the result memref rank.
|
|
|
|
return op->emitOpError("expanded rank (")
|
|
|
|
<< expandedShape.size()
|
|
|
|
<< ") inconsistent with number of reassociation indices (" << nextDim
|
|
|
|
<< ")";
|
|
|
|
}
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
|
|
|
|
return getSymbolLessAffineMaps(getReassociationExprs());
|
|
|
|
}
|
2022-03-31 17:03:41 +09:00
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
|
2021-07-28 10:19:05 +00:00
|
|
|
return convertReassociationIndicesToExprs(getContext(),
|
|
|
|
getReassociationIndices());
|
2021-07-16 13:31:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
|
|
|
|
return getSymbolLessAffineMaps(getReassociationExprs());
|
|
|
|
}
|
2022-03-31 17:03:41 +09:00
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
|
2021-07-28 10:19:05 +00:00
|
|
|
return convertReassociationIndicesToExprs(getContext(),
|
|
|
|
getReassociationIndices());
|
2021-07-16 13:31:02 +02:00
|
|
|
}
|
|
|
|
|
2022-03-31 17:03:41 +09:00
|
|
|
/// Compute the layout map after expanding a given source MemRef type with the
|
|
|
|
/// specified reassociation indices.
|
|
|
|
static FailureOr<AffineMap>
|
|
|
|
computeExpandedLayoutMap(MemRefType srcType, ArrayRef<int64_t> resultShape,
|
|
|
|
ArrayRef<ReassociationIndices> reassociation) {
|
|
|
|
int64_t srcOffset;
|
2022-04-06 03:57:03 -04:00
|
|
|
SmallVector<int64_t> srcStrides;
|
2022-03-31 17:03:41 +09:00
|
|
|
if (failed(getStridesAndOffset(srcType, srcStrides, srcOffset)))
|
|
|
|
return failure();
|
|
|
|
assert(srcStrides.size() == reassociation.size() && "invalid reassociation");
|
|
|
|
|
2022-04-06 03:57:03 -04:00
|
|
|
// 1-1 mapping between srcStrides and reassociation packs.
|
|
|
|
// Each srcStride starts with the given value and gets expanded according to
|
|
|
|
// the proper entries in resultShape.
|
|
|
|
// Example:
|
|
|
|
// srcStrides = [10000, 1 , 100 ],
|
|
|
|
// reassociations = [ [0], [1], [2, 3, 4]],
|
|
|
|
// resultSizes = [2, 5, 4, 3, 2] = [ [2], [5], [4, 3, 2]]
|
|
|
|
// -> For the purpose of stride calculation, the useful sizes are:
|
|
|
|
// [x, x, x, 3, 2] = [ [x], [x], [x, 3, 2]].
|
|
|
|
// resultStrides = [10000, 1, 600, 200, 100]
|
|
|
|
// Note that a stride does not get expanded along the first entry of each
|
|
|
|
// shape pack.
|
|
|
|
SmallVector<int64_t> reverseResultStrides;
|
|
|
|
reverseResultStrides.reserve(resultShape.size());
|
|
|
|
unsigned shapeIndex = resultShape.size() - 1;
|
|
|
|
for (auto it : llvm::reverse(llvm::zip(reassociation, srcStrides))) {
|
|
|
|
ReassociationIndices reassoc = std::get<0>(it);
|
|
|
|
int64_t currentStrideToExpand = std::get<1>(it);
|
|
|
|
for (unsigned idx = 0, e = reassoc.size(); idx < e; ++idx) {
|
|
|
|
using saturated_arith::Wrapper;
|
|
|
|
reverseResultStrides.push_back(currentStrideToExpand);
|
|
|
|
currentStrideToExpand = (Wrapper::stride(currentStrideToExpand) *
|
|
|
|
Wrapper::size(resultShape[shapeIndex--]))
|
|
|
|
.asStride();
|
2022-03-31 17:03:41 +09:00
|
|
|
}
|
|
|
|
}
|
2022-06-07 19:30:10 +02:00
|
|
|
auto resultStrides = llvm::to_vector<8>(llvm::reverse(reverseResultStrides));
|
|
|
|
resultStrides.resize(resultShape.size(), 1);
|
|
|
|
return makeStridedLinearLayoutMap(resultStrides, srcOffset,
|
|
|
|
srcType.getContext());
|
2022-03-31 17:03:41 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
static FailureOr<MemRefType>
|
|
|
|
computeExpandedType(MemRefType srcType, ArrayRef<int64_t> resultShape,
|
|
|
|
ArrayRef<ReassociationIndices> reassociation) {
|
|
|
|
if (srcType.getLayout().isIdentity()) {
|
|
|
|
// If the source is contiguous (i.e., no layout map specified), so is the
|
|
|
|
// result.
|
|
|
|
MemRefLayoutAttrInterface layout;
|
|
|
|
return MemRefType::get(resultShape, srcType.getElementType(), layout,
|
|
|
|
srcType.getMemorySpace());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Source may not be contiguous. Compute the layout map.
|
|
|
|
FailureOr<AffineMap> computedLayout =
|
|
|
|
computeExpandedLayoutMap(srcType, resultShape, reassociation);
|
|
|
|
if (failed(computedLayout))
|
|
|
|
return failure();
|
|
|
|
auto computedType =
|
|
|
|
MemRefType::get(resultShape, srcType.getElementType(), *computedLayout,
|
|
|
|
srcType.getMemorySpaceAsInt());
|
|
|
|
return canonicalizeStridedLayout(computedType);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ExpandShapeOp::build(OpBuilder &builder, OperationState &result,
|
|
|
|
ArrayRef<int64_t> resultShape, Value src,
|
|
|
|
ArrayRef<ReassociationIndices> reassociation) {
|
|
|
|
// Only ranked memref source values are supported.
|
|
|
|
auto srcType = src.getType().cast<MemRefType>();
|
|
|
|
FailureOr<MemRefType> resultType =
|
|
|
|
computeExpandedType(srcType, resultShape, reassociation);
|
|
|
|
// Failure of this assertion usually indicates a problem with the source
|
|
|
|
// type, e.g., could not get strides/offset.
|
|
|
|
assert(succeeded(resultType) && "could not compute layout");
|
|
|
|
build(builder, result, *resultType, src, reassociation);
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult ExpandShapeOp::verify() {
|
2022-03-31 17:03:41 +09:00
|
|
|
MemRefType srcType = getSrcType();
|
|
|
|
MemRefType resultType = getResultType();
|
|
|
|
|
|
|
|
// Verify result shape.
|
|
|
|
if (failed(verifyCollapsedShape(getOperation(), srcType.getShape(),
|
|
|
|
resultType.getShape(),
|
|
|
|
getReassociationIndices(),
|
|
|
|
/*allowMultipleDynamicDimsPerGroup=*/false)))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
// Compute expected result type (including layout map).
|
|
|
|
FailureOr<MemRefType> expectedResultType = computeExpandedType(
|
|
|
|
srcType, resultType.getShape(), getReassociationIndices());
|
|
|
|
if (failed(expectedResultType))
|
|
|
|
return emitOpError("invalid source layout map");
|
|
|
|
|
|
|
|
// Check actual result type.
|
|
|
|
auto canonicalizedResultType = canonicalizeStridedLayout(resultType);
|
|
|
|
if (*expectedResultType != canonicalizedResultType)
|
|
|
|
return emitOpError("expected expanded type to be ")
|
|
|
|
<< *expectedResultType << " but found " << canonicalizedResultType;
|
|
|
|
|
|
|
|
return success();
|
2021-07-16 13:31:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
|
|
|
|
MLIRContext *context) {
|
2022-04-06 11:58:21 +02:00
|
|
|
results.add<ComposeReassociativeReshapeOps<ExpandShapeOp>,
|
|
|
|
ComposeExpandOfCollapseOp<ExpandShapeOp, CollapseShapeOp>>(
|
|
|
|
context);
|
2021-07-16 13:31:02 +02:00
|
|
|
}
|
|
|
|
|
2022-03-31 17:04:15 +09:00
|
|
|
/// Compute the layout map after collapsing a given source MemRef type with the
|
|
|
|
/// specified reassociation indices.
|
|
|
|
///
|
|
|
|
/// Note: All collapsed dims in a reassociation group must be contiguous. It is
|
|
|
|
/// not possible to check this by inspecting a MemRefType in the general case.
|
2022-04-08 18:04:00 +09:00
|
|
|
/// If non-contiguity cannot be checked statically, the collapse is assumed to
|
|
|
|
/// be valid (and thus accepted by this function) unless `strict = true`.
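///
/// Illustrative sketch with assumed values: srcShape = [2, 5, 4, 3, 2],
/// srcStrides = [120, 24, 6, 2, 1], reassociation = [[0], [1], [2, 3, 4]].
/// Each group contributes the stride of its last entry, giving resultStrides
/// = [120, 24, 1]; the contiguity check then confirms 1 * 2 == 2 and
/// 2 * 3 == 6 within the last group.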
|
2022-03-31 17:04:15 +09:00
|
|
|
static FailureOr<AffineMap>
|
2022-04-06 03:57:03 -04:00
|
|
|
computeCollapsedLayoutMap(MemRefType srcType,
|
2022-04-08 18:04:00 +09:00
|
|
|
ArrayRef<ReassociationIndices> reassociation,
|
|
|
|
bool strict = false) {
|
2022-03-31 17:04:15 +09:00
|
|
|
int64_t srcOffset;
|
2022-04-06 03:57:03 -04:00
|
|
|
SmallVector<int64_t> srcStrides;
|
|
|
|
auto srcShape = srcType.getShape();
|
2022-03-31 17:04:15 +09:00
|
|
|
if (failed(getStridesAndOffset(srcType, srcStrides, srcOffset)))
|
|
|
|
return failure();
|
2022-04-06 03:57:03 -04:00
|
|
|
|
2022-04-18 20:50:30 -04:00
|
|
|
// The result stride of a reassociation group is the stride of the last entry
|
|
|
|
// of the reassociation. (TODO: Should be the minimum stride in the
|
|
|
|
// reassociation because strides are not necessarily sorted. E.g., when using
|
|
|
|
// memref.transpose.) Dimensions of size 1 should be skipped, because their
|
|
|
|
// strides are meaningless and could have any arbitrary value.
|
2022-04-06 03:57:03 -04:00
|
|
|
SmallVector<int64_t> resultStrides;
|
|
|
|
resultStrides.reserve(reassociation.size());
|
2022-04-18 20:50:30 -04:00
|
|
|
for (const ReassociationIndices &reassoc : reassociation) {
|
|
|
|
ArrayRef<int64_t> ref = llvm::makeArrayRef(reassoc);
|
|
|
|
while (srcShape[ref.back()] == 1 && ref.size() > 1)
|
|
|
|
ref = ref.drop_back();
|
|
|
|
if (!ShapedType::isDynamic(srcShape[ref.back()]) || ref.size() == 1) {
|
|
|
|
resultStrides.push_back(srcStrides[ref.back()]);
|
|
|
|
} else {
|
|
|
|
// Dynamically-sized dims may turn out to be dims of size 1 at runtime, so
|
|
|
|
// the corresponding stride may have to be skipped. (See above comment.)
|
|
|
|
// Therefore, the result stride cannot be statically determined and must
|
|
|
|
// be dynamic.
|
|
|
|
resultStrides.push_back(ShapedType::kDynamicStrideOrOffset);
|
|
|
|
}
|
|
|
|
}
|
2022-04-06 03:57:03 -04:00
|
|
|
|
|
|
|
// Validate that each reassociation group is contiguous.
|
|
|
|
unsigned resultStrideIndex = resultStrides.size() - 1;
|
2022-04-16 07:43:24 +00:00
|
|
|
for (const ReassociationIndices &reassoc : llvm::reverse(reassociation)) {
|
2022-04-06 03:57:03 -04:00
|
|
|
auto trailingReassocs = ArrayRef<int64_t>(reassoc).drop_front();
|
|
|
|
using saturated_arith::Wrapper;
|
|
|
|
auto stride = Wrapper::stride(resultStrides[resultStrideIndex--]);
|
|
|
|
for (int64_t idx : llvm::reverse(trailingReassocs)) {
|
|
|
|
stride = stride * Wrapper::size(srcShape[idx]);
|
2022-04-08 18:04:00 +09:00
|
|
|
|
|
|
|
// Both source and result stride must have the same static value. In that
|
|
|
|
// case, we can be sure that the dimensions are collapsible (because they
|
|
|
|
// are contiguous).
|
|
|
|
//
|
|
|
|
// One special case is when the srcShape entry is 1, in which case it can
|
|
|
|
// never produce non-contiguity.
|
|
|
|
if (srcShape[idx] == 1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// If `strict = false` (default during op verification), we accept cases
|
|
|
|
// where one or both strides are dynamic. This is best effort: We reject
|
|
|
|
// ops where obviously non-contiguous dims are collapsed, but accept ops
|
|
|
|
// where we cannot be sure statically. Such ops may fail at runtime. See
|
|
|
|
// the op documentation for details.
|
|
|
|
auto srcStride = Wrapper::stride(srcStrides[idx - 1]);
|
|
|
|
if (strict && (stride.saturated || srcStride.saturated))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
if (!stride.saturated && !srcStride.saturated && stride != srcStride)
|
2022-03-31 17:04:15 +09:00
|
|
|
return failure();
|
|
|
|
}
|
|
|
|
}
|
2022-04-06 03:57:03 -04:00
|
|
|
return makeStridedLinearLayoutMap(resultStrides, srcOffset,
|
|
|
|
srcType.getContext());
|
2022-03-31 17:04:15 +09:00
|
|
|
}
|
|
|
|
|
2022-04-22 09:44:28 +02:00
|
|
|
bool CollapseShapeOp::isGuaranteedCollapsible(
|
2022-04-08 18:04:00 +09:00
|
|
|
MemRefType srcType, ArrayRef<ReassociationIndices> reassociation) {
|
|
|
|
// MemRefs with standard layout are always collapsible.
|
|
|
|
if (srcType.getLayout().isIdentity())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return succeeded(computeCollapsedLayoutMap(srcType, reassociation,
|
|
|
|
/*strict=*/true));
|
|
|
|
}
|
|
|
|
|
2022-03-31 17:04:15 +09:00
|
|
|
static MemRefType
|
|
|
|
computeCollapsedType(MemRefType srcType,
|
|
|
|
ArrayRef<ReassociationIndices> reassociation) {
|
|
|
|
SmallVector<int64_t> resultShape;
|
2022-04-06 03:57:03 -04:00
|
|
|
resultShape.reserve(reassociation.size());
|
2022-03-31 17:04:15 +09:00
|
|
|
for (const ReassociationIndices &group : reassociation) {
|
2022-04-06 03:57:03 -04:00
|
|
|
using saturated_arith::Wrapper;
|
|
|
|
auto groupSize = Wrapper::size(1);
|
|
|
|
for (int64_t srcDim : group)
|
|
|
|
groupSize = groupSize * Wrapper::size(srcType.getDimSize(srcDim));
|
|
|
|
resultShape.push_back(groupSize.asSize());
|
2022-03-31 17:04:15 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
if (srcType.getLayout().isIdentity()) {
|
|
|
|
// If the source is contiguous (i.e., no layout map specified), so is the
|
|
|
|
// result.
|
|
|
|
MemRefLayoutAttrInterface layout;
|
|
|
|
return MemRefType::get(resultShape, srcType.getElementType(), layout,
|
|
|
|
srcType.getMemorySpace());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Source may not be fully contiguous. Compute the layout map.
|
|
|
|
// Note: Dimensions that are collapsed into a single dim are assumed to be
|
|
|
|
// contiguous.
|
|
|
|
FailureOr<AffineMap> computedLayout =
|
2022-04-06 03:57:03 -04:00
|
|
|
computeCollapsedLayoutMap(srcType, reassociation);
|
2022-03-31 17:04:15 +09:00
|
|
|
assert(succeeded(computedLayout) &&
|
|
|
|
"invalid source layout map or collapsing non-contiguous dims");
|
|
|
|
auto computedType =
|
|
|
|
MemRefType::get(resultShape, srcType.getElementType(), *computedLayout,
|
|
|
|
srcType.getMemorySpaceAsInt());
|
|
|
|
return canonicalizeStridedLayout(computedType);
|
|
|
|
}
|
|
|
|
|
|
|
|
void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
|
|
|
|
ArrayRef<ReassociationIndices> reassociation,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
auto srcType = src.getType().cast<MemRefType>();
|
|
|
|
MemRefType resultType = computeCollapsedType(srcType, reassociation);
|
|
|
|
build(b, result, resultType, src, attrs);
|
2022-06-26 20:45:25 -07:00
|
|
|
result.addAttribute(::mlir::getReassociationAttrName(),
|
2022-03-31 17:04:15 +09:00
|
|
|
getReassociationIndicesAttribute(b, reassociation));
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult CollapseShapeOp::verify() {
|
2022-03-31 17:04:15 +09:00
|
|
|
MemRefType srcType = getSrcType();
|
|
|
|
MemRefType resultType = getResultType();
|
|
|
|
|
|
|
|
// Verify result shape.
|
|
|
|
if (failed(verifyCollapsedShape(getOperation(), resultType.getShape(),
|
|
|
|
srcType.getShape(), getReassociationIndices(),
|
|
|
|
/*allowMultipleDynamicDimsPerGroup=*/true)))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
// Compute expected result type (including layout map).
|
|
|
|
MemRefType expectedResultType;
|
|
|
|
if (srcType.getLayout().isIdentity()) {
|
|
|
|
// If the source is contiguous (i.e., no layout map specified), so is the
|
|
|
|
// result.
|
|
|
|
MemRefLayoutAttrInterface layout;
|
|
|
|
expectedResultType =
|
|
|
|
MemRefType::get(resultType.getShape(), srcType.getElementType(), layout,
|
|
|
|
srcType.getMemorySpace());
|
|
|
|
} else {
|
|
|
|
// Source may not be fully contiguous. Compute the layout map.
|
|
|
|
// Note: Dimensions that are collapsed into a single dim are assumed to be
|
|
|
|
// contiguous.
|
2022-04-06 03:57:03 -04:00
|
|
|
FailureOr<AffineMap> computedLayout =
|
|
|
|
computeCollapsedLayoutMap(srcType, getReassociationIndices());
|
2022-03-31 17:04:15 +09:00
|
|
|
if (failed(computedLayout))
|
|
|
|
return emitOpError(
|
|
|
|
"invalid source layout map or collapsing non-contiguous dims");
|
|
|
|
auto computedType =
|
|
|
|
MemRefType::get(resultType.getShape(), srcType.getElementType(),
|
|
|
|
*computedLayout, srcType.getMemorySpaceAsInt());
|
|
|
|
expectedResultType = canonicalizeStridedLayout(computedType);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto canonicalizedResultType = canonicalizeStridedLayout(resultType);
|
|
|
|
if (expectedResultType != canonicalizedResultType)
|
|
|
|
return emitOpError("expected collapsed type to be ")
|
|
|
|
<< expectedResultType << " but found " << canonicalizedResultType;
|
|
|
|
|
|
|
|
return success();
|
2021-07-16 13:31:02 +02:00
|
|
|
}
|
|
|
|
|
2021-07-28 10:19:05 +00:00
|
|
|
struct CollapseShapeOpMemRefCastFolder
|
|
|
|
: public OpRewritePattern<CollapseShapeOp> {
|
|
|
|
public:
|
|
|
|
using OpRewritePattern<CollapseShapeOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(CollapseShapeOp op,
|
|
|
|
PatternRewriter &rewriter) const override {
|
|
|
|
auto cast = op.getOperand().getDefiningOp<CastOp>();
|
|
|
|
if (!cast)
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
if (!CastOp::canFoldIntoConsumerOp(cast))
|
|
|
|
return failure();
|
|
|
|
|
2022-03-31 17:04:15 +09:00
|
|
|
Type newResultType =
|
|
|
|
computeCollapsedType(cast.getOperand().getType().cast<MemRefType>(),
|
|
|
|
op.getReassociationIndices());
|
2021-07-28 10:19:05 +00:00
|
|
|
|
|
|
|
if (newResultType == op.getResultType()) {
|
|
|
|
rewriter.updateRootInPlace(
|
2022-07-10 21:19:11 -07:00
|
|
|
op, [&]() { op.getSrcMutable().assign(cast.getSource()); });
|
2021-07-28 10:19:05 +00:00
|
|
|
} else {
|
|
|
|
Value newOp = rewriter.create<CollapseShapeOp>(
|
2022-07-10 21:19:11 -07:00
|
|
|
op->getLoc(), cast.getSource(), op.getReassociationIndices());
|
2021-07-28 10:19:05 +00:00
|
|
|
rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
|
|
|
|
}
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
|
|
|
|
MLIRContext *context) {
|
2022-04-06 11:58:21 +02:00
|
|
|
results.add<ComposeReassociativeReshapeOps<CollapseShapeOp>,
|
|
|
|
ComposeCollapseOfExpandOp<CollapseShapeOp, ExpandShapeOp>,
|
2021-07-28 10:19:05 +00:00
|
|
|
CollapseShapeOpMemRefCastFolder>(context);
|
2021-07-16 13:31:02 +02:00
|
|
|
}
|
2022-03-31 17:15:31 +09:00
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
|
|
|
|
}
|
2022-03-31 17:15:31 +09:00
|
|
|
|
2021-07-16 13:31:02 +02:00
|
|
|
OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
|
|
|
|
}
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ReshapeOp
|
|
|
|
//===----------------------------------------------------------------------===//
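// Illustrative (assumed) use of the op verified below; the shape operand is a
// rank-1 memref whose static length, if any, must match the result rank:
//
//   %r = memref.reshape %src(%shape)
//       : (memref<?xf32>, memref<3xindex>) -> memref<?x?x?xf32>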
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult ReshapeOp::verify() {
|
2022-07-10 21:19:11 -07:00
|
|
|
Type operandType = getSource().getType();
|
|
|
|
Type resultType = getResult().getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
Type operandElementType = operandType.cast<ShapedType>().getElementType();
|
|
|
|
Type resultElementType = resultType.cast<ShapedType>().getElementType();
|
|
|
|
if (operandElementType != resultElementType)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("element types of source and destination memref "
|
|
|
|
"types should be the same");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
|
2021-10-11 18:25:14 +03:00
|
|
|
if (!operandMemRefType.getLayout().isIdentity())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("source memref type should have identity affine map");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2022-07-10 21:19:11 -07:00
|
|
|
int64_t shapeSize = getShape().getType().cast<MemRefType>().getDimSize(0);
|
2021-02-10 13:53:11 +01:00
|
|
|
auto resultMemRefType = resultType.dyn_cast<MemRefType>();
|
|
|
|
if (resultMemRefType) {
|
2021-10-11 18:25:14 +03:00
|
|
|
if (!resultMemRefType.getLayout().isIdentity())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("result memref type should have identity affine map");
|
2021-02-10 13:53:11 +01:00
|
|
|
if (shapeSize == ShapedType::kDynamicSize)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("cannot use shape operand with dynamic length to "
|
|
|
|
"reshape to statically-ranked memref type");
|
2021-02-10 13:53:11 +01:00
|
|
|
if (shapeSize != resultMemRefType.getRank())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError(
|
2021-02-10 13:53:11 +01:00
|
|
|
"length of shape operand differs from the result's memref rank");
|
|
|
|
}
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// StoreOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult StoreOp::verify() {
|
|
|
|
if (getNumOperands() != 2 + getMemRefType().getRank())
|
|
|
|
return emitOpError("store index operand count not equal to memref rank");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
|
|
|
|
SmallVectorImpl<OpFoldResult> &results) {
|
|
|
|
/// store(memrefcast) -> store
|
2021-06-07 13:44:07 -04:00
|
|
|
return foldMemRefCast(*this, getValueToStore());
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SubViewOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// A subview result type can be fully inferred from the source type and the
|
|
|
|
/// static representation of offsets, sizes and strides. Special sentinels
|
|
|
|
/// encode the dynamic case.
|
|
|
|
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
|
2021-12-29 10:48:02 -08:00
|
|
|
ArrayRef<int64_t> staticOffsets,
|
|
|
|
ArrayRef<int64_t> staticSizes,
|
|
|
|
ArrayRef<int64_t> staticStrides) {
|
2021-02-10 13:53:11 +01:00
|
|
|
unsigned rank = sourceMemRefType.getRank();
|
2021-12-29 10:48:02 -08:00
|
|
|
(void)rank;
|
2022-01-09 14:17:28 -05:00
|
|
|
assert(staticOffsets.size() == rank && "staticOffsets length mismatch");
|
|
|
|
assert(staticSizes.size() == rank && "staticSizes length mismatch");
|
|
|
|
assert(staticStrides.size() == rank && "staticStrides length mismatch");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// Extract source offset and strides.
|
|
|
|
int64_t sourceOffset;
|
|
|
|
SmallVector<int64_t, 4> sourceStrides;
|
|
|
|
auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
|
|
|
|
assert(succeeded(res) && "SubViewOp expected strided memref type");
|
|
|
|
(void)res;
|
|
|
|
|
|
|
|
// Compute target offset whose value is:
|
|
|
|
// `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
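// For illustration (assumed values): sourceOffset = 0, staticOffsets = [2, 3]
// and sourceStrides = [10, 1] yield targetOffset = 0 + 2*10 + 3*1 = 23. Any
// dynamic offset or stride saturates the result to the dynamic sentinel.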
|
|
|
|
int64_t targetOffset = sourceOffset;
|
|
|
|
for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
|
|
|
|
auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it);
|
2022-04-06 03:57:03 -04:00
|
|
|
using saturated_arith::Wrapper;
|
|
|
|
targetOffset =
|
|
|
|
(Wrapper::offset(targetOffset) +
|
|
|
|
Wrapper::offset(staticOffset) * Wrapper::stride(targetStride))
|
|
|
|
.asOffset();
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Compute target stride whose value is:
|
|
|
|
// `sourceStrides_i * staticStrides_i`.
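// For illustration (assumed values): sourceStrides = [10, 1] and
// staticStrides = [2, 2] give targetStrides = [20, 2], again saturating to
// the dynamic sentinel if either factor is dynamic.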
|
|
|
|
SmallVector<int64_t, 4> targetStrides;
|
|
|
|
targetStrides.reserve(staticOffsets.size());
|
|
|
|
for (auto it : llvm::zip(sourceStrides, staticStrides)) {
|
|
|
|
auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
|
2022-04-06 03:57:03 -04:00
|
|
|
using saturated_arith::Wrapper;
|
|
|
|
targetStrides.push_back(
|
|
|
|
(Wrapper::stride(sourceStride) * Wrapper::stride(staticStride))
|
|
|
|
.asStride());
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// The type is now known.
|
|
|
|
return MemRefType::get(
|
|
|
|
staticSizes, sourceMemRefType.getElementType(),
|
|
|
|
makeStridedLinearLayoutMap(targetStrides, targetOffset,
|
|
|
|
sourceMemRefType.getContext()),
|
2021-03-23 11:45:24 +03:00
|
|
|
sourceMemRefType.getMemorySpace());
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
|
2021-12-29 10:48:02 -08:00
|
|
|
ArrayRef<OpFoldResult> offsets,
|
|
|
|
ArrayRef<OpFoldResult> sizes,
|
|
|
|
ArrayRef<OpFoldResult> strides) {
|
2021-02-10 13:53:11 +01:00
|
|
|
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
|
|
|
|
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
|
2021-12-29 10:48:02 -08:00
|
|
|
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
|
|
|
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
|
2021-02-10 13:53:11 +01:00
|
|
|
ShapedType::kDynamicSize);
|
2021-12-29 10:48:02 -08:00
|
|
|
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
2021-02-10 13:53:11 +01:00
|
|
|
return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
|
2021-11-29 16:22:45 +00:00
|
|
|
staticSizes, staticStrides);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
2022-07-05 16:39:29 +02:00
|
|
|
Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
|
2021-12-29 10:48:02 -08:00
|
|
|
MemRefType sourceRankedTensorType,
|
|
|
|
ArrayRef<int64_t> offsets,
|
|
|
|
ArrayRef<int64_t> sizes,
|
|
|
|
ArrayRef<int64_t> strides) {
|
2021-02-10 13:53:11 +01:00
|
|
|
auto inferredType =
|
2021-12-29 10:48:02 -08:00
|
|
|
inferResultType(sourceRankedTensorType, offsets, sizes, strides)
|
2021-02-10 13:53:11 +01:00
|
|
|
.cast<MemRefType>();
|
2022-07-05 17:01:40 +02:00
|
|
|
assert(inferredType.getRank() >= static_cast<int64_t>(resultShape.size()) &&
|
|
|
|
"expected ");
|
|
|
|
if (inferredType.getRank() == static_cast<int64_t>(resultShape.size()))
|
2022-07-05 16:39:29 +02:00
|
|
|
return inferredType;
|
|
|
|
|
|
|
|
// Compute which dimensions are dropped.
|
|
|
|
Optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
|
|
|
|
computeRankReductionMask(inferredType.getShape(), resultShape);
|
2022-07-13 00:57:02 -07:00
|
|
|
assert(dimsToProject.has_value() && "invalid rank reduction");
|
2022-07-05 16:39:29 +02:00
|
|
|
llvm::SmallBitVector dimsToProjectVector(inferredType.getRank());
|
|
|
|
for (unsigned dim : *dimsToProject)
|
|
|
|
dimsToProjectVector.set(dim);
|
|
|
|
|
|
|
|
// Compute layout map and result type.
|
|
|
|
AffineMap map = getProjectedMap(inferredType.getLayout().getAffineMap(),
|
|
|
|
dimsToProjectVector);
|
|
|
|
return MemRefType::get(resultShape, inferredType.getElementType(), map,
|
|
|
|
inferredType.getMemorySpace());
|
|
|
|
}
|
|
|
|
|
|
|
|
Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
|
2021-12-29 10:48:02 -08:00
|
|
|
MemRefType sourceRankedTensorType,
|
|
|
|
ArrayRef<OpFoldResult> offsets,
|
|
|
|
ArrayRef<OpFoldResult> sizes,
|
|
|
|
ArrayRef<OpFoldResult> strides) {
|
2021-02-10 13:53:11 +01:00
|
|
|
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
|
|
|
|
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
|
2021-12-29 10:48:02 -08:00
|
|
|
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
|
|
|
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
|
2021-02-10 13:53:11 +01:00
|
|
|
ShapedType::kDynamicSize);
|
2021-12-29 10:48:02 -08:00
|
|
|
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
2021-02-10 13:53:11 +01:00
|
|
|
return SubViewOp::inferRankReducedResultType(
|
2022-07-05 16:39:29 +02:00
|
|
|
resultShape, sourceRankedTensorType, staticOffsets, staticSizes,
|
2021-02-10 13:53:11 +01:00
|
|
|
staticStrides);
|
|
|
|
}
|
2022-07-05 16:39:29 +02:00
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
// Build a SubViewOp with mixed static and dynamic entries and custom result
|
|
|
|
// type. If the type passed is nullptr, it is inferred.
|
|
|
|
void SubViewOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
MemRefType resultType, Value source,
|
|
|
|
ArrayRef<OpFoldResult> offsets,
|
|
|
|
ArrayRef<OpFoldResult> sizes,
|
|
|
|
ArrayRef<OpFoldResult> strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
|
|
|
|
SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
|
|
|
|
dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
|
|
|
dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
|
|
|
|
ShapedType::kDynamicSize);
|
|
|
|
dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
|
|
|
|
ShapedType::kDynamicStrideOrOffset);
|
|
|
|
auto sourceMemRefType = source.getType().cast<MemRefType>();
|
|
|
|
// Structuring implementation this way avoids duplication between builders.
|
|
|
|
if (!resultType) {
|
|
|
|
resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
|
|
|
|
staticSizes, staticStrides)
|
|
|
|
.cast<MemRefType>();
|
|
|
|
}
|
|
|
|
build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
|
|
|
|
dynamicStrides, b.getI64ArrayAttr(staticOffsets),
|
|
|
|
b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
|
|
|
|
result.addAttributes(attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a SubViewOp with mixed static and dynamic entries and inferred result
|
|
|
|
// type.
|
|
|
|
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
|
|
|
|
ArrayRef<OpFoldResult> offsets,
|
|
|
|
ArrayRef<OpFoldResult> sizes,
|
|
|
|
ArrayRef<OpFoldResult> strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a SubViewOp with static entries and inferred result type.
|
|
|
|
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
|
|
|
|
ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
|
|
|
|
ArrayRef<int64_t> strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
SmallVector<OpFoldResult> sizeValues =
|
|
|
|
llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a SubViewOp with dynamic entries and custom result type. If the
|
|
|
|
// type passed is nullptr, it is inferred.
|
|
|
|
void SubViewOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
MemRefType resultType, Value source,
|
|
|
|
ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
|
|
|
|
ArrayRef<int64_t> strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
SmallVector<OpFoldResult> sizeValues =
|
|
|
|
llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
|
|
|
|
return b.getI64IntegerAttr(v);
|
|
|
|
}));
|
|
|
|
build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
|
|
|
|
attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a SubViewOp with dynamic entries and custom result type. If the type
|
|
|
|
// passed is nullptr, it is inferred.
|
|
|
|
void SubViewOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
MemRefType resultType, Value source, ValueRange offsets,
|
|
|
|
ValueRange sizes, ValueRange strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
|
|
|
|
SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
|
|
|
|
SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
|
|
|
|
llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
|
|
|
|
build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a SubViewOp with dynamic entries and inferred result type.
|
|
|
|
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
|
|
|
|
ValueRange offsets, ValueRange sizes, ValueRange strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// For ViewLikeOpInterface.
|
2022-07-10 21:19:11 -07:00
|
|
|
Value SubViewOp::getViewSource() { return getSource(); }
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2022-04-06 03:57:03 -04:00
|
|
|
/// Return true if t1 and t2 have equal offsets (both dynamic or of the same
|
|
|
|
/// static value).
|
2021-12-09 07:40:19 +00:00
|
|
|
static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
|
|
|
|
AffineExpr t1Offset, t2Offset;
|
|
|
|
SmallVector<AffineExpr> t1Strides, t2Strides;
|
|
|
|
auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
|
|
|
|
auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
|
|
|
|
return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
|
|
|
|
}
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
/// Checks if the `original` type can be rank-reduced to the `reduced` type.
|
|
|
|
/// This function is a slight variant of the `is subsequence` algorithm where
|
|
|
|
/// a non-matching dimension must be 1.
|
2021-11-29 16:22:45 +00:00
|
|
|
static SliceVerificationResult
|
|
|
|
isRankReducedMemRefType(MemRefType originalType,
|
2021-12-09 07:40:19 +00:00
|
|
|
MemRefType candidateRankReducedType,
|
2021-11-29 16:22:45 +00:00
|
|
|
ArrayRef<OpFoldResult> sizes) {
|
2021-12-09 07:40:19 +00:00
|
|
|
auto partialRes = isRankReducedType(originalType, candidateRankReducedType);
|
2021-11-29 16:22:45 +00:00
|
|
|
if (partialRes != SliceVerificationResult::Success)
|
|
|
|
return partialRes;
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2021-12-09 07:40:19 +00:00
|
|
|
auto optionalUnusedDimsMask = computeMemRefRankReductionMask(
|
|
|
|
originalType, candidateRankReducedType, sizes);
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// The sizes cannot be matched if no rank-reduction mask could be computed.
|
2022-06-20 11:22:37 -07:00
|
|
|
if (!optionalUnusedDimsMask)
|
2021-11-29 16:22:45 +00:00
|
|
|
return SliceVerificationResult::LayoutMismatch;
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2021-12-09 07:40:19 +00:00
|
|
|
if (originalType.getMemorySpace() !=
|
|
|
|
candidateRankReducedType.getMemorySpace())
|
2021-11-29 16:22:45 +00:00
|
|
|
return SliceVerificationResult::MemSpaceMismatch;
|
|
|
|
|
2021-12-09 07:40:19 +00:00
|
|
|
// No amount of stride dropping can reconcile incompatible offsets.
|
|
|
|
if (!haveCompatibleOffsets(originalType, candidateRankReducedType))
|
|
|
|
return SliceVerificationResult::LayoutMismatch;
|
|
|
|
|
2021-11-29 16:22:45 +00:00
|
|
|
return SliceVerificationResult::Success;
|
2021-02-10 13:53:11 +01:00
|
|
|
}
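// Illustrative example (hypothetical types): with subview sizes [8, 1, 4], the
// type memref<8x1x4xf32> can be rank-reduced to memref<8x4xf32> because only a
// unit dimension is dropped; reducing it to memref<8xf32> would drop a
// non-unit dimension and is rejected.
// ```
// %a = memref.subview %m[0, 0, 0] [8, 1, 4] [1, 1, 1]
//     : memref<8x1x4xf32> to memref<8x4xf32>
// ```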
|
|
|
|
|
|
|
|
template <typename OpTy>
|
2021-11-29 16:22:45 +00:00
|
|
|
static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result,
|
|
|
|
OpTy op, Type expectedType) {
|
2021-02-10 13:53:11 +01:00
|
|
|
auto memrefType = expectedType.cast<ShapedType>();
|
|
|
|
switch (result) {
|
2021-11-29 16:22:45 +00:00
|
|
|
case SliceVerificationResult::Success:
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
2021-11-29 16:22:45 +00:00
|
|
|
case SliceVerificationResult::RankTooLarge:
|
2021-02-10 13:53:11 +01:00
|
|
|
return op.emitError("expected result rank to be smaller or equal to ")
|
2021-11-29 16:22:45 +00:00
|
|
|
<< "the source rank. ";
|
|
|
|
case SliceVerificationResult::SizeMismatch:
|
2021-02-10 13:53:11 +01:00
|
|
|
return op.emitError("expected result type to be ")
|
|
|
|
<< expectedType
|
2021-11-29 16:22:45 +00:00
|
|
|
<< " or a rank-reduced version. (mismatch of result sizes) ";
|
|
|
|
case SliceVerificationResult::ElemTypeMismatch:
|
2021-02-10 13:53:11 +01:00
|
|
|
return op.emitError("expected result element type to be ")
|
2021-11-29 16:22:45 +00:00
|
|
|
<< memrefType.getElementType();
|
|
|
|
case SliceVerificationResult::MemSpaceMismatch:
|
|
|
|
return op.emitError("expected result and source memory spaces to match.");
|
|
|
|
case SliceVerificationResult::LayoutMismatch:
|
2021-02-10 13:53:11 +01:00
|
|
|
return op.emitError("expected result type to be ")
|
|
|
|
<< expectedType
|
2021-11-29 16:22:45 +00:00
|
|
|
<< " or a rank-reduced version. (mismatch of result layout) ";
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
llvm_unreachable("unexpected subview verification result");
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Verifier for SubViewOp.
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult SubViewOp::verify() {
|
|
|
|
MemRefType baseType = getSourceType();
|
|
|
|
MemRefType subViewType = getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// The base memref and the view memref should be in the same memory space.
|
2021-03-23 11:45:24 +03:00
|
|
|
if (baseType.getMemorySpace() != subViewType.getMemorySpace())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("different memory spaces specified for base memref "
|
|
|
|
"type ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< baseType << " and subview memref type " << subViewType;
|
|
|
|
|
|
|
|
// Verify that the base memref type has a strided layout map.
|
|
|
|
if (!isStrided(baseType))
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("base type ") << baseType << " is not strided";
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// Verify result type against inferred type.
|
|
|
|
auto expectedType = SubViewOp::inferResultType(
|
2022-07-10 21:19:11 -07:00
|
|
|
baseType, extractFromI64ArrayAttr(getStaticOffsets()),
|
|
|
|
extractFromI64ArrayAttr(getStaticSizes()),
|
|
|
|
extractFromI64ArrayAttr(getStaticStrides()));
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2021-11-29 16:22:45 +00:00
|
|
|
auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(),
|
2022-02-02 10:18:06 -08:00
|
|
|
subViewType, getMixedSizes());
|
|
|
|
return produceSubViewErrorMsg(result, *this, expectedType);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
2021-10-11 17:02:03 +03:00
|
|
|
raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
|
2021-02-10 13:53:11 +01:00
|
|
|
return os << "range " << range.offset << ":" << range.size << ":"
|
|
|
|
<< range.stride;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the list of Range (i.e. offset, size, stride). Each Range
|
|
|
|
/// entry contains either the dynamic value or a ConstantIndexOp constructed
|
|
|
|
/// with `b` at location `loc`.
|
|
|
|
SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
|
|
|
|
OpBuilder &b, Location loc) {
|
|
|
|
std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
|
|
|
|
assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
|
|
|
|
assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
|
|
|
|
SmallVector<Range, 8> res;
|
|
|
|
unsigned rank = ranks[0];
|
|
|
|
res.reserve(rank);
|
|
|
|
for (unsigned idx = 0; idx < rank; ++idx) {
|
|
|
|
Value offset =
|
|
|
|
op.isDynamicOffset(idx)
|
|
|
|
? op.getDynamicOffset(idx)
|
2021-10-12 23:14:57 +00:00
|
|
|
: b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
|
|
|
|
Value size =
|
|
|
|
op.isDynamicSize(idx)
|
|
|
|
? op.getDynamicSize(idx)
|
|
|
|
: b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
|
2021-02-10 13:53:11 +01:00
|
|
|
Value stride =
|
|
|
|
op.isDynamicStride(idx)
|
|
|
|
? op.getDynamicStride(idx)
|
2021-10-12 23:14:57 +00:00
|
|
|
: b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
|
2021-02-10 13:53:11 +01:00
|
|
|
res.emplace_back(Range{offset, size, stride});
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
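// Illustrative sketch (hypothetical op): for
// `%s = memref.subview %m[%i, 4] [8, %n] [1, 1]`, the returned ranges are
// roughly equivalent to the following, with constants materialized for the
// static entries:
// ```
// %c4 = arith.constant 4 : index
// %c8 = arith.constant 8 : index
// %c1 = arith.constant 1 : index
// // res = [ {offset = %i, size = %c8, stride = %c1},
// //         {offset = %c4, size = %n,  stride = %c1} ]
// ```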
|
|
|
|
|
2022-04-06 03:57:03 -04:00
|
|
|
/// Compute the canonical result type of a SubViewOp. Call `inferResultType`
|
|
|
|
/// to deduce the result type for the given `sourceType`. Additionally, reduce
|
|
|
|
/// the rank of the inferred result type if `currentResultType` has a lower rank
|
|
|
|
/// than `currentSourceType`. Use this signature if `sourceType` is updated
|
|
|
|
/// together with the result type. In this case, it is important to compute
|
|
|
|
/// the dropped dimensions using `currentSourceType` whose strides align with
|
2021-12-13 14:45:34 +00:00
|
|
|
/// `currentResultType`.
|
2021-11-30 15:46:21 +00:00
|
|
|
static MemRefType getCanonicalSubViewResultType(
|
2021-12-13 14:45:34 +00:00
|
|
|
MemRefType currentResultType, MemRefType currentSourceType,
|
|
|
|
MemRefType sourceType, ArrayRef<OpFoldResult> mixedOffsets,
|
|
|
|
ArrayRef<OpFoldResult> mixedSizes, ArrayRef<OpFoldResult> mixedStrides) {
|
2021-11-30 15:46:21 +00:00
|
|
|
auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
|
|
|
|
mixedSizes, mixedStrides)
|
|
|
|
.cast<MemRefType>();
|
2022-02-06 14:06:34 +01:00
|
|
|
llvm::Optional<llvm::SmallBitVector> unusedDims =
|
2021-12-13 14:45:34 +00:00
|
|
|
computeMemRefRankReductionMask(currentSourceType, currentResultType,
|
|
|
|
mixedSizes);
|
2021-11-30 15:46:21 +00:00
|
|
|
// Return nullptr to signal failure.
|
|
|
|
if (!unusedDims)
|
|
|
|
return nullptr;
|
|
|
|
SmallVector<int64_t> shape;
|
2022-01-02 22:02:14 +00:00
|
|
|
for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
|
2022-02-06 14:06:34 +01:00
|
|
|
if (unusedDims->test(sizes.index()))
|
2021-11-30 15:46:21 +00:00
|
|
|
continue;
|
|
|
|
shape.push_back(sizes.value());
|
2021-04-28 11:01:22 -07:00
|
|
|
}
|
2021-11-30 15:46:21 +00:00
|
|
|
AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
|
|
|
|
if (!layoutMap.isIdentity())
|
2022-06-20 23:20:25 -07:00
|
|
|
layoutMap = getProjectedMap(layoutMap, *unusedDims);
|
2021-11-30 15:46:21 +00:00
|
|
|
return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
|
|
|
|
nonRankReducedType.getMemorySpace());
|
2021-04-28 11:01:22 -07:00
|
|
|
}
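// Illustrative example (hypothetical types; printed layout forms may vary):
// for a rank-reducing subview whose source comes from a cast,
// ```
// %0 = memref.cast %arg : memref<8x16x4xf32> to memref<?x?x?xf32>
// %1 = memref.subview %0[0, 0, 0] [8, 16, 1] [1, 1, 1]
//     : memref<?x?x?xf32> to memref<8x16xf32, offset: ?, strides: [?, ?]>
// ```
// recomputing the canonical result type from the static source type yields
// roughly memref<8x16xf32, offset: 0, strides: [64, 4]>.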
|
|
|
|
|
2022-04-06 03:57:03 -04:00
|
|
|
/// Compute the canonical result type of a SubViewOp. Call `inferResultType`
|
|
|
|
/// to deduce the result type. Additionally, reduce the rank of the inferred
|
|
|
|
/// result type if `currentResultType` has a lower rank than `sourceType`.
|
2021-12-13 14:45:34 +00:00
|
|
|
static MemRefType getCanonicalSubViewResultType(
|
|
|
|
MemRefType currentResultType, MemRefType sourceType,
|
|
|
|
ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes,
|
|
|
|
ArrayRef<OpFoldResult> mixedStrides) {
|
|
|
|
return getCanonicalSubViewResultType(currentResultType, sourceType,
|
|
|
|
sourceType, mixedOffsets, mixedSizes,
|
|
|
|
mixedStrides);
|
|
|
|
}
|
|
|
|
|
2021-12-29 10:48:02 -08:00
|
|
|
/// Helper method to check if a `subview` operation is trivially a no-op. This
|
|
|
|
/// is the case if all offsets are zero, all strides are 1, and the source
|
2022-04-06 03:57:03 -04:00
|
|
|
/// shape is the same as the size of the subview. In such cases, the subview can
|
|
|
|
/// be folded into its source.
|
2021-12-29 10:48:02 -08:00
|
|
|
static bool isTrivialSubViewOp(SubViewOp subViewOp) {
|
|
|
|
if (subViewOp.getSourceType().getRank() != subViewOp.getType().getRank())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
auto mixedOffsets = subViewOp.getMixedOffsets();
|
|
|
|
auto mixedSizes = subViewOp.getMixedSizes();
|
|
|
|
auto mixedStrides = subViewOp.getMixedStrides();
|
|
|
|
|
|
|
|
// Check offsets are zero.
|
|
|
|
if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
|
|
|
|
Optional<int64_t> intValue = getConstantIntValue(ofr);
|
2022-07-14 00:19:59 -07:00
|
|
|
return !intValue || intValue.value() != 0;
|
2021-12-29 10:48:02 -08:00
|
|
|
}))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check strides are one.
|
|
|
|
if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
|
|
|
|
Optional<int64_t> intValue = getConstantIntValue(ofr);
|
2022-07-14 00:19:59 -07:00
|
|
|
return !intValue || intValue.value() != 1;
|
2021-12-29 10:48:02 -08:00
|
|
|
}))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check that all size values are static and match the (static) source shape.
|
|
|
|
ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
|
2022-01-02 22:02:14 +00:00
|
|
|
for (const auto &size : llvm::enumerate(mixedSizes)) {
|
2021-12-29 10:48:02 -08:00
|
|
|
Optional<int64_t> intValue = getConstantIntValue(size.value());
|
2022-06-20 23:20:25 -07:00
|
|
|
if (!intValue || *intValue != sourceShape[size.index()])
|
2021-12-29 10:48:02 -08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// All conditions met. The `SubViewOp` is foldable as a no-op.
|
|
|
|
return true;
|
|
|
|
}
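// Illustrative example (hypothetical op): zero offsets, unit strides, and
// sizes equal to the static source shape make the subview trivial:
// ```
// %0 = memref.subview %m[0, 0] [4, 4] [1, 1] : memref<4x4xf32> to memref<4x4xf32>
// ```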
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
namespace {
|
|
|
|
/// Pattern to rewrite a subview op with MemRefCast arguments.
|
|
|
|
/// This essentially pushes memref.cast past its consuming subview when
|
|
|
|
/// `canFoldIntoConsumerOp` is true.
|
|
|
|
///
|
|
|
|
/// Example:
|
|
|
|
/// ```
|
|
|
|
/// %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
|
|
|
|
/// %1 = memref.subview %0[0, 0][3, 4][1, 1] :
|
|
|
|
/// memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
|
|
|
|
/// ```
|
|
|
|
/// is rewritten into:
|
|
|
|
/// ```
|
|
|
|
/// %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
|
|
|
|
/// %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
|
|
|
|
/// memref<3x4xf32, offset:?, strides:[?, 1]>
|
|
|
|
/// ```
|
|
|
|
class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
|
|
|
|
public:
|
|
|
|
using OpRewritePattern<SubViewOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(SubViewOp subViewOp,
|
|
|
|
PatternRewriter &rewriter) const override {
|
2022-04-06 03:57:03 -04:00
|
|
|
// If any operand is a constant, just return to let SubViewOpConstantFolder kick
|
|
|
|
// in.
|
2021-02-10 13:53:11 +01:00
|
|
|
if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
|
|
|
|
return matchPattern(operand, matchConstantIndex());
|
|
|
|
}))
|
|
|
|
return failure();
|
|
|
|
|
2022-07-10 21:19:11 -07:00
|
|
|
auto castOp = subViewOp.getSource().getDefiningOp<CastOp>();
|
2021-02-10 13:53:11 +01:00
|
|
|
if (!castOp)
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
if (!CastOp::canFoldIntoConsumerOp(castOp))
|
|
|
|
return failure();
|
|
|
|
|
2022-04-06 03:57:03 -04:00
|
|
|
// Compute the SubViewOp result type after folding the MemRefCastOp. Use
|
|
|
|
// the MemRefCastOp source operand type to infer the result type and the
|
|
|
|
// current SubViewOp source operand type to compute the dropped dimensions
|
|
|
|
// if the operation is rank-reducing.
|
2021-04-28 11:01:22 -07:00
|
|
|
auto resultType = getCanonicalSubViewResultType(
|
2021-12-13 14:45:34 +00:00
|
|
|
subViewOp.getType(), subViewOp.getSourceType(),
|
2022-07-10 21:19:11 -07:00
|
|
|
castOp.getSource().getType().cast<MemRefType>(),
|
2021-02-10 13:53:11 +01:00
|
|
|
subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
|
|
|
|
subViewOp.getMixedStrides());
|
2021-12-13 14:45:34 +00:00
|
|
|
if (!resultType)
|
|
|
|
return failure();
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
Value newSubView = rewriter.create<SubViewOp>(
|
2022-07-10 21:19:11 -07:00
|
|
|
subViewOp.getLoc(), resultType, castOp.getSource(),
|
|
|
|
subViewOp.getOffsets(), subViewOp.getSizes(), subViewOp.getStrides(),
|
|
|
|
subViewOp.getStaticOffsets(), subViewOp.getStaticSizes(),
|
|
|
|
subViewOp.getStaticStrides());
|
2021-02-10 13:53:11 +01:00
|
|
|
rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
|
|
|
|
newSubView);
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
2021-12-29 10:48:02 -08:00
|
|
|
|
2022-04-06 03:57:03 -04:00
|
|
|
/// Canonicalize subview ops that are no-ops: such a subview is replaced by its
|
|
|
|
/// source, or by a cast when the types differ only in layout (e.g. an `affine_map`).
|
2021-12-29 10:48:02 -08:00
|
|
|
class TrivialSubViewOpFolder final : public OpRewritePattern<SubViewOp> {
|
|
|
|
public:
|
|
|
|
using OpRewritePattern<SubViewOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(SubViewOp subViewOp,
|
|
|
|
PatternRewriter &rewriter) const override {
|
|
|
|
if (!isTrivialSubViewOp(subViewOp))
|
|
|
|
return failure();
|
|
|
|
if (subViewOp.getSourceType() == subViewOp.getType()) {
|
2022-07-10 21:19:11 -07:00
|
|
|
rewriter.replaceOp(subViewOp, subViewOp.getSource());
|
2021-12-29 10:48:02 -08:00
|
|
|
return success();
|
|
|
|
}
|
2022-02-06 12:32:47 -08:00
|
|
|
rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
|
2022-07-10 21:19:11 -07:00
|
|
|
subViewOp.getSource());
|
2021-12-29 10:48:02 -08:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
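// Illustrative rewrite (hypothetical `#map`, assumed to encode the same
// strided layout): when the trivial subview's result type differs from the
// source type only in layout, the pattern emits a cast instead of replacing
// the subview with its source:
// ```
// %1 = memref.subview %m[0, 0] [4, 4] [1, 1]
//     : memref<4x4xf32> to memref<4x4xf32, #map>
// // becomes
// %1 = memref.cast %m : memref<4x4xf32> to memref<4x4xf32, #map>
// ```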
|
2021-02-10 13:53:11 +01:00
|
|
|
} // namespace
|
|
|
|
|
2021-04-28 11:01:22 -07:00
|
|
|
/// Return the canonical type of the result of a subview.
|
|
|
|
struct SubViewReturnTypeCanonicalizer {
|
|
|
|
MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
|
|
|
|
ArrayRef<OpFoldResult> mixedSizes,
|
|
|
|
ArrayRef<OpFoldResult> mixedStrides) {
|
2021-11-30 15:46:21 +00:00
|
|
|
return getCanonicalSubViewResultType(op.getType(), op.getSourceType(),
|
|
|
|
mixedOffsets, mixedSizes,
|
|
|
|
mixedStrides);
|
2021-04-28 11:01:22 -07:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
/// A canonicalizer wrapper to replace SubViewOps.
|
|
|
|
struct SubViewCanonicalizer {
|
|
|
|
void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
|
2022-02-06 12:32:47 -08:00
|
|
|
rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-03-22 16:58:34 -07:00
|
|
|
void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
|
2021-02-10 13:53:11 +01:00
|
|
|
MLIRContext *context) {
|
2021-04-28 11:01:22 -07:00
|
|
|
results
|
|
|
|
.add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
|
|
|
|
SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
|
2021-12-29 10:48:02 -08:00
|
|
|
SubViewOpMemRefCastFolder, TrivialSubViewOpFolder>(context);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
auto resultShapedType = getResult().getType().cast<ShapedType>();
|
2022-07-10 21:19:11 -07:00
|
|
|
auto sourceShapedType = getSource().getType().cast<ShapedType>();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
if (resultShapedType.hasStaticShape() &&
|
|
|
|
resultShapedType == sourceShapedType) {
|
|
|
|
return getViewSource();
|
|
|
|
}
|
|
|
|
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// TransposeOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Build a strided memref type by applying `permutationMap` to `memRefType`.
|
|
|
|
static MemRefType inferTransposeResultType(MemRefType memRefType,
|
|
|
|
AffineMap permutationMap) {
|
|
|
|
auto rank = memRefType.getRank();
|
|
|
|
auto originalSizes = memRefType.getShape();
|
|
|
|
// Compute permuted sizes.
|
|
|
|
SmallVector<int64_t, 4> sizes(rank, 0);
|
2022-01-02 22:02:14 +00:00
|
|
|
for (const auto &en : llvm::enumerate(permutationMap.getResults()))
|
2021-02-10 13:53:11 +01:00
|
|
|
sizes[en.index()] =
|
|
|
|
originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
|
|
|
|
|
|
|
|
// Compute permuted strides.
|
|
|
|
int64_t offset;
|
|
|
|
SmallVector<int64_t, 4> strides;
|
|
|
|
auto res = getStridesAndOffset(memRefType, strides, offset);
|
|
|
|
assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
|
|
|
|
(void)res;
|
|
|
|
auto map =
|
|
|
|
makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
|
|
|
|
map = permutationMap ? map.compose(permutationMap) : map;
|
2021-10-11 18:25:14 +03:00
|
|
|
return MemRefType::Builder(memRefType)
|
|
|
|
.setShape(sizes)
|
|
|
|
.setLayout(AffineMapAttr::get(map));
|
2021-02-10 13:53:11 +01:00
|
|
|
}
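// Illustrative example (hypothetical types; printed layout form may vary):
// permuting a row-major 3x4 memref with (d0, d1) -> (d1, d0) swaps the sizes
// and composes the permutation into the strided layout:
// ```
// memref<3x4xf32> with (d0, d1) -> (d1, d0)
//   => memref<4x3xf32, affine_map<(d0, d1) -> (d0 + d1 * 4)>>
// ```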
|
|
|
|
|
|
|
|
void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
|
|
|
|
AffineMapAttr permutation,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
auto permutationMap = permutation.getValue();
|
|
|
|
assert(permutationMap);
|
|
|
|
|
|
|
|
auto memRefType = in.getType().cast<MemRefType>();
|
|
|
|
// Compute result type.
|
|
|
|
MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);
|
|
|
|
|
|
|
|
build(b, result, resultType, in, attrs);
|
2022-06-26 20:45:25 -07:00
|
|
|
result.addAttribute(TransposeOp::getPermutationAttrStrName(), permutation);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// transpose $in $permutation attr-dict : type($in) `to` type(results)
|
2022-02-07 17:54:04 -08:00
|
|
|
void TransposeOp::print(OpAsmPrinter &p) {
|
2022-07-10 21:19:11 -07:00
|
|
|
p << " " << getIn() << " " << getPermutation();
|
2022-06-26 20:45:25 -07:00
|
|
|
p.printOptionalAttrDict((*this)->getAttrs(), {getPermutationAttrStrName()});
|
2022-07-10 21:19:11 -07:00
|
|
|
p << " : " << getIn().getType() << " to " << getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
2022-02-07 17:54:04 -08:00
|
|
|
ParseResult TransposeOp::parse(OpAsmParser &parser, OperationState &result) {
|
2022-03-21 21:42:13 +01:00
|
|
|
OpAsmParser::UnresolvedOperand in;
|
2021-02-10 13:53:11 +01:00
|
|
|
AffineMap permutation;
|
|
|
|
MemRefType srcType, dstType;
|
|
|
|
if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
|
|
|
|
parser.parseOptionalAttrDict(result.attributes) ||
|
|
|
|
parser.parseColonType(srcType) ||
|
|
|
|
parser.resolveOperand(in, srcType, result.operands) ||
|
|
|
|
parser.parseKeywordType("to", dstType) ||
|
|
|
|
parser.addTypeToList(dstType, result.types))
|
|
|
|
return failure();
|
|
|
|
|
2022-06-26 20:45:25 -07:00
|
|
|
result.addAttribute(TransposeOp::getPermutationAttrStrName(),
|
2021-02-10 13:53:11 +01:00
|
|
|
AffineMapAttr::get(permutation));
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult TransposeOp::verify() {
|
2022-07-10 21:19:11 -07:00
|
|
|
if (!getPermutation().isPermutation())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected a permutation map");
|
2022-07-10 21:19:11 -07:00
|
|
|
if (getPermutation().getNumDims() != getShapedType().getRank())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("expected a permutation map of same rank as the input");
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2022-07-10 21:19:11 -07:00
|
|
|
auto srcType = getIn().getType().cast<MemRefType>();
|
2022-02-02 10:18:06 -08:00
|
|
|
auto dstType = getType().cast<MemRefType>();
|
2022-07-10 21:19:11 -07:00
|
|
|
auto transposedType = inferTransposeResultType(srcType, getPermutation());
|
2021-02-10 13:53:11 +01:00
|
|
|
if (dstType != transposedType)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError("output type ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< dstType << " does not match transposed input type " << srcType
|
|
|
|
<< ", " << transposedType;
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
|
|
|
|
if (succeeded(foldMemRefCast(*this)))
|
|
|
|
return getResult();
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ViewOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult ViewOp::verify() {
|
|
|
|
auto baseType = getOperand(0).getType().cast<MemRefType>();
|
|
|
|
auto viewType = getType();
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// The base memref should have identity layout map (or none).
|
2021-10-11 18:25:14 +03:00
|
|
|
if (!baseType.getLayout().isIdentity())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("unsupported map for base memref type ") << baseType;
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// The result memref should have identity layout map (or none).
|
2021-10-11 18:25:14 +03:00
|
|
|
if (!viewType.getLayout().isIdentity())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("unsupported map for result memref type ") << viewType;
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
// The base memref and the view memref should be in the same memory space.
|
2021-03-23 11:45:24 +03:00
|
|
|
if (baseType.getMemorySpace() != viewType.getMemorySpace())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("different memory spaces specified for base memref "
|
|
|
|
"type ")
|
2021-02-10 13:53:11 +01:00
|
|
|
<< baseType << " and view memref type " << viewType;
|
|
|
|
|
|
|
|
// Verify that we have the correct number of sizes for the result type.
|
|
|
|
unsigned numDynamicDims = viewType.getNumDynamicDims();
|
2022-07-10 21:19:11 -07:00
|
|
|
if (getSizes().size() != numDynamicDims)
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitError("incorrect number of size operands for type ") << viewType;
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
return success();
|
|
|
|
}
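// Illustrative example (hypothetical values): a view of a raw i8 buffer with
// an identity-layout result type; one size operand is needed per dynamic
// result dimension:
// ```
// %v = memref.view %buf[%offset][%n] : memref<2048xi8> to memref<?x4xf32>
// ```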
|
|
|
|
|
2022-07-10 21:19:11 -07:00
|
|
|
Value ViewOp::getViewSource() { return getSource(); }
|
2021-02-10 13:53:11 +01:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
|
|
|
|
using OpRewritePattern<ViewOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(ViewOp viewOp,
|
|
|
|
PatternRewriter &rewriter) const override {
|
|
|
|
// Return if none of the operands are constants.
|
|
|
|
if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
|
|
|
|
return matchPattern(operand, matchConstantIndex());
|
|
|
|
}))
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
// Get result memref type.
|
|
|
|
auto memrefType = viewOp.getType();
|
|
|
|
|
|
|
|
// Get offset from old memref view type 'memRefType'.
|
|
|
|
int64_t oldOffset;
|
|
|
|
SmallVector<int64_t, 4> oldStrides;
|
|
|
|
if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
|
|
|
|
return failure();
|
|
|
|
assert(oldOffset == 0 && "Expected 0 offset");
|
|
|
|
|
|
|
|
SmallVector<Value, 4> newOperands;
|
|
|
|
|
|
|
|
// Offset cannot be folded into result type.
|
|
|
|
|
|
|
|
// Fold any dynamic dim operands which are produced by a constant.
|
|
|
|
SmallVector<int64_t, 4> newShapeConstants;
|
|
|
|
newShapeConstants.reserve(memrefType.getRank());
|
|
|
|
|
|
|
|
unsigned dynamicDimPos = 0;
|
|
|
|
unsigned rank = memrefType.getRank();
|
|
|
|
for (unsigned dim = 0, e = rank; dim < e; ++dim) {
|
|
|
|
int64_t dimSize = memrefType.getDimSize(dim);
|
|
|
|
// If this is already a static dimension, keep it.
|
|
|
|
if (!ShapedType::isDynamic(dimSize)) {
|
|
|
|
newShapeConstants.push_back(dimSize);
|
|
|
|
continue;
|
|
|
|
}
|
2022-07-10 21:19:11 -07:00
|
|
|
auto *defOp = viewOp.getSizes()[dynamicDimPos].getDefiningOp();
|
2021-10-12 23:14:57 +00:00
|
|
|
if (auto constantIndexOp =
|
|
|
|
dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
|
2021-02-10 13:53:11 +01:00
|
|
|
// Dynamic shape dimension will be folded.
|
2021-10-12 23:14:57 +00:00
|
|
|
newShapeConstants.push_back(constantIndexOp.value());
|
2021-02-10 13:53:11 +01:00
|
|
|
} else {
|
|
|
|
// Dynamic shape dimension not folded; copy operand from old memref.
|
|
|
|
newShapeConstants.push_back(dimSize);
|
2022-07-10 21:19:11 -07:00
|
|
|
newOperands.push_back(viewOp.getSizes()[dynamicDimPos]);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
dynamicDimPos++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create new memref type with constant folded dims.
|
|
|
|
MemRefType newMemRefType =
|
|
|
|
MemRefType::Builder(memrefType).setShape(newShapeConstants);
|
|
|
|
// Nothing new, don't fold.
|
|
|
|
if (newMemRefType == memrefType)
|
|
|
|
return failure();
|
|
|
|
|
|
|
|
// Create new ViewOp.
|
2022-07-10 21:19:11 -07:00
|
|
|
auto newViewOp = rewriter.create<ViewOp>(
|
|
|
|
viewOp.getLoc(), newMemRefType, viewOp.getOperand(0),
|
|
|
|
viewOp.getByteShift(), newOperands);
|
2021-02-10 13:53:11 +01:00
|
|
|
// Insert a cast so we have the same type as the old memref type.
|
2022-02-06 12:32:47 -08:00
|
|
|
rewriter.replaceOpWithNewOp<CastOp>(viewOp, viewOp.getType(), newViewOp);
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
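// Illustrative rewrite (hypothetical values; printed forms may vary): a size
// operand defined by an arith.constant is folded into the result type, and a
// cast restores the original type:
// ```
// %c16 = arith.constant 16 : index
// %v = memref.view %buf[%off][%c16] : memref<2048xi8> to memref<?x4xf32>
// // becomes
// %n = memref.view %buf[%off][] : memref<2048xi8> to memref<16x4xf32>
// %v = memref.cast %n : memref<16x4xf32> to memref<?x4xf32>
// ```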
|
|
|
|
|
|
|
|
struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
|
|
|
|
using OpRewritePattern<ViewOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(ViewOp viewOp,
|
|
|
|
PatternRewriter &rewriter) const override {
|
|
|
|
Value memrefOperand = viewOp.getOperand(0);
|
|
|
|
CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
|
|
|
|
if (!memrefCastOp)
|
|
|
|
return failure();
|
|
|
|
Value allocOperand = memrefCastOp.getOperand();
|
|
|
|
AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
|
|
|
|
if (!allocOp)
|
|
|
|
return failure();
|
|
|
|
rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
|
2022-07-10 21:19:11 -07:00
|
|
|
viewOp.getByteShift(),
|
|
|
|
viewOp.getSizes());
|
2021-02-10 13:53:11 +01:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-12-07 18:27:58 +00:00
|
|
|
} // namespace
|
2021-02-10 13:53:11 +01:00
|
|
|
|
2021-03-22 16:58:34 -07:00
|
|
|
void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
|
2021-02-10 13:53:11 +01:00
|
|
|
MLIRContext *context) {
|
2021-03-22 16:58:34 -07:00
|
|
|
results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
|
2021-02-10 13:53:11 +01:00
|
|
|
}
|
|
|
|
|
2021-12-30 00:59:58 -05:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AtomicRMWOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2022-02-02 10:18:06 -08:00
|
|
|
LogicalResult AtomicRMWOp::verify() {
|
|
|
|
if (getMemRefType().getRank() != getNumOperands() - 2)
|
|
|
|
return emitOpError(
|
2021-12-30 00:59:58 -05:00
|
|
|
"expects the number of subscripts to be equal to memref rank");
|
2022-07-10 21:19:11 -07:00
|
|
|
switch (getKind()) {
|
2021-12-30 00:59:58 -05:00
|
|
|
case arith::AtomicRMWKind::addf:
|
|
|
|
case arith::AtomicRMWKind::maxf:
|
|
|
|
case arith::AtomicRMWKind::minf:
|
|
|
|
case arith::AtomicRMWKind::mulf:
|
2022-07-10 21:19:11 -07:00
|
|
|
if (!getValue().getType().isa<FloatType>())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError() << "with kind '"
|
2022-07-10 21:19:11 -07:00
|
|
|
<< arith::stringifyAtomicRMWKind(getKind())
|
2022-02-02 10:18:06 -08:00
|
|
|
<< "' expects a floating-point type";
|
2021-12-30 00:59:58 -05:00
|
|
|
break;
|
|
|
|
case arith::AtomicRMWKind::addi:
|
|
|
|
case arith::AtomicRMWKind::maxs:
|
|
|
|
case arith::AtomicRMWKind::maxu:
|
|
|
|
case arith::AtomicRMWKind::mins:
|
|
|
|
case arith::AtomicRMWKind::minu:
|
|
|
|
case arith::AtomicRMWKind::muli:
|
|
|
|
case arith::AtomicRMWKind::ori:
|
|
|
|
case arith::AtomicRMWKind::andi:
|
2022-07-10 21:19:11 -07:00
|
|
|
if (!getValue().getType().isa<IntegerType>())
|
2022-02-02 10:18:06 -08:00
|
|
|
return emitOpError() << "with kind '"
|
2022-07-10 21:19:11 -07:00
|
|
|
<< arith::stringifyAtomicRMWKind(getKind())
|
2022-02-02 10:18:06 -08:00
|
|
|
<< "' expects an integer type";
|
2021-12-30 00:59:58 -05:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
OpFoldResult AtomicRMWOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
/// atomicrmw(memrefcast) -> atomicrmw
|
2022-07-10 21:19:11 -07:00
|
|
|
if (succeeded(foldMemRefCast(*this, getValue())))
|
2021-12-30 00:59:58 -05:00
|
|
|
return getResult();
|
|
|
|
return OpFoldResult();
|
|
|
|
}
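// Illustrative example (syntax sketched from the op's assembly format; the
// exact textual form may differ): the verifier above requires a float value
// for float kinds such as addf:
// ```
// %old = memref.atomic_rmw addf %val, %A[%i] : (f32, memref<16xf32>) -> f32
// ```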
|
|
|
|
|
2021-02-10 13:53:11 +01:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// TableGen'd op method definitions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#define GET_OP_CLASSES
|
|
|
|
#include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"
|