2019-03-01 13:48:24 -08:00
|
|
|
//===- Ops.cpp - Standard MLIR Operations ---------------------------------===//
|
2018-07-05 09:12:11 -07:00
|
|
|
//
|
2020-01-26 03:58:30 +00:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
2019-12-23 09:35:36 -08:00
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2018-07-05 09:12:11 -07:00
|
|
|
//
|
2019-12-23 09:35:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2018-07-05 09:12:11 -07:00
|
|
|
|
2020-02-21 11:54:49 -08:00
|
|
|
#include "mlir/Dialect/StandardOps/IR/Ops.h"
|
2019-04-27 20:55:38 -07:00
|
|
|
|
2019-12-08 06:25:17 -08:00
|
|
|
#include "mlir/Dialect/CommonFolders.h"
|
2018-09-24 10:23:02 -07:00
|
|
|
#include "mlir/IR/AffineExpr.h"
|
2018-07-24 10:13:31 -07:00
|
|
|
#include "mlir/IR/AffineMap.h"
|
2020-09-14 11:54:55 +02:00
|
|
|
#include "mlir/IR/BlockAndValueMapping.h"
|
2018-07-25 11:15:20 -07:00
|
|
|
#include "mlir/IR/Builders.h"
|
2019-07-11 11:41:04 -07:00
|
|
|
#include "mlir/IR/Function.h"
|
2018-10-29 10:22:49 -07:00
|
|
|
#include "mlir/IR/Matchers.h"
|
2019-05-22 13:41:23 -07:00
|
|
|
#include "mlir/IR/Module.h"
|
2018-07-24 16:07:22 -07:00
|
|
|
#include "mlir/IR/OpImplementation.h"
|
2018-10-25 16:44:04 -07:00
|
|
|
#include "mlir/IR/PatternMatch.h"
|
2019-01-03 14:29:52 -08:00
|
|
|
#include "mlir/IR/StandardTypes.h"
|
2019-09-21 16:14:07 -07:00
|
|
|
#include "mlir/IR/TypeUtilities.h"
|
2018-12-27 14:35:10 -08:00
|
|
|
#include "mlir/IR/Value.h"
|
2018-10-03 10:07:54 -07:00
|
|
|
#include "mlir/Support/MathExtras.h"
|
2019-09-05 12:23:45 -07:00
|
|
|
#include "mlir/Transforms/InliningUtils.h"
|
2020-10-02 06:30:56 -04:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2018-11-08 04:02:00 -08:00
|
|
|
#include "llvm/ADT/StringSwitch.h"
|
2019-04-27 20:55:38 -07:00
|
|
|
#include "llvm/Support/FormatVariadic.h"
|
2018-07-05 09:12:11 -07:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2019-11-07 06:32:39 -08:00
|
|
|
|
2019-11-15 10:16:33 -08:00
|
|
|
// Pull in all enum type definitions and utility function declarations.
|
2020-02-21 11:54:49 -08:00
|
|
|
#include "mlir/Dialect/StandardOps/IR/OpsEnums.cpp.inc"
|
2019-11-15 10:16:33 -08:00
|
|
|
|
2018-07-05 09:12:11 -07:00
|
|
|
using namespace mlir;
|
|
|
|
|
2019-08-21 16:50:30 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// StandardOpsDialect Interfaces
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
namespace {
|
2019-09-05 12:23:45 -07:00
|
|
|
/// This class defines the interface for handling inlining with standard
|
|
|
|
/// operations.
|
|
|
|
struct StdInlinerInterface : public DialectInlinerInterface {
|
|
|
|
using DialectInlinerInterface::DialectInlinerInterface;
|
|
|
|
|
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Analysis Hooks
|
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
|
2020-10-28 21:48:38 -07:00
|
|
|
/// All call operations within standard ops can be inlined.
|
2020-10-28 21:48:48 -07:00
|
|
|
bool isLegalToInline(Operation *call, Operation *callable,
|
|
|
|
bool wouldBeCloned) const final {
|
2020-10-28 21:48:38 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 12:23:45 -07:00
|
|
|
/// All operations within standard ops can be inlined.
|
2020-10-28 21:48:48 -07:00
|
|
|
bool isLegalToInline(Operation *, Region *, bool,
|
2019-09-05 12:23:45 -07:00
|
|
|
BlockAndValueMapping &) const final {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
// Transformation Hooks
|
|
|
|
//===--------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Handle the given inlined terminator by replacing it with a new operation
|
|
|
|
/// as necessary.
|
|
|
|
void handleTerminator(Operation *op, Block *newDest) const final {
|
|
|
|
// Only "std.return" needs to be handled here.
|
|
|
|
auto returnOp = dyn_cast<ReturnOp>(op);
|
|
|
|
if (!returnOp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Replace the return with a branch to the dest.
|
|
|
|
OpBuilder builder(op);
|
2019-12-07 10:35:01 -08:00
|
|
|
builder.create<BranchOp>(op->getLoc(), newDest, returnOp.getOperands());
|
2019-09-05 12:23:45 -07:00
|
|
|
op->erase();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Handle the given inlined terminator by replacing it with a new operation
|
|
|
|
/// as necessary.
|
|
|
|
void handleTerminator(Operation *op,
|
2019-12-23 14:45:01 -08:00
|
|
|
ArrayRef<Value> valuesToRepl) const final {
|
2019-09-05 12:23:45 -07:00
|
|
|
// Only "std.return" needs to be handled here.
|
|
|
|
auto returnOp = cast<ReturnOp>(op);
|
|
|
|
|
|
|
|
// Replace the values directly with the return operands.
|
|
|
|
assert(returnOp.getNumOperands() == valuesToRepl.size());
|
|
|
|
for (const auto &it : llvm::enumerate(returnOp.getOperands()))
|
2020-01-11 08:54:04 -08:00
|
|
|
valuesToRepl[it.index()].replaceAllUsesWith(it.value());
|
2019-09-05 12:23:45 -07:00
|
|
|
}
|
|
|
|
};
|
2019-08-21 16:50:30 -07:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
2018-10-21 19:49:31 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// StandardOpsDialect
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-10-11 05:13:18 -07:00
|
|
|
/// A custom unary operation printer that omits the "std." prefix from the
|
|
|
|
/// operation names.
|
|
|
|
static void printStandardUnaryOp(Operation *op, OpAsmPrinter &p) {
|
|
|
|
assert(op->getNumOperands() == 1 && "unary op should have one operand");
|
|
|
|
assert(op->getNumResults() == 1 && "unary op should have one result");
|
|
|
|
|
2019-10-23 22:18:58 -07:00
|
|
|
int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
|
2019-10-11 05:13:18 -07:00
|
|
|
p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
|
2020-01-11 08:54:04 -08:00
|
|
|
<< op->getOperand(0);
|
2019-10-11 05:13:18 -07:00
|
|
|
p.printOptionalAttrDict(op->getAttrs());
|
2020-01-11 08:54:04 -08:00
|
|
|
p << " : " << op->getOperand(0).getType();
|
2019-10-11 05:13:18 -07:00
|
|
|
}
|
|
|
|
|
2019-03-02 18:03:03 -08:00
|
|
|
/// A custom binary operation printer that omits the "std." prefix from the
|
|
|
|
/// operation names.
|
2019-09-20 20:43:02 -07:00
|
|
|
static void printStandardBinaryOp(Operation *op, OpAsmPrinter &p) {
|
2019-03-02 18:03:03 -08:00
|
|
|
assert(op->getNumOperands() == 2 && "binary op should have two operands");
|
|
|
|
assert(op->getNumResults() == 1 && "binary op should have one result");
|
|
|
|
|
|
|
|
// If not all the operand and result types are the same, just use the
|
|
|
|
// generic assembly form to avoid omitting information in printing.
|
2020-01-11 08:54:04 -08:00
|
|
|
auto resultType = op->getResult(0).getType();
|
|
|
|
if (op->getOperand(0).getType() != resultType ||
|
|
|
|
op->getOperand(1).getType() != resultType) {
|
2019-09-20 20:43:02 -07:00
|
|
|
p.printGenericOp(op);
|
2019-03-02 18:03:03 -08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-10-23 22:18:58 -07:00
|
|
|
int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
|
2019-10-11 05:13:18 -07:00
|
|
|
p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
|
2020-01-11 08:54:04 -08:00
|
|
|
<< op->getOperand(0) << ", " << op->getOperand(1);
|
2019-09-20 20:43:02 -07:00
|
|
|
p.printOptionalAttrDict(op->getAttrs());
|
2019-03-02 18:03:03 -08:00
|
|
|
|
|
|
|
// Now we can output only one type for all operands and the result.
|
2020-01-11 08:54:04 -08:00
|
|
|
p << " : " << op->getResult(0).getType();
|
2019-03-02 18:03:03 -08:00
|
|
|
}
|
|
|
|
|
2019-05-13 11:56:21 -07:00
|
|
|
/// A custom cast operation printer that omits the "std." prefix from the
|
|
|
|
/// operation names.
|
2019-09-20 20:43:02 -07:00
|
|
|
static void printStandardCastOp(Operation *op, OpAsmPrinter &p) {
|
2019-10-23 22:18:58 -07:00
|
|
|
int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
|
2019-10-11 05:13:18 -07:00
|
|
|
p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
|
2020-01-11 08:54:04 -08:00
|
|
|
<< op->getOperand(0) << " : " << op->getOperand(0).getType() << " to "
|
|
|
|
<< op->getResult(0).getType();
|
2019-05-13 11:56:21 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/// A custom cast operation verifier.
|
2020-02-24 16:49:52 -08:00
|
|
|
template <typename T>
|
|
|
|
static LogicalResult verifyCastOp(T op) {
|
2020-01-11 08:54:04 -08:00
|
|
|
auto opType = op.getOperand().getType();
|
2019-05-13 11:56:21 -07:00
|
|
|
auto resType = op.getType();
|
|
|
|
if (!T::areCastCompatible(opType, resType))
|
|
|
|
return op.emitError("operand type ") << opType << " and result type "
|
|
|
|
<< resType << " are cast incompatible";
|
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2020-08-07 02:41:44 +00:00
|
|
|
void StandardOpsDialect::initialize() {
|
2019-07-05 05:04:53 -07:00
|
|
|
addOperations<DmaStartOp, DmaWaitOp,
|
2019-01-04 01:34:16 -08:00
|
|
|
#define GET_OP_LIST
|
2020-02-21 11:54:49 -08:00
|
|
|
#include "mlir/Dialect/StandardOps/IR/Ops.cpp.inc"
|
2019-01-04 01:34:16 -08:00
|
|
|
>();
|
2019-11-20 10:19:01 -08:00
|
|
|
addInterfaces<StdInlinerInterface>();
|
2018-10-21 19:49:31 -07:00
|
|
|
}
|
|
|
|
|
2019-12-13 12:21:42 -08:00
|
|
|
/// Materialize a single constant operation from a given attribute value with
|
|
|
|
/// the desired resultant type.
|
|
|
|
Operation *StandardOpsDialect::materializeConstant(OpBuilder &builder,
|
|
|
|
Attribute value, Type type,
|
|
|
|
Location loc) {
|
|
|
|
return builder.create<ConstantOp>(loc, type, value);
|
|
|
|
}
|
|
|
|
|
2019-03-01 16:58:00 -08:00
|
|
|
/// Matches a ConstantIndexOp.
|
|
|
|
/// TODO: This should probably just be a general matcher that uses m_Constant
|
|
|
|
/// and checks the operation for an index type.
|
|
|
|
static detail::op_matcher<ConstantIndexOp> m_ConstantIndex() {
|
|
|
|
return detail::op_matcher<ConstantIndexOp>();
|
|
|
|
}
|
|
|
|
|
2018-10-25 16:44:04 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Common canonicalization pattern support logic
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// This is a common class used for patterns of the form
|
|
|
|
/// "someop(memrefcast) -> someop". It folds the source of any memref_cast
|
|
|
|
/// into the root operation directly.
|
2019-12-13 14:52:39 -08:00
|
|
|
static LogicalResult foldMemRefCast(Operation *op) {
|
|
|
|
bool folded = false;
|
|
|
|
for (OpOperand &operand : op->getOpOperands()) {
|
2020-05-09 17:52:35 -07:00
|
|
|
auto cast = operand.get().getDefiningOp<MemRefCastOp>();
|
2020-01-11 08:54:04 -08:00
|
|
|
if (cast && !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
|
2019-12-13 14:52:39 -08:00
|
|
|
operand.set(cast.getOperand());
|
|
|
|
folded = true;
|
|
|
|
}
|
2018-10-25 16:44:04 -07:00
|
|
|
}
|
2019-12-13 14:52:39 -08:00
|
|
|
return success(folded);
|
|
|
|
}
|
2018-10-25 16:44:04 -07:00
|
|
|
|
2020-09-09 12:34:08 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Common cast compatibility check for vector types.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// This method checks for cast compatibility of vector types.
|
|
|
|
/// If 'a' and 'b' are vector types, and they are cast compatible,
|
|
|
|
/// it calls the 'areElementsCastCompatible' function to check for
|
|
|
|
/// element cast compatibility.
|
|
|
|
/// Returns 'true' if the vector types are cast compatible, and 'false'
|
|
|
|
/// otherwise.
|
|
|
|
static bool areVectorCastSimpleCompatible(
|
|
|
|
Type a, Type b, function_ref<bool(Type, Type)> areElementsCastCompatible) {
|
|
|
|
if (auto va = a.dyn_cast<VectorType>())
|
|
|
|
if (auto vb = b.dyn_cast<VectorType>())
|
|
|
|
return va.getShape().equals(vb.getShape()) &&
|
|
|
|
areElementsCastCompatible(va.getElementType(),
|
|
|
|
vb.getElementType());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-11-02 11:21:29 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Helpers for Tensor[Load|Store]Op, TensorToMemrefOp, and GlobalMemrefOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
static Type getTensorTypeFromMemRefType(Type type) {
|
|
|
|
if (auto memref = type.dyn_cast<MemRefType>())
|
|
|
|
return RankedTensorType::get(memref.getShape(), memref.getElementType());
|
|
|
|
if (auto memref = type.dyn_cast<UnrankedMemRefType>())
|
|
|
|
return UnrankedTensorType::get(memref.getElementType());
|
|
|
|
return NoneType::get(type.getContext());
|
|
|
|
}
|
|
|
|
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AddFOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
OpFoldResult AddFOp::fold(ArrayRef<Attribute> operands) {
|
2019-01-11 09:12:11 -08:00
|
|
|
return constFoldBinaryOp<FloatAttr>(
|
|
|
|
operands, [](APFloat a, APFloat b) { return a + b; });
|
2018-09-19 21:35:11 -07:00
|
|
|
}
|
|
|
|
|
2018-10-03 09:43:13 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AddIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
OpFoldResult AddIOp::fold(ArrayRef<Attribute> operands) {
|
2019-01-24 12:34:00 -08:00
|
|
|
/// addi(x, 0) -> x
|
2019-05-16 12:51:45 -07:00
|
|
|
if (matchPattern(rhs(), m_Zero()))
|
|
|
|
return lhs();
|
2018-10-25 16:44:04 -07:00
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
return constFoldBinaryOp<IntegerAttr>(operands,
|
|
|
|
[](APInt a, APInt b) { return a + b; });
|
2018-10-25 16:44:04 -07:00
|
|
|
}
|
|
|
|
|
2020-10-22 14:48:52 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// BaseOpWithOffsetSizesAndStridesOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Print a list with either (1) the static integer value in `arrayAttr` if
|
|
|
|
/// `isDynamic` evaluates to false or (2) the next value otherwise.
|
|
|
|
/// This allows idiomatic printing of mixed value and integer attributes in a
|
|
|
|
/// list. E.g. `[%arg0, 7, 42, %arg42]`.
|
|
|
|
static void
|
|
|
|
printListOfOperandsOrIntegers(OpAsmPrinter &p, ValueRange values,
|
|
|
|
ArrayAttr arrayAttr,
|
|
|
|
llvm::function_ref<bool(int64_t)> isDynamic) {
|
|
|
|
p << '[';
|
|
|
|
unsigned idx = 0;
|
|
|
|
llvm::interleaveComma(arrayAttr, p, [&](Attribute a) {
|
|
|
|
int64_t val = a.cast<IntegerAttr>().getInt();
|
|
|
|
if (isDynamic(val))
|
|
|
|
p << values[idx++];
|
|
|
|
else
|
|
|
|
p << val;
|
|
|
|
});
|
|
|
|
p << ']';
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Parse a mixed list with either (1) static integer values or (2) SSA values.
|
|
|
|
/// Fill `result` with the integer ArrayAttr named `attrName` where `dynVal`
|
|
|
|
/// encode the position of SSA values. Add the parsed SSA values to `ssa`
|
|
|
|
/// in-order.
|
|
|
|
//
|
|
|
|
/// E.g. after parsing "[%arg0, 7, 42, %arg42]":
|
|
|
|
/// 1. `result` is filled with the i64 ArrayAttr "[`dynVal`, 7, 42, `dynVal`]"
|
|
|
|
/// 2. `ssa` is filled with "[%arg0, %arg1]".
|
|
|
|
static ParseResult
|
|
|
|
parseListOfOperandsOrIntegers(OpAsmParser &parser, OperationState &result,
|
|
|
|
StringRef attrName, int64_t dynVal,
|
|
|
|
SmallVectorImpl<OpAsmParser::OperandType> &ssa) {
|
|
|
|
if (failed(parser.parseLSquare()))
|
|
|
|
return failure();
|
|
|
|
// 0-D.
|
|
|
|
if (succeeded(parser.parseOptionalRSquare())) {
|
|
|
|
result.addAttribute(attrName, parser.getBuilder().getArrayAttr({}));
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
SmallVector<int64_t, 4> attrVals;
|
|
|
|
while (true) {
|
|
|
|
OpAsmParser::OperandType operand;
|
|
|
|
auto res = parser.parseOptionalOperand(operand);
|
|
|
|
if (res.hasValue() && succeeded(res.getValue())) {
|
|
|
|
ssa.push_back(operand);
|
|
|
|
attrVals.push_back(dynVal);
|
|
|
|
} else {
|
|
|
|
IntegerAttr attr;
|
|
|
|
if (failed(parser.parseAttribute<IntegerAttr>(attr)))
|
|
|
|
return parser.emitError(parser.getNameLoc())
|
|
|
|
<< "expected SSA value or integer";
|
|
|
|
attrVals.push_back(attr.getInt());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (succeeded(parser.parseOptionalComma()))
|
|
|
|
continue;
|
|
|
|
if (failed(parser.parseRSquare()))
|
|
|
|
return failure();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
auto arrayAttr = parser.getBuilder().getI64ArrayAttr(attrVals);
|
|
|
|
result.addAttribute(attrName, arrayAttr);
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Verify that a particular offset/size/stride static attribute is well-formed.
|
|
|
|
template <typename OpType>
|
|
|
|
static LogicalResult verifyOpWithOffsetSizesAndStridesPart(
|
|
|
|
OpType op, StringRef name, unsigned expectedNumElements, StringRef attrName,
|
|
|
|
ArrayAttr attr, llvm::function_ref<bool(int64_t)> isDynamic,
|
|
|
|
ValueRange values) {
|
|
|
|
/// Check static and dynamic offsets/sizes/strides breakdown.
|
|
|
|
if (attr.size() != expectedNumElements)
|
|
|
|
return op.emitError("expected ")
|
|
|
|
<< expectedNumElements << " " << name << " values";
|
|
|
|
unsigned expectedNumDynamicEntries =
|
|
|
|
llvm::count_if(attr.getValue(), [&](Attribute attr) {
|
|
|
|
return isDynamic(attr.cast<IntegerAttr>().getInt());
|
|
|
|
});
|
|
|
|
if (values.size() != expectedNumDynamicEntries)
|
|
|
|
return op.emitError("expected ")
|
|
|
|
<< expectedNumDynamicEntries << " dynamic " << name << " values";
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Extract int64_t values from the assumed ArrayAttr of IntegerAttr.
|
|
|
|
static SmallVector<int64_t, 4> extractFromI64ArrayAttr(Attribute attr) {
|
|
|
|
return llvm::to_vector<4>(
|
|
|
|
llvm::map_range(attr.cast<ArrayAttr>(), [](Attribute a) -> int64_t {
|
|
|
|
return a.cast<IntegerAttr>().getInt();
|
|
|
|
}));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Verify static attributes offsets/sizes/strides.
|
|
|
|
template <typename OpType>
|
|
|
|
static LogicalResult verifyOpWithOffsetSizesAndStrides(OpType op) {
|
|
|
|
unsigned srcRank = op.getSourceRank();
|
|
|
|
if (failed(verifyOpWithOffsetSizesAndStridesPart(
|
|
|
|
op, "offset", srcRank, op.getStaticOffsetsAttrName(),
|
|
|
|
op.static_offsets(), ShapedType::isDynamicStrideOrOffset,
|
|
|
|
op.offsets())))
|
|
|
|
return failure();
|
|
|
|
if (failed(verifyOpWithOffsetSizesAndStridesPart(
|
|
|
|
op, "size", srcRank, op.getStaticSizesAttrName(), op.static_sizes(),
|
|
|
|
ShapedType::isDynamic, op.sizes())))
|
|
|
|
return failure();
|
|
|
|
if (failed(verifyOpWithOffsetSizesAndStridesPart(
|
|
|
|
op, "stride", srcRank, op.getStaticStridesAttrName(),
|
|
|
|
op.static_strides(), ShapedType::isDynamicStrideOrOffset,
|
|
|
|
op.strides())))
|
|
|
|
return failure();
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-03-22 21:20:21 +05:30
|
|
|
// AllocOp / AllocaOp
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-03-22 21:20:21 +05:30
|
|
|
template <typename AllocLikeOp>
|
2020-11-11 09:42:23 +01:00
|
|
|
static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
|
2020-04-09 15:16:24 +05:30
|
|
|
static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
|
2020-03-22 21:20:21 +05:30
|
|
|
"applies to only alloc or alloca");
|
|
|
|
auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
|
2018-09-20 09:39:55 -07:00
|
|
|
if (!memRefType)
|
2019-05-08 22:38:01 -07:00
|
|
|
return op.emitOpError("result must be a memref");
|
2018-09-20 09:39:55 -07:00
|
|
|
|
2020-11-11 09:42:23 +01:00
|
|
|
if (static_cast<int64_t>(op.dynamicSizes().size()) !=
|
|
|
|
memRefType.getNumDynamicDims())
|
|
|
|
return op.emitOpError("dimension operand count does not equal memref "
|
|
|
|
"dynamic dimension count");
|
2019-08-16 19:21:50 -07:00
|
|
|
|
2020-11-11 09:42:23 +01:00
|
|
|
unsigned numSymbols = 0;
|
|
|
|
if (!memRefType.getAffineMaps().empty())
|
|
|
|
numSymbols = memRefType.getAffineMaps().front().getNumSymbols();
|
|
|
|
if (op.symbolOperands().size() != numSymbols)
|
2019-05-08 22:38:01 -07:00
|
|
|
return op.emitOpError(
|
2020-11-11 09:42:23 +01:00
|
|
|
"symbol operand count does not equal memref symbol count");
|
2019-04-02 13:09:34 -07:00
|
|
|
|
2020-11-11 09:42:23 +01:00
|
|
|
return success();
|
|
|
|
}
|
2020-04-09 15:16:24 +05:30
|
|
|
|
2020-11-11 09:42:23 +01:00
|
|
|
static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); }
|
2020-04-09 15:16:24 +05:30
|
|
|
|
2020-11-11 09:42:23 +01:00
|
|
|
static LogicalResult verify(AllocaOp op) {
|
2020-04-09 15:16:24 +05:30
|
|
|
// An alloca op needs to have an ancestor with an allocation scope trait.
|
2020-11-11 09:42:23 +01:00
|
|
|
if (!op.getParentWithTrait<OpTrait::AutomaticAllocationScope>())
|
2020-04-16 18:57:32 +05:30
|
|
|
return op.emitOpError(
|
|
|
|
"requires an ancestor op with AutomaticAllocationScope trait");
|
|
|
|
|
2020-11-11 09:42:23 +01:00
|
|
|
return verifyAllocLikeOp(op);
|
2018-07-30 13:08:05 -07:00
|
|
|
}
|
|
|
|
|
2018-10-25 16:44:04 -07:00
|
|
|
namespace {
|
2020-03-22 21:20:21 +05:30
|
|
|
/// Fold constant dimensions into an alloc like operation.
|
|
|
|
template <typename AllocLikeOp>
|
|
|
|
struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
|
|
|
|
using OpRewritePattern<AllocLikeOp>::OpRewritePattern;
|
2018-10-25 16:44:04 -07:00
|
|
|
|
2020-03-22 21:20:21 +05:30
|
|
|
LogicalResult matchAndRewrite(AllocLikeOp alloc,
|
2020-03-17 20:07:55 -07:00
|
|
|
PatternRewriter &rewriter) const override {
|
2018-10-25 16:44:04 -07:00
|
|
|
// Check to see if any dimensions operands are constants. If so, we can
|
|
|
|
// substitute and drop them.
|
2019-12-23 14:45:01 -08:00
|
|
|
if (llvm::none_of(alloc.getOperands(), [](Value operand) {
|
2019-05-25 17:22:27 -07:00
|
|
|
return matchPattern(operand, m_ConstantIndex());
|
|
|
|
}))
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2018-10-25 16:44:04 -07:00
|
|
|
|
2019-05-25 17:22:27 -07:00
|
|
|
auto memrefType = alloc.getType();
|
2018-10-25 16:44:04 -07:00
|
|
|
|
|
|
|
// Ok, we have one or more constant operands. Collect the non-constant ones
|
|
|
|
// and keep track of the resultant memref type to build.
|
2019-01-23 14:39:45 -08:00
|
|
|
SmallVector<int64_t, 4> newShapeConstants;
|
2018-10-30 14:59:22 -07:00
|
|
|
newShapeConstants.reserve(memrefType.getRank());
|
2019-12-23 14:45:01 -08:00
|
|
|
SmallVector<Value, 4> newOperands;
|
2018-10-25 16:44:04 -07:00
|
|
|
|
|
|
|
unsigned dynamicDimPos = 0;
|
2018-10-30 14:59:22 -07:00
|
|
|
for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
|
2019-01-23 14:39:45 -08:00
|
|
|
int64_t dimSize = memrefType.getDimSize(dim);
|
2018-10-25 16:44:04 -07:00
|
|
|
// If this is already static dimension, keep it.
|
|
|
|
if (dimSize != -1) {
|
|
|
|
newShapeConstants.push_back(dimSize);
|
|
|
|
continue;
|
|
|
|
}
|
2020-01-11 08:54:04 -08:00
|
|
|
auto *defOp = alloc.getOperand(dynamicDimPos).getDefiningOp();
|
2019-04-05 12:24:03 -07:00
|
|
|
if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
|
2018-10-25 16:44:04 -07:00
|
|
|
// Dynamic shape dimension will be folded.
|
2019-03-24 19:53:05 -07:00
|
|
|
newShapeConstants.push_back(constantIndexOp.getValue());
|
2018-10-25 16:44:04 -07:00
|
|
|
} else {
|
|
|
|
// Dynamic shape dimension not folded; copy operand from old memref.
|
|
|
|
newShapeConstants.push_back(-1);
|
2019-05-25 17:22:27 -07:00
|
|
|
newOperands.push_back(alloc.getOperand(dynamicDimPos));
|
2018-10-25 16:44:04 -07:00
|
|
|
}
|
|
|
|
dynamicDimPos++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create new memref type (which will have fewer dynamic dimensions).
|
2020-01-22 13:46:11 -08:00
|
|
|
MemRefType newMemRefType =
|
|
|
|
MemRefType::Builder(memrefType).setShape(newShapeConstants);
|
2019-05-31 16:41:21 -07:00
|
|
|
assert(static_cast<int64_t>(newOperands.size()) ==
|
|
|
|
newMemRefType.getNumDynamicDims());
|
2018-10-25 16:44:04 -07:00
|
|
|
|
|
|
|
// Create and insert the alloc op for the new memref.
|
2020-03-22 21:20:21 +05:30
|
|
|
auto newAlloc = rewriter.create<AllocLikeOp>(alloc.getLoc(), newMemRefType,
|
|
|
|
newOperands, IntegerAttr());
|
2018-10-25 16:44:04 -07:00
|
|
|
// Insert a cast so we have the same type as the old alloc.
|
2019-05-25 17:22:27 -07:00
|
|
|
auto resultCast = rewriter.create<MemRefCastOp>(alloc.getLoc(), newAlloc,
|
|
|
|
alloc.getType());
|
2018-10-25 16:44:04 -07:00
|
|
|
|
[mlir] Remove 'valuesToRemoveIfDead' from PatternRewriter API
Summary:
Remove 'valuesToRemoveIfDead' from PatternRewriter API. The removal
functionality wasn't implemented and we decided [1] not to implement it in
favor of having more powerful DCE approaches.
[1] https://github.com/tensorflow/mlir/pull/212
Reviewers: rriddle, bondhugula
Reviewed By: rriddle
Subscribers: liufengdb, mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, nicolasvasilache, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72545
2020-01-27 13:13:20 -08:00
|
|
|
rewriter.replaceOp(alloc, {resultCast});
|
2020-03-17 20:07:55 -07:00
|
|
|
return success();
|
2018-10-25 16:44:04 -07:00
|
|
|
}
|
|
|
|
};
|
2019-01-16 11:40:37 -08:00
|
|
|
|
2019-03-28 08:24:38 -07:00
|
|
|
/// Fold alloc operations with no uses. Alloc has side effects on the heap,
|
2019-01-16 11:40:37 -08:00
|
|
|
/// but can still be deleted if it has zero uses.
|
2019-05-25 17:22:27 -07:00
|
|
|
struct SimplifyDeadAlloc : public OpRewritePattern<AllocOp> {
|
|
|
|
using OpRewritePattern<AllocOp>::OpRewritePattern;
|
2019-01-16 11:40:37 -08:00
|
|
|
|
2020-03-17 20:07:55 -07:00
|
|
|
LogicalResult matchAndRewrite(AllocOp alloc,
|
|
|
|
PatternRewriter &rewriter) const override {
|
2019-10-16 09:50:28 -07:00
|
|
|
if (alloc.use_empty()) {
|
|
|
|
rewriter.eraseOp(alloc);
|
2020-03-17 20:07:55 -07:00
|
|
|
return success();
|
2019-10-16 09:50:28 -07:00
|
|
|
}
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2019-01-16 11:40:37 -08:00
|
|
|
}
|
|
|
|
};
|
2018-10-25 16:44:04 -07:00
|
|
|
} // end anonymous namespace.
|
|
|
|
|
2018-11-28 15:09:39 -08:00
|
|
|
void AllocOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
|
2018-10-25 16:44:04 -07:00
|
|
|
MLIRContext *context) {
|
2020-03-22 21:20:21 +05:30
|
|
|
results.insert<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc>(context);
|
|
|
|
}
|
|
|
|
|
|
|
|
void AllocaOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
|
|
|
|
MLIRContext *context) {
|
|
|
|
results.insert<SimplifyAllocConst<AllocaOp>>(context);
|
2018-10-25 16:44:04 -07:00
|
|
|
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AndOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
OpFoldResult AndOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
/// and(x, 0) -> 0
|
|
|
|
if (matchPattern(rhs(), m_Zero()))
|
|
|
|
return rhs();
|
2020-06-15 22:28:43 -07:00
|
|
|
/// and(x, allOnes) -> x
|
|
|
|
APInt intValue;
|
|
|
|
if (matchPattern(rhs(), m_ConstantInt(&intValue)) &&
|
|
|
|
intValue.isAllOnesValue())
|
|
|
|
return lhs();
|
2020-03-04 09:44:36 -08:00
|
|
|
/// and(x,x) -> x
|
|
|
|
if (lhs() == rhs())
|
|
|
|
return rhs();
|
|
|
|
|
|
|
|
return constFoldBinaryOp<IntegerAttr>(operands,
|
|
|
|
[](APInt a, APInt b) { return a & b; });
|
|
|
|
}
|
|
|
|
|
2020-07-14 10:08:04 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AssertOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
struct EraseRedundantAssertions : public OpRewritePattern<AssertOp> {
|
|
|
|
using OpRewritePattern<AssertOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(AssertOp op,
|
|
|
|
PatternRewriter &rewriter) const override {
|
|
|
|
// Erase assertion if argument is constant true.
|
|
|
|
if (matchPattern(op.arg(), m_One())) {
|
|
|
|
rewriter.eraseOp(op);
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
return failure();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
void AssertOp::getCanonicalizationPatterns(OwningRewritePatternList &patterns,
|
|
|
|
MLIRContext *context) {
|
|
|
|
patterns.insert<EraseRedundantAssertions>(context);
|
|
|
|
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AssumeAlignmentOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
static LogicalResult verify(AssumeAlignmentOp op) {
|
2020-09-01 13:32:14 -07:00
|
|
|
unsigned alignment = op.alignment();
|
2020-03-04 09:44:36 -08:00
|
|
|
if (!llvm::isPowerOf2_32(alignment))
|
|
|
|
return op.emitOpError("alignment must be power of 2");
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AtomicRMWOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
static LogicalResult verify(AtomicRMWOp op) {
|
|
|
|
if (op.getMemRefType().getRank() != op.getNumOperands() - 2)
|
|
|
|
return op.emitOpError(
|
|
|
|
"expects the number of subscripts to be equal to memref rank");
|
|
|
|
switch (op.kind()) {
|
|
|
|
case AtomicRMWKind::addf:
|
|
|
|
case AtomicRMWKind::maxf:
|
|
|
|
case AtomicRMWKind::minf:
|
|
|
|
case AtomicRMWKind::mulf:
|
|
|
|
if (!op.value().getType().isa<FloatType>())
|
|
|
|
return op.emitOpError()
|
|
|
|
<< "with kind '" << stringifyAtomicRMWKind(op.kind())
|
|
|
|
<< "' expects a floating-point type";
|
|
|
|
break;
|
|
|
|
case AtomicRMWKind::addi:
|
|
|
|
case AtomicRMWKind::maxs:
|
|
|
|
case AtomicRMWKind::maxu:
|
|
|
|
case AtomicRMWKind::mins:
|
|
|
|
case AtomicRMWKind::minu:
|
|
|
|
case AtomicRMWKind::muli:
|
|
|
|
if (!op.value().getType().isa<IntegerType>())
|
|
|
|
return op.emitOpError()
|
|
|
|
<< "with kind '" << stringifyAtomicRMWKind(op.kind())
|
|
|
|
<< "' expects an integer type";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
2020-04-20 15:16:52 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// GenericAtomicRMWOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
void GenericAtomicRMWOp::build(OpBuilder &builder, OperationState &result,
|
2020-04-20 15:16:52 +02:00
|
|
|
Value memref, ValueRange ivs) {
|
|
|
|
result.addOperands(memref);
|
|
|
|
result.addOperands(ivs);
|
|
|
|
|
|
|
|
if (auto memrefType = memref.getType().dyn_cast<MemRefType>()) {
|
|
|
|
Type elementType = memrefType.getElementType();
|
|
|
|
result.addTypes(elementType);
|
|
|
|
|
|
|
|
Region *bodyRegion = result.addRegion();
|
|
|
|
bodyRegion->push_back(new Block());
|
2020-07-10 17:07:29 -07:00
|
|
|
bodyRegion->addArgument(elementType);
|
2020-04-20 15:16:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Verifies structural invariants of `generic_atomic_rmw`: the body region
/// must have exactly one entry block argument whose type matches the op
/// result type, and every operation nested in the body must be free of
/// memory effects.
static LogicalResult verify(GenericAtomicRMWOp op) {
  auto &body = op.body();
  if (body.getNumArguments() != 1)
    return op.emitOpError("expected single number of entry block arguments");

  if (op.getResult().getType() != body.getArgument(0).getType())
    return op.emitOpError(
        "expected block argument of the same type result type");

  // Walk the body and reject any nested op with memory effects; the error is
  // attached to the offending op and the walk stops at the first violation.
  bool hasSideEffects =
      body.walk([&](Operation *nestedOp) {
            if (MemoryEffectOpInterface::hasNoEffect(nestedOp))
              return WalkResult::advance();
            nestedOp->emitError("body of 'generic_atomic_rmw' should contain "
                                "only operations with no side effects");
            return WalkResult::interrupt();
          })
          .wasInterrupted();
  return hasSideEffects ? failure() : success();
}
|
|
|
|
|
|
|
|
/// Parses a `generic_atomic_rmw` op of the form:
///   generic_atomic_rmw %memref[%iv0, ...] : memref<...> { region } attr-dict
/// The op result type is derived from the memref element type.
static ParseResult parseGenericAtomicRMWOp(OpAsmParser &parser,
                                           OperationState &result) {
  OpAsmParser::OperandType memref;
  Type memrefType;
  SmallVector<OpAsmParser::OperandType, 4> ivs;

  // All subscript operands are implicitly of index type.
  Type indexType = parser.getBuilder().getIndexType();
  if (parser.parseOperand(memref) ||
      parser.parseOperandList(ivs, OpAsmParser::Delimiter::Square) ||
      parser.parseColonType(memrefType) ||
      parser.resolveOperand(memref, memrefType, result.operands) ||
      parser.resolveOperands(ivs, indexType, result.operands))
    return failure();

  Region *body = result.addRegion();
  if (parser.parseRegion(*body, llvm::None, llvm::None) ||
      parser.parseOptionalAttrDict(result.attributes))
    return failure();
  // The result of the atomic update is the element type of the memref.
  result.types.push_back(memrefType.cast<MemRefType>().getElementType());
  return success();
}
|
|
|
|
|
|
|
|
/// Prints a `generic_atomic_rmw` op in the same form accepted by the parser:
///   generic_atomic_rmw %memref[%ivs...] : memref<...> { region } attr-dict
static void print(OpAsmPrinter &p, GenericAtomicRMWOp op) {
  Value memref = op.memref();
  p << op.getOperationName() << ' ' << memref << "[" << op.indices()
    << "] : " << memref.getType();
  p.printRegion(op.body());
  p.printOptionalAttrDict(op.getAttrs());
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AtomicYieldOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Verifies that the value yielded by `atomic_yield` has the same type as the
/// (single) result of its parent operation.
static LogicalResult verify(AtomicYieldOp op) {
  Type parentType = op.getParentOp()->getResultTypes().front();
  Type resultType = op.result().getType();
  if (parentType != resultType)
    return op.emitOpError() << "types mismatch between yield op: " << resultType
                            << " and its parent: " << parentType;
  return success();
}
|
|
|
|
|
2019-03-01 16:58:00 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// BranchOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-04-23 04:40:33 -07:00
|
|
|
/// Given a successor, try to collapse it to a new destination if it only
/// contains a passthrough unconditional branch. If the successor is
/// collapsable, `successor` and `successorOperands` are updated to reference
/// the new destination and values. `argStorage` is an optional storage to use
/// if operands to the collapsed successor need to be remapped; it must outlive
/// any use of the updated `successorOperands`.
static LogicalResult collapseBranch(Block *&successor,
                                    ValueRange &successorOperands,
                                    SmallVectorImpl<Value> &argStorage) {
  // Check that the successor only contains a unconditional branch.
  if (std::next(successor->begin()) != successor->end())
    return failure();
  // Check that the terminator is an unconditional branch.
  BranchOp successorBranch = dyn_cast<BranchOp>(successor->getTerminator());
  if (!successorBranch)
    return failure();
  // Check that the arguments are only used within the terminator.
  for (BlockArgument arg : successor->getArguments()) {
    for (Operation *user : arg.getUsers())
      if (user != successorBranch)
        return failure();
  }
  // Don't try to collapse branches to infinite loops.
  Block *successorDest = successorBranch.getDest();
  if (successorDest == successor)
    return failure();

  // Update the operands to the successor. If the branch parent has no
  // arguments, we can use the branch operands directly.
  OperandRange operands = successorBranch.getOperands();
  if (successor->args_empty()) {
    successor = successorDest;
    successorOperands = operands;
    return success();
  }

  // Otherwise, we need to remap any argument operands: operands that were
  // block arguments of the intermediate block are substituted with the
  // values the original branch passed for them; others forward unchanged.
  for (Value operand : operands) {
    BlockArgument argOperand = operand.dyn_cast<BlockArgument>();
    if (argOperand && argOperand.getOwner() == successor)
      argStorage.push_back(successorOperands[argOperand.getArgNumber()]);
    else
      argStorage.push_back(operand);
  }
  successor = successorDest;
  successorOperands = argStorage;
  return success();
}
|
|
|
|
|
2019-11-05 11:57:03 -08:00
|
|
|
namespace {
|
|
|
|
/// Simplify a branch to a block that has a single predecessor. This effectively
/// merges the two blocks.
struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern<BranchOp> {
  using OpRewritePattern<BranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(BranchOp op,
                                PatternRewriter &rewriter) const override {
    // Check that the successor block has a single predecessor. Also bail out
    // on self-branches, where merging the block into itself is not possible.
    Block *succ = op.getDest();
    Block *opParent = op.getOperation()->getBlock();
    if (succ == opParent || !llvm::hasSingleElement(succ->getPredecessors()))
      return failure();

    // Merge the successor into the current block and erase the branch.
    rewriter.mergeBlocks(succ, opParent, op.getOperands());
    rewriter.eraseOp(op);
    return success();
  }
};
|
2020-04-23 04:40:33 -07:00
|
|
|
|
|
|
|
///   br ^bb1
/// ^bb1
///   br ^bbN(...)
///
///  -> br ^bbN(...)
///
/// Fold a branch through a passthrough block directly to the final
/// destination, remapping block-argument operands via `collapseBranch`.
struct SimplifyPassThroughBr : public OpRewritePattern<BranchOp> {
  using OpRewritePattern<BranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(BranchOp op,
                                PatternRewriter &rewriter) const override {
    Block *dest = op.getDest();
    ValueRange destOperands = op.getOperands();
    SmallVector<Value, 4> destOperandStorage;

    // Try to collapse the successor if it points somewhere other than this
    // block.
    if (dest == op.getOperation()->getBlock() ||
        failed(collapseBranch(dest, destOperands, destOperandStorage)))
      return failure();

    // Create a new branch with the collapsed successor.
    rewriter.replaceOpWithNewOp<BranchOp>(op, dest, destOperands);
    return success();
  }
};
|
2019-11-05 11:57:03 -08:00
|
|
|
} // end anonymous namespace.
|
|
|
|
|
2020-03-05 12:39:46 -08:00
|
|
|
/// Returns the single successor block targeted by this branch.
Block *BranchOp::getDest() { return getSuccessor(); }
|
2019-03-01 16:58:00 -08:00
|
|
|
|
2020-03-05 12:39:46 -08:00
|
|
|
/// Replaces the destination block of this branch with `block`.
// NOTE: the original wrote `return setSuccessor(block);` — returning a void
// expression from a void function is legal but confusing; the `return` is
// dropped with no behavior change.
void BranchOp::setDest(Block *block) { setSuccessor(block); }
|
2019-03-01 16:58:00 -08:00
|
|
|
|
|
|
|
/// Erases the branch operand at `index`, delegating to the underlying
/// operation so the successor operand list shrinks accordingly.
void BranchOp::eraseOperand(unsigned index) {
  getOperation()->eraseOperand(index);
}
|
|
|
|
|
2019-11-05 11:57:03 -08:00
|
|
|
/// Registers the canonicalization patterns for `br`: merging into a
/// single-predecessor successor, and collapsing passthrough branches.
void BranchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                           MLIRContext *context) {
  results.insert<SimplifyBrToBlockWithSinglePred, SimplifyPassThroughBr>(
      context);
}
|
|
|
|
|
2020-04-29 16:09:43 -07:00
|
|
|
/// Returns the mutable operands forwarded to the successor. `br` has exactly
/// one successor, so only index 0 is valid.
Optional<MutableOperandRange>
BranchOp::getMutableSuccessorOperands(unsigned index) {
  assert(index == 0 && "invalid successor index");
  return destOperandsMutable();
}
|
|
|
|
|
2020-04-21 02:54:05 -07:00
|
|
|
/// An unconditional branch always transfers control to its destination,
/// regardless of (constant) operand values.
Block *BranchOp::getSuccessorForOperands(ArrayRef<Attribute>) { return dest(); }
|
|
|
|
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
2018-08-21 17:55:22 -07:00
|
|
|
// CallOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-10-16 11:57:00 -07:00
|
|
|
/// Verifies that the `callee` symbol resolves to a FuncOp whose signature
/// matches this call's operand and result types, element by element.
LogicalResult CallOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
  // Check that the callee attribute was specified.
  auto fnAttr = getAttrOfType<FlatSymbolRefAttr>("callee");
  if (!fnAttr)
    return emitOpError("requires a 'callee' symbol reference attribute");
  FuncOp fn = symbolTable.lookupNearestSymbolFrom<FuncOp>(*this, fnAttr);
  if (!fn)
    return emitOpError() << "'" << fnAttr.getValue()
                         << "' does not reference a valid function";

  // Verify that the operand and result types match the callee.
  auto fnType = fn.getType();
  if (fnType.getNumInputs() != getNumOperands())
    return emitOpError("incorrect number of operands for callee");

  for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i)
    if (getOperand(i).getType() != fnType.getInput(i))
      return emitOpError("operand type mismatch: expected operand type ")
             << fnType.getInput(i) << ", but provided "
             << getOperand(i).getType() << " for operand number " << i;

  if (fnType.getNumResults() != getNumResults())
    return emitOpError("incorrect number of results for callee");

  for (unsigned i = 0, e = fnType.getNumResults(); i != e; ++i)
    if (getResult(i).getType() != fnType.getResult(i))
      return emitOpError("result type mismatch");

  return success();
}
|
|
|
|
|
2019-05-23 17:01:16 -07:00
|
|
|
/// Returns the function type implied by this call's own operand and result
/// types (derived from the call site, not looked up from the callee symbol).
FunctionType CallOp::getCalleeType() {
  return FunctionType::get(getOperandTypes(), getResultTypes(), getContext());
}
|
|
|
|
|
2018-08-21 17:55:22 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CallIndirectOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-01-29 18:08:28 -08:00
|
|
|
namespace {
|
|
|
|
/// Fold indirect calls that have a constant function as the callee operand
/// into direct calls.
struct SimplifyIndirectCallWithKnownCallee
    : public OpRewritePattern<CallIndirectOp> {
  using OpRewritePattern<CallIndirectOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CallIndirectOp indirectCall,
                                PatternRewriter &rewriter) const override {
    // Check that the callee is a constant callee.
    SymbolRefAttr calledFn;
    if (!matchPattern(indirectCall.getCallee(), m_Constant(&calledFn)))
      return failure();

    // Replace with a direct call; result types and arguments carry over
    // unchanged.
    rewriter.replaceOpWithNewOp<CallOp>(indirectCall, calledFn,
                                        indirectCall.getResultTypes(),
                                        indirectCall.getArgOperands());
    return success();
  }
};
|
|
|
|
} // end anonymous namespace.
|
2018-08-21 17:55:22 -07:00
|
|
|
|
2019-01-29 18:08:28 -08:00
|
|
|
/// Registers the canonicalization patterns for `call_indirect`.
void CallIndirectOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<SimplifyIndirectCallWithKnownCallee>(context);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-05-06 17:51:08 -07:00
|
|
|
// General helpers for comparison ops
|
2019-01-29 18:08:28 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2018-11-08 04:02:00 -08:00
|
|
|
// Return the type of the same shape (scalar, vector or tensor) containing i1.
// Ranked tensors and vectors keep their shape, unranked tensors stay
// unranked, and any other type yields a scalar i1.
static Type getI1SameShape(Type type) {
  auto i1Type = IntegerType::get(1, type.getContext());
  if (auto tensorType = type.dyn_cast<RankedTensorType>())
    return RankedTensorType::get(tensorType.getShape(), i1Type);
  if (type.isa<UnrankedTensorType>())
    return UnrankedTensorType::get(i1Type);
  if (auto vectorType = type.dyn_cast<VectorType>())
    return VectorType::get(vectorType.getShape(), i1Type);
  return i1Type;
}
|
|
|
|
|
2019-05-06 17:51:08 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CmpIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
static void buildCmpIOp(OpBuilder &build, OperationState &result,
|
2019-12-23 14:45:01 -08:00
|
|
|
CmpIPredicate predicate, Value lhs, Value rhs) {
|
2019-09-20 19:47:05 -07:00
|
|
|
result.addOperands({lhs, rhs});
|
2020-02-19 10:18:28 -08:00
|
|
|
result.types.push_back(getI1SameShape(lhs.getType()));
|
2020-04-23 16:02:46 +02:00
|
|
|
result.addAttribute(CmpIOp::getPredicateAttrName(),
|
|
|
|
build.getI64IntegerAttr(static_cast<int64_t>(predicate)));
|
2018-11-08 04:02:00 -08:00
|
|
|
}
|
|
|
|
|
2019-01-06 14:09:15 -08:00
|
|
|
// Compute `lhs` `pred` `rhs`, where `pred` is one of the known integer
// comparison predicates. Each predicate maps directly onto the corresponding
// signed/unsigned APInt comparison.
bool mlir::applyCmpPredicate(CmpIPredicate predicate, const APInt &lhs,
                             const APInt &rhs) {
  switch (predicate) {
  case CmpIPredicate::eq:
    return lhs.eq(rhs);
  case CmpIPredicate::ne:
    return lhs.ne(rhs);
  case CmpIPredicate::slt:
    return lhs.slt(rhs);
  case CmpIPredicate::sle:
    return lhs.sle(rhs);
  case CmpIPredicate::sgt:
    return lhs.sgt(rhs);
  case CmpIPredicate::sge:
    return lhs.sge(rhs);
  case CmpIPredicate::ult:
    return lhs.ult(rhs);
  case CmpIPredicate::ule:
    return lhs.ule(rhs);
  case CmpIPredicate::ugt:
    return lhs.ugt(rhs);
  case CmpIPredicate::uge:
    return lhs.uge(rhs);
  }
  llvm_unreachable("unknown comparison predicate");
}
|
|
|
|
|
|
|
|
// Constant folding hook for comparisons. Folds only when both operands are
// constant integer attributes; the folded result is a constant i1 attribute.
OpFoldResult CmpIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "cmpi takes two arguments");

  auto lhs = operands.front().dyn_cast_or_null<IntegerAttr>();
  auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
  if (!lhs || !rhs)
    return {};

  auto val = applyCmpPredicate(getPredicate(), lhs.getValue(), rhs.getValue());
  return IntegerAttr::get(IntegerType::get(1, getContext()), APInt(1, val));
}
|
|
|
|
|
2019-05-06 17:51:08 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CmpFOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
/// Populates `result` with the operands, the i1-shaped result type, and the
/// predicate attribute of a `cmpf` operation.
static void buildCmpFOp(OpBuilder &build, OperationState &result,
                        CmpFPredicate predicate, Value lhs, Value rhs) {
  result.addOperands({lhs, rhs});
  result.types.push_back(getI1SameShape(lhs.getType()));
  result.addAttribute(CmpFOp::getPredicateAttrName(),
                      build.getI64IntegerAttr(static_cast<int64_t>(predicate)));
}
|
|
|
|
|
2020-03-03 13:02:02 -08:00
|
|
|
/// Compute `lhs` `pred` `rhs`, where `pred` is one of the known floating point
/// comparison predicates. Ordered (O*) predicates are false when the compare
/// result is cmpUnordered (a NaN operand); unordered (U*) predicates are true
/// in that case.
bool mlir::applyCmpPredicate(CmpFPredicate predicate, const APFloat &lhs,
                             const APFloat &rhs) {
  auto cmpResult = lhs.compare(rhs);
  switch (predicate) {
  case CmpFPredicate::AlwaysFalse:
    return false;
  case CmpFPredicate::OEQ:
    return cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::OGT:
    return cmpResult == APFloat::cmpGreaterThan;
  case CmpFPredicate::OGE:
    return cmpResult == APFloat::cmpGreaterThan ||
           cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::OLT:
    return cmpResult == APFloat::cmpLessThan;
  case CmpFPredicate::OLE:
    return cmpResult == APFloat::cmpLessThan || cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::ONE:
    return cmpResult != APFloat::cmpUnordered && cmpResult != APFloat::cmpEqual;
  case CmpFPredicate::ORD:
    return cmpResult != APFloat::cmpUnordered;
  case CmpFPredicate::UEQ:
    return cmpResult == APFloat::cmpUnordered || cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::UGT:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpGreaterThan;
  case CmpFPredicate::UGE:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpGreaterThan ||
           cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::ULT:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpLessThan;
  case CmpFPredicate::ULE:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpLessThan || cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::UNE:
    return cmpResult != APFloat::cmpEqual;
  case CmpFPredicate::UNO:
    return cmpResult == APFloat::cmpUnordered;
  case CmpFPredicate::AlwaysTrue:
    return true;
  }
  llvm_unreachable("unknown comparison predicate");
}
|
|
|
|
|
|
|
|
// Constant folding hook for comparisons. Folds only when both operands are
// constant float attributes; the folded result is a constant i1 attribute.
OpFoldResult CmpFOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "cmpf takes two arguments");

  auto lhs = operands.front().dyn_cast_or_null<FloatAttr>();
  auto rhs = operands.back().dyn_cast_or_null<FloatAttr>();

  // TODO: We could actually do some intelligent things if we know only one
  // of the operands, but it's inf or nan.
  if (!lhs || !rhs)
    return {};

  auto val = applyCmpPredicate(getPredicate(), lhs.getValue(), rhs.getValue());
  return IntegerAttr::get(IntegerType::get(1, getContext()), APInt(1, val));
}
|
|
|
|
|
2019-03-01 16:58:00 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// CondBranchOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
[mlir][Standard] Add a canonicalization to simplify cond_br when the successors are identical
This revision adds support for canonicalizing the following:
```
cond_br %cond, ^bb1(A, ..., N), ^bb1(A, ..., N)
br ^bb1(A, ..., N)
```
If the operands to the successor are different and the cond_br is the only predecessor, we emit selects for the branch operands.
```
cond_br %cond, ^bb1(A), ^bb1(B)
%select = select %cond, A, B
br ^bb1(%select)
```
Differential Revision: https://reviews.llvm.org/D78682
2020-04-23 04:40:25 -07:00
|
|
|
/// cond_br true, ^bb1, ^bb2
///  -> br ^bb1
/// cond_br false, ^bb1, ^bb2
///  -> br ^bb2
///
/// Fold a conditional branch whose condition is a known constant into an
/// unconditional branch to the taken destination.
struct SimplifyConstCondBranchPred : public OpRewritePattern<CondBranchOp> {
  using OpRewritePattern<CondBranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CondBranchOp condbr,
                                PatternRewriter &rewriter) const override {
    if (matchPattern(condbr.getCondition(), m_NonZero())) {
      // True branch taken.
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getTrueDest(),
                                            condbr.getTrueOperands());
      return success();
    } else if (matchPattern(condbr.getCondition(), m_Zero())) {
      // False branch taken.
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getFalseDest(),
                                            condbr.getFalseOperands());
      return success();
    }
    return failure();
  }
};
|
2020-04-23 04:40:16 -07:00
|
|
|
|
|
|
|
///   cond_br %cond, ^bb1, ^bb2
/// ^bb1
///   br ^bbN(...)
/// ^bb2
///   br ^bbK(...)
///
///  -> cond_br %cond, ^bbN(...), ^bbK(...)
///
/// Skip passthrough blocks on either (or both) destinations of a conditional
/// branch, retargeting it at the final destinations via `collapseBranch`.
struct SimplifyPassThroughCondBranch : public OpRewritePattern<CondBranchOp> {
  using OpRewritePattern<CondBranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CondBranchOp condbr,
                                PatternRewriter &rewriter) const override {
    Block *trueDest = condbr.trueDest(), *falseDest = condbr.falseDest();
    ValueRange trueDestOperands = condbr.getTrueOperands();
    ValueRange falseDestOperands = condbr.getFalseOperands();
    SmallVector<Value, 4> trueDestOperandStorage, falseDestOperandStorage;

    // Try to collapse one of the current successors.
    LogicalResult collapsedTrue =
        collapseBranch(trueDest, trueDestOperands, trueDestOperandStorage);
    LogicalResult collapsedFalse =
        collapseBranch(falseDest, falseDestOperands, falseDestOperandStorage);
    if (failed(collapsedTrue) && failed(collapsedFalse))
      return failure();

    // Create a new branch with the collapsed successors.
    rewriter.replaceOpWithNewOp<CondBranchOp>(condbr, condbr.getCondition(),
                                              trueDest, trueDestOperands,
                                              falseDest, falseDestOperands);
    return success();
  }
};
|
[mlir][Standard] Add a canonicalization to simplify cond_br when the successors are identical
This revision adds support for canonicalizing the following:
```
cond_br %cond, ^bb1(A, ..., N), ^bb1(A, ..., N)
br ^bb1(A, ..., N)
```
If the operands to the successor are different and the cond_br is the only predecessor, we emit selects for the branch operands.
```
cond_br %cond, ^bb1(A), ^bb1(B)
%select = select %cond, A, B
br ^bb1(%select)
```
Differential Revision: https://reviews.llvm.org/D78682
2020-04-23 04:40:25 -07:00
|
|
|
|
|
|
|
/// cond_br %cond, ^bb1(A, ..., N), ^bb1(A, ..., N)
///  -> br ^bb1(A, ..., N)
///
/// cond_br %cond, ^bb1(A), ^bb1(B)
///  -> %select = select %cond, A, B
///     br ^bb1(%select)
///
/// When both destinations are the same block, replace the conditional branch
/// with an unconditional one, inserting `select`s for any operands that
/// differ between the two successor operand lists.
struct SimplifyCondBranchIdenticalSuccessors
    : public OpRewritePattern<CondBranchOp> {
  using OpRewritePattern<CondBranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CondBranchOp condbr,
                                PatternRewriter &rewriter) const override {
    // Check that the true and false destinations are the same and have the same
    // operands.
    Block *trueDest = condbr.trueDest();
    if (trueDest != condbr.falseDest())
      return failure();

    // If all of the operands match, no selects need to be generated.
    OperandRange trueOperands = condbr.getTrueOperands();
    OperandRange falseOperands = condbr.getFalseOperands();
    if (trueOperands == falseOperands) {
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, trueDest, trueOperands);
      return success();
    }

    // Otherwise, if the current block is the only predecessor insert selects
    // for any mismatched branch operands.
    if (trueDest->getUniquePredecessor() != condbr.getOperation()->getBlock())
      return failure();

    // Generate a select for any operands that differ between the two.
    SmallVector<Value, 8> mergedOperands;
    mergedOperands.reserve(trueOperands.size());
    Value condition = condbr.getCondition();
    for (auto it : llvm::zip(trueOperands, falseOperands)) {
      if (std::get<0>(it) == std::get<1>(it))
        mergedOperands.push_back(std::get<0>(it));
      else
        mergedOperands.push_back(rewriter.create<SelectOp>(
            condbr.getLoc(), condition, std::get<0>(it), std::get<1>(it)));
    }

    rewriter.replaceOpWithNewOp<BranchOp>(condbr, trueDest, mergedOperands);
    return success();
  }
};
|
2020-10-18 13:43:09 -07:00
|
|
|
|
|
|
|
///   ...
///   cond_br %cond, ^bb1(...), ^bb2(...)
/// ...
/// ^bb1: // has single predecessor
///   ...
///   cond_br %cond, ^bb3(...), ^bb4(...)
///
/// ->
///
///   ...
///   cond_br %cond, ^bb1(...), ^bb2(...)
/// ...
/// ^bb1: // has single predecessor
///   ...
///   br ^bb3(...)
///
/// If the sole predecessor already branched on the same condition, the
/// condition value is known inside this block, so the inner conditional
/// branch can be folded to an unconditional one.
struct SimplifyCondBranchFromCondBranchOnSameCondition
    : public OpRewritePattern<CondBranchOp> {
  using OpRewritePattern<CondBranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CondBranchOp condbr,
                                PatternRewriter &rewriter) const override {
    // Check that we have a single distinct predecessor.
    Block *currentBlock = condbr.getOperation()->getBlock();
    Block *predecessor = currentBlock->getSinglePredecessor();
    if (!predecessor)
      return failure();

    // Check that the predecessor terminates with a conditional branch to this
    // block and that it branches on the same condition.
    auto predBranch = dyn_cast<CondBranchOp>(predecessor->getTerminator());
    if (!predBranch || condbr.getCondition() != predBranch.getCondition())
      return failure();

    // Fold this branch to an unconditional branch: the condition is true
    // (resp. false) if control arrived via the predecessor's true (resp.
    // false) successor.
    if (currentBlock == predBranch.trueDest())
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.trueDest(),
                                            condbr.trueDestOperands());
    else
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.falseDest(),
                                            condbr.falseDestOperands());
    return success();
  }
};
|
2020-04-23 04:40:16 -07:00
|
|
|
} // end anonymous namespace
|
2019-03-01 16:58:00 -08:00
|
|
|
|
|
|
|
/// Registers the canonicalization patterns for cond_br: constant-condition
/// folding, pass-through successor collapsing, merging of identical
/// successors, and simplification based on a dominating cond_br that branches
/// on the same condition.
void CondBranchOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<SimplifyConstCondBranchPred, SimplifyPassThroughCondBranch,
                 SimplifyCondBranchIdenticalSuccessors,
                 SimplifyCondBranchFromCondBranchOnSameCondition>(context);
}
|
|
|
|
|
2020-04-29 16:09:43 -07:00
|
|
|
/// Returns the mutable operand range forwarded to the requested successor:
/// the true-destination operands for `trueIndex`, the false-destination
/// operands otherwise.
Optional<MutableOperandRange>
CondBranchOp::getMutableSuccessorOperands(unsigned index) {
  assert(index < getNumSuccessors() && "invalid successor index");
  if (index == trueIndex)
    return trueDestOperandsMutable();
  return falseDestOperandsMutable();
}
|
|
|
|
|
2020-04-21 02:54:05 -07:00
|
|
|
/// Returns the successor that would be taken when the condition operand folds
/// to the given constant, or nullptr when the condition is not a known
/// integer attribute.
Block *CondBranchOp::getSuccessorForOperands(ArrayRef<Attribute> operands) {
  auto condAttr = operands.front().dyn_cast_or_null<IntegerAttr>();
  if (!condAttr)
    return nullptr;
  return condAttr.getValue().isOneValue() ? trueDest() : falseDest();
}
|
|
|
|
|
2019-03-01 16:58:00 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Constant*Op
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-09-20 20:43:02 -07:00
|
|
|
/// Prints a constant op: `constant` followed by the (value-elided) attribute
/// dictionary, the value attribute itself, and a trailing type only for
/// symbol references (whose type cannot be inferred from the attribute).
static void print(OpAsmPrinter &p, ConstantOp &op) {
  p << "constant ";
  p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"value"});

  // Separate the dictionary from the value when extra attributes were printed.
  if (op.getAttrs().size() > 1)
    p << ' ';
  p << op.getValue();

  // If the value is a symbol reference, print a trailing type.
  if (op.getValue().isa<SymbolRefAttr>())
    p << " : " << op.getType();
}
|
|
|
|
|
2019-09-20 11:36:49 -07:00
|
|
|
/// Parses a constant op: an optional attribute dictionary followed by the
/// `value` attribute. The result type is taken from the attribute itself,
/// except for symbol references, which carry an explicit trailing type.
static ParseResult parseConstantOp(OpAsmParser &parser,
                                   OperationState &result) {
  Attribute valueAttr;
  if (parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseAttribute(valueAttr, "value", result.attributes))
    return failure();

  // If the attribute is a symbol reference, then we expect a trailing type.
  Type type;
  if (!valueAttr.isa<SymbolRefAttr>())
    type = valueAttr.getType();
  else if (parser.parseColonType(type))
    return failure();

  // Add the attribute type to the list.
  return parser.addTypeToList(type, result.types);
}
|
|
|
|
|
|
|
|
/// The constant op requires an attribute, and furthermore requires that it
/// matches the return type.
static LogicalResult verify(ConstantOp &op) {
  auto value = op.getValue();
  if (!value)
    return op.emitOpError("requires a 'value' attribute");

  // Except for NoneType-typed attributes, the attribute's type must match the
  // op's result type exactly.
  auto type = op.getType();
  if (!value.getType().isa<NoneType>() && type != value.getType())
    return op.emitOpError() << "requires attribute's type (" << value.getType()
                            << ") to match op's return type (" << type << ")";

  // Index-typed results and boolean attributes need no further checks.
  if (type.isa<IndexType>() || value.isa<BoolAttr>())
    return success();

  if (auto intAttr = value.dyn_cast<IntegerAttr>()) {
    // If the type has a known bitwidth we verify that the value can be
    // represented with the given bitwidth.
    auto bitwidth = type.cast<IntegerType>().getWidth();
    auto intVal = intAttr.getValue();
    if (!intVal.isSignedIntN(bitwidth) && !intVal.isIntN(bitwidth))
      return op.emitOpError("requires 'value' to be an integer within the "
                            "range of the integer result type");
    return success();
  }

  if (type.isa<FloatType>()) {
    if (!value.isa<FloatAttr>())
      return op.emitOpError("requires 'value' to be a floating point constant");
    return success();
  }

  if (type.isa<ShapedType>()) {
    if (!value.isa<ElementsAttr>())
      return op.emitOpError("requires 'value' to be a shaped constant");
    return success();
  }

  if (type.isa<FunctionType>()) {
    // Function-typed constants must be flat symbol references to a function
    // visible in the enclosing module.
    auto fnAttr = value.dyn_cast<FlatSymbolRefAttr>();
    if (!fnAttr)
      return op.emitOpError("requires 'value' to be a function reference");

    // Try to find the referenced function.
    auto fn =
        op.getParentOfType<ModuleOp>().lookupSymbol<FuncOp>(fnAttr.getValue());
    if (!fn)
      return op.emitOpError()
             << "reference to undefined function '" << fnAttr.getValue() << "'";

    // Check that the referenced function has the correct type.
    if (fn.getType() != type)
      return op.emitOpError("reference to function with mismatched type");

    return success();
  }

  if (type.isa<NoneType>() && value.isa<UnitAttr>())
    return success();

  return op.emitOpError("unsupported 'value' attribute: ") << value;
}
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// A constant always folds to its own value attribute.
OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.empty() && "constant has no operands");
  return getValue();
}
|
|
|
|
|
2019-11-20 10:19:01 -08:00
|
|
|
void ConstantOp::getAsmResultNames(
|
2019-12-23 14:45:01 -08:00
|
|
|
function_ref<void(Value, StringRef)> setNameFn) {
|
2019-11-20 10:19:01 -08:00
|
|
|
Type type = getType();
|
|
|
|
if (auto intCst = getValue().dyn_cast<IntegerAttr>()) {
|
|
|
|
IntegerType intTy = type.dyn_cast<IntegerType>();
|
|
|
|
|
|
|
|
// Sugar i1 constants with 'true' and 'false'.
|
|
|
|
if (intTy && intTy.getWidth() == 1)
|
|
|
|
return setNameFn(getResult(), (intCst.getInt() ? "true" : "false"));
|
|
|
|
|
|
|
|
// Otherwise, build a complex name with the value and type.
|
|
|
|
SmallString<32> specialNameBuffer;
|
|
|
|
llvm::raw_svector_ostream specialName(specialNameBuffer);
|
|
|
|
specialName << 'c' << intCst.getInt();
|
|
|
|
if (intTy)
|
|
|
|
specialName << '_' << type;
|
|
|
|
setNameFn(getResult(), specialName.str());
|
|
|
|
|
|
|
|
} else if (type.isa<FunctionType>()) {
|
|
|
|
setNameFn(getResult(), "f");
|
|
|
|
} else {
|
|
|
|
setNameFn(getResult(), "cst");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-22 11:48:43 -07:00
|
|
|
/// Returns true if a constant operation can be built with the given value and
/// result type.
bool ConstantOp::isBuildableWith(Attribute value, Type type) {
  // SymbolRefAttr can only be used with a function type.
  if (value.isa<SymbolRefAttr>())
    return type.isa<FunctionType>();
  // Otherwise, the attribute must have the same type as 'type'.
  if (value.getType() != type)
    return false;
  // Finally, check that the attribute kind is handled.
  return value.isa<IntegerAttr, FloatAttr, ElementsAttr, UnitAttr>();
}
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
/// Builds a float constant by wrapping the APFloat in a FloatAttr of the
/// given type and delegating to the generic ConstantOp builder.
void ConstantFloatOp::build(OpBuilder &builder, OperationState &result,
                            const APFloat &value, FloatType type) {
  ConstantOp::build(builder, result, type, builder.getFloatAttr(type, value));
}
|
|
|
|
|
2019-05-11 12:45:35 -07:00
|
|
|
/// ConstantFloatOp only matches constants whose result type is a FloatType.
bool ConstantFloatOp::classof(Operation *op) {
  return ConstantOp::classof(op) && op->getResult(0).getType().isa<FloatType>();
}
|
|
|
|
|
|
|
|
/// ConstantIntOp only matches values whose result type is an IntegerType.
bool ConstantIntOp::classof(Operation *op) {
  return ConstantOp::classof(op) &&
         // Only signless integers qualify; signed/unsigned ones do not.
         op->getResult(0).getType().isSignlessInteger();
}
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
/// Builds an integer constant of the given value with a fresh signless
/// integer type of the requested bitwidth.
void ConstantIntOp::build(OpBuilder &builder, OperationState &result,
                          int64_t value, unsigned width) {
  Type type = builder.getIntegerType(width);
  ConstantOp::build(builder, result, type, builder.getIntegerAttr(type, value));
}
|
|
|
|
|
|
|
|
/// Build a constant int op producing an integer with the specified type,
/// which must be an integer type.
void ConstantIntOp::build(OpBuilder &builder, OperationState &result,
                          int64_t value, Type type) {
  assert(type.isSignlessInteger() &&
         "ConstantIntOp can only have signless integer type");
  ConstantOp::build(builder, result, type, builder.getIntegerAttr(type, value));
}
|
|
|
|
|
|
|
|
/// ConstantIndexOp only matches values whose result type is Index.
bool ConstantIndexOp::classof(Operation *op) {
  return ConstantOp::classof(op) && op->getResult(0).getType().isIndex();
}
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
/// Builds an index-typed constant with the given value.
void ConstantIndexOp::build(OpBuilder &builder, OperationState &result,
                            int64_t value) {
  Type type = builder.getIndexType();
  ConstantOp::build(builder, result, type, builder.getIntegerAttr(type, value));
}
|
|
|
|
|
2018-08-15 15:39:26 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// DeallocOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-01-16 12:39:03 -08:00
|
|
|
namespace {
|
2019-03-28 08:24:38 -07:00
|
|
|
/// Fold Dealloc operations that are deallocating an AllocOp that is only used
|
2019-01-16 12:39:03 -08:00
|
|
|
/// by other Dealloc operations.
|
2019-05-25 17:22:27 -07:00
|
|
|
struct SimplifyDeadDealloc : public OpRewritePattern<DeallocOp> {
|
|
|
|
using OpRewritePattern<DeallocOp>::OpRewritePattern;
|
2019-01-16 12:39:03 -08:00
|
|
|
|
2020-03-17 20:07:55 -07:00
|
|
|
LogicalResult matchAndRewrite(DeallocOp dealloc,
|
|
|
|
PatternRewriter &rewriter) const override {
|
2019-03-28 08:24:38 -07:00
|
|
|
// Check that the memref operand's defining operation is an AllocOp.
|
2019-12-23 14:45:01 -08:00
|
|
|
Value memref = dealloc.memref();
|
2020-01-11 08:54:04 -08:00
|
|
|
if (!isa_and_nonnull<AllocOp>(memref.getDefiningOp()))
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2019-01-16 12:39:03 -08:00
|
|
|
|
|
|
|
// Check that all of the uses of the AllocOp are other DeallocOps.
|
2020-01-11 08:54:04 -08:00
|
|
|
for (auto *user : memref.getUsers())
|
2019-05-18 11:09:07 -07:00
|
|
|
if (!isa<DeallocOp>(user))
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2019-01-16 12:39:03 -08:00
|
|
|
|
|
|
|
// Erase the dealloc operation.
|
2019-10-16 09:50:28 -07:00
|
|
|
rewriter.eraseOp(dealloc);
|
2020-03-17 20:07:55 -07:00
|
|
|
return success();
|
2019-01-16 12:39:03 -08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
} // end anonymous namespace.
|
2018-08-15 15:39:26 -07:00
|
|
|
|
2019-05-08 22:38:01 -07:00
|
|
|
/// Verifies that the dealloc operand is of memref type.
static LogicalResult verify(DeallocOp op) {
  if (!op.memref().getType().isa<MemRefType>())
    return op.emitOpError("operand must be a memref");
  return success();
}
|
|
|
|
|
2018-11-28 15:09:39 -08:00
|
|
|
/// Registers the dead-dealloc elimination pattern.
void DeallocOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                            MLIRContext *context) {
  results.insert<SimplifyDeadDealloc>(context);
}
|
|
|
|
|
2019-12-13 14:52:39 -08:00
|
|
|
/// Folds away a memref cast feeding the dealloc operand.
LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dealloc(memrefcast) -> dealloc
  return foldMemRefCast(*this);
}
|
|
|
|
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// DimOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-06-10 13:52:43 +00:00
|
|
|
/// Builds a dim op from a constant integer index by materializing it as a
/// ConstantIndexOp and delegating to the Value-index builder.
void DimOp::build(OpBuilder &builder, OperationState &result,
                  Value memrefOrTensor, int64_t index) {
  auto loc = result.location;
  Value indexValue = builder.create<ConstantIndexOp>(loc, index);
  build(builder, result, memrefOrTensor, indexValue);
}
|
|
|
|
|
|
|
|
/// Builds a dim op with an SSA index; the result is always of index type.
void DimOp::build(OpBuilder &builder, OperationState &result,
                  Value memrefOrTensor, Value index) {
  auto indexTy = builder.getIndexType();
  build(builder, result, indexTy, memrefOrTensor, index);
}
|
|
|
|
|
|
|
|
/// Returns the index as a constant integer when the index operand is defined
/// by a ConstantOp, and None otherwise.
Optional<int64_t> DimOp::getConstantIndex() {
  auto constantOp = index().getDefiningOp<ConstantOp>();
  if (!constantOp)
    return {};
  return constantOp.getValue().cast<IntegerAttr>().getInt();
}
|
|
|
|
|
2019-05-10 15:26:23 -07:00
|
|
|
/// Verifies that a constant dim index is within the rank of the ranked
/// operand type; unknown indices and unranked operands are assumed in range.
static LogicalResult verify(DimOp op) {
  // Assume unknown index to be in range.
  Optional<int64_t> index = op.getConstantIndex();
  if (!index.hasValue())
    return success();

  // Check that constant index is not knowingly out of range.
  auto type = op.memrefOrTensor().getType();
  if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
    if (index.getValue() >= tensorType.getRank())
      return op.emitOpError("index is out of range");
  } else if (auto memrefType = type.dyn_cast<MemRefType>()) {
    if (index.getValue() >= memrefType.getRank())
      return op.emitOpError("index is out of range");
  } else if (type.isa<UnrankedTensorType>() || type.isa<UnrankedMemRefType>()) {
    // Assume index to be in range.
  } else {
    // The ODS-declared operand types guarantee this is unreachable.
    llvm_unreachable("expected operand with tensor or memref type");
  }

  return success();
}
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Folds a dim op when the queried extent is statically known, forwards the
/// operand through tensor_load, and resolves dynamic sizes from the defining
/// AllocOp/ViewOp/SubViewOp or through memref casts.
OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();

  // All forms of folding require a known index.
  if (!index)
    return {};

  auto argTy = memrefOrTensor().getType();
  // Fold if the shape extent along the given index is known.
  if (auto shapedTy = argTy.dyn_cast<ShapedType>()) {
    // Folding for unranked types (UnrankedMemRefType, UnrankedTensorType) is
    // not supported.
    if (!shapedTy.hasRank())
      return {};
    if (!shapedTy.isDynamicDim(index.getInt())) {
      Builder builder(getContext());
      return builder.getIndexAttr(shapedTy.getShape()[index.getInt()]);
    }
  }

  Operation *definingOp = memrefOrTensor().getDefiningOp();
  // dim(tensor_load(memref)) -> dim(memref)
  if (auto tensorLoadOp = dyn_cast_or_null<TensorLoadOp>(definingOp)) {
    // In-place operand update: not a classical fold, but signals progress by
    // returning the (unchanged) result value.
    setOperand(0, tensorLoadOp.memref());
    return getResult();
  }

  // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
  auto memrefType = argTy.dyn_cast<MemRefType>();
  if (!memrefType)
    return {};

  // The size at the given index is now known to be a dynamic size of a memref.
  unsigned unsignedIndex = index.getValue().getZExtValue();
  if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
    return *(alloc.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
    return *(view.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
    assert(subview.isDynamicSize(unsignedIndex) &&
           "Expected dynamic subview size");
    return subview.getDynamicSize(unsignedIndex);
  }

  // dim(memrefcast) -> dim
  if (succeeded(foldMemRefCast(*this)))
    return getResult();

  return {};
}
|
|
|
|
|
2018-10-09 15:04:27 -07:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// DmaStartOp
|
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
/// Builds a dma_start op. Operand order is significant: src memref + indices,
/// dst memref + indices, number of elements, tag memref + indices, then the
/// optional stride pair (stride, elements per stride).
void DmaStartOp::build(OpBuilder &builder, OperationState &result,
                       Value srcMemRef, ValueRange srcIndices, Value destMemRef,
                       ValueRange destIndices, Value numElements,
                       Value tagMemRef, ValueRange tagIndices, Value stride,
                       Value elementsPerStride) {
  result.addOperands(srcMemRef);
  result.addOperands(srcIndices);
  result.addOperands(destMemRef);
  result.addOperands(destIndices);
  result.addOperands({numElements, tagMemRef});
  result.addOperands(tagIndices);
  // Stride operands are only attached when a stride was provided.
  if (stride)
    result.addOperands({stride, elementsPerStride});
}
|
|
|
|
|
2019-09-20 20:43:02 -07:00
|
|
|
/// Prints a dma_start op: source, destination, size and tag (each memref
/// followed by its indices), the optional stride pair, the attribute
/// dictionary, and the three memref types.
void DmaStartOp::print(OpAsmPrinter &p) {
  p << "dma_start " << getSrcMemRef() << '[' << getSrcIndices() << "], "
    << getDstMemRef() << '[' << getDstIndices() << "], " << getNumElements()
    << ", " << getTagMemRef() << '[' << getTagIndices() << ']';
  if (isStrided())
    p << ", " << getStride() << ", " << getNumElementsPerStride();

  p.printOptionalAttrDict(getAttrs());
  p << " : " << getSrcMemRef().getType() << ", " << getDstMemRef().getType()
    << ", " << getTagMemRef().getType();
}
|
|
|
|
|
|
|
|
// Parse DmaStartOp.
// Ex:
//   %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size,
//                       %tag[%index], %stride, %num_elt_per_stride :
//                     : memref<3076 x f32, 0>,
//                       memref<1024 x f32, 2>,
//                       memref<1 x i32>
//
ParseResult DmaStartOp::parse(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType srcMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
  OpAsmParser::OperandType dstMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
  OpAsmParser::OperandType numElementsInfo;
  OpAsmParser::OperandType tagMemrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
  SmallVector<OpAsmParser::OperandType, 2> strideInfo;

  SmallVector<Type, 3> types;
  auto indexType = parser.getBuilder().getIndexType();

  // Parse and resolve the following list of operands:
  // *) source memref followed by its indices (in square brackets).
  // *) destination memref followed by its indices (in square brackets).
  // *) dma size in KiB.
  if (parser.parseOperand(srcMemRefInfo) ||
      parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
      parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(numElementsInfo) ||
      parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
      parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
    return failure();

  // Parse optional stride and elements per stride.
  if (parser.parseTrailingOperandList(strideInfo))
    return failure();

  // A stride specification must consist of exactly two operands.
  bool isStrided = strideInfo.size() == 2;
  if (!strideInfo.empty() && !isStrided) {
    return parser.emitError(parser.getNameLoc(),
                            "expected two stride related operands");
  }

  // Exactly three types are expected: src, dst and tag memref types.
  if (parser.parseColonTypeList(types))
    return failure();
  if (types.size() != 3)
    return parser.emitError(parser.getNameLoc(), "fewer/more types expected");

  // Resolution order must mirror the operand order laid down by build().
  if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
      parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
      parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
      parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
      // size should be an index.
      parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
      parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
      // tag indices should be index.
      parser.resolveOperands(tagIndexInfos, indexType, result.operands))
    return failure();

  if (isStrided) {
    if (parser.resolveOperands(strideInfo, indexType, result.operands))
      return failure();
  }

  return success();
}
|
|
|
|
|
2019-04-02 13:09:34 -07:00
|
|
|
/// Verifies the operand structure of a dma_start op: source, destination and
/// tag memrefs with their index operands, the element count, and the optional
/// stride pair.
LogicalResult DmaStartOp::verify() {
  unsigned numOperands = getNumOperands();

  // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
  // the number of elements.
  if (numOperands < 4)
    return emitOpError("expected at least 4 operands");

  // Check types of operands. The order of these calls is important: the later
  // calls rely on some type properties to compute the operand position.
  // 1. Source memref.
  if (!getSrcMemRef().getType().isa<MemRefType>())
    return emitOpError("expected source to be of memref type");
  if (numOperands < getSrcMemRefRank() + 4)
    return emitOpError() << "expected at least " << getSrcMemRefRank() + 4
                         << " operands";
  if (!getSrcIndices().empty() &&
      !llvm::all_of(getSrcIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return emitOpError("expected source indices to be of index type");

  // 2. Destination memref.
  if (!getDstMemRef().getType().isa<MemRefType>())
    return emitOpError("expected destination to be of memref type");
  unsigned numExpectedOperands = getSrcMemRefRank() + getDstMemRefRank() + 4;
  if (numOperands < numExpectedOperands)
    return emitOpError() << "expected at least " << numExpectedOperands
                         << " operands";
  if (!getDstIndices().empty() &&
      !llvm::all_of(getDstIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return emitOpError("expected destination indices to be of index type");

  // 3. Number of elements.
  if (!getNumElements().getType().isIndex())
    return emitOpError("expected num elements to be of index type");

  // 4. Tag memref.
  if (!getTagMemRef().getType().isa<MemRefType>())
    return emitOpError("expected tag to be of memref type");
  numExpectedOperands += getTagMemRefRank();
  if (numOperands < numExpectedOperands)
    return emitOpError() << "expected at least " << numExpectedOperands
                         << " operands";
  if (!getTagIndices().empty() &&
      !llvm::all_of(getTagIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return emitOpError("expected tag indices to be of index type");

  // DMAs from different memory spaces supported.
  if (getSrcMemorySpace() == getDstMemorySpace())
    return emitOpError("DMA should be between different memory spaces");

  // Optional stride-related operands must be either both present or both
  // absent.
  if (numOperands != numExpectedOperands &&
      numOperands != numExpectedOperands + 2)
    return emitOpError("incorrect number of operands");

  // 5. Strides.
  if (isStrided()) {
    if (!getStride().getType().isIndex() ||
        !getNumElementsPerStride().getType().isIndex())
      return emitOpError(
          "expected stride and num elements per stride to be of type index");
  }

  return success();
}
|
|
|
|
|
2019-12-13 14:52:39 -08:00
|
|
|
/// Folds away memref casts feeding any dma_start operand.
LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  /// dma_start(memrefcast) -> dma_start
  return foldMemRefCast(*this);
}
|
|
|
|
|
2018-10-09 15:04:27 -07:00
|
|
|
// ---------------------------------------------------------------------------
|
|
|
|
// DmaWaitOp
|
|
|
|
// ---------------------------------------------------------------------------
|
2018-10-18 11:14:26 -07:00
|
|
|
|
2020-04-23 16:02:46 +02:00
|
|
|
/// Builds a dma_wait op: tag memref, its indices, then the element count.
void DmaWaitOp::build(OpBuilder &builder, OperationState &result,
                      Value tagMemRef, ValueRange tagIndices,
                      Value numElements) {
  result.addOperands(tagMemRef);
  result.addOperands(tagIndices);
  result.addOperands(numElements);
}
|
|
|
|
|
2019-09-20 20:43:02 -07:00
|
|
|
/// Prints a dma_wait op: tag memref with its indices, the element count, the
/// attribute dictionary, and the tag memref type.
void DmaWaitOp::print(OpAsmPrinter &p) {
  p << "dma_wait " << getTagMemRef() << '[' << getTagIndices() << "], "
    << getNumElements();
  p.printOptionalAttrDict(getAttrs());
  p << " : " << getTagMemRef().getType();
}
|
|
|
|
|
2018-10-18 11:14:26 -07:00
|
|
|
// Parse DmaWaitOp.
// Eg:
//   dma_wait %tag[%index], %num_elements : memref<1 x i32, (d0) -> (d0), 4>
//
ParseResult DmaWaitOp::parse(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType tagMemrefInfo;
  SmallVector<OpAsmParser::OperandType, 2> tagIndexInfos;
  Type type;
  auto indexType = parser.getBuilder().getIndexType();
  OpAsmParser::OperandType numElementsInfo;

  // Parse tag memref, its indices, and dma size.
  if (parser.parseOperand(tagMemrefInfo) ||
      parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(numElementsInfo) ||
      parser.parseColonType(type) ||
      parser.resolveOperand(tagMemrefInfo, type, result.operands) ||
      parser.resolveOperands(tagIndexInfos, indexType, result.operands) ||
      parser.resolveOperand(numElementsInfo, indexType, result.operands))
    return failure();

  return success();
}
|
|
|
|
|
2019-12-13 14:52:39 -08:00
|
|
|
/// Folds away memref casts feeding any dma_wait operand.
LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dma_wait(memrefcast) -> dma_wait
  return foldMemRefCast(*this);
}
|
|
|
|
|
2020-05-05 14:09:35 +02:00
|
|
|
/// Verifies the operand list of a dma_wait: a memref-typed tag, one index per
/// tag dimension, and an index-typed element count.
LogicalResult DmaWaitOp::verify() {
  // Mandatory non-variadic operands are tag and the number of elements.
  if (getNumOperands() < 2)
    return emitOpError() << "expected at least 2 operands";

  // Check types of operands. The order of these calls is important: the later
  // calls rely on some type properties to compute the operand position.
  if (!getTagMemRef().getType().isa<MemRefType>())
    return emitOpError() << "expected tag to be of memref type";

  // Total operand count is fully determined by the tag's rank.
  if (getNumOperands() != 2 + getTagMemRefRank())
    return emitOpError() << "expected " << 2 + getTagMemRefRank()
                         << " operands";

  if (!getTagIndices().empty() &&
      !llvm::all_of(getTagIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return emitOpError() << "expected tag indices to be of index type";

  if (!getNumElements().getType().isIndex())
    return emitOpError()
           << "expected the number of elements to be of index type";

  return success();
}
|
|
|
|
|
2020-09-07 11:41:27 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// DynamicTensorFromElementsOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Parses a dynamic_tensor_from_elements op of the form:
///   dynamic_tensor_from_elements %ext0, %ext1 { region } {attrs} : tensor-type
/// where the leading operands are the index-typed dynamic extents.
static ParseResult parseDynamicTensorFromElementsOp(OpAsmParser &parser,
                                                    OperationState &result) {
  // Parse operands.
  SmallVector<OpAsmParser::OperandType, 4> dynamicExtents;
  Type indexTy = parser.getBuilder().getIndexType();
  if (parser.parseOperandList(dynamicExtents) ||
      parser.resolveOperands(dynamicExtents, indexTy, result.operands))
    return failure();

  // Parse body. Region arguments are introduced by the region itself.
  Region *body = result.addRegion();
  if (parser.parseRegion(*body, {}, {}))
    return failure();

  // Parse result type.
  Type resultType;
  if (parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(resultType))
    return failure();
  result.addTypes(resultType);

  return success();
}
|
|
|
|
|
|
|
|
/// Prints a dynamic_tensor_from_elements op: extents, then the generator
/// region, then attributes and the result tensor type.
static void print(OpAsmPrinter &p, DynamicTensorFromElementsOp op) {
  p << "dynamic_tensor_from_elements " << op.dynamicExtents();
  p.printRegion(op.body());
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.getType();
}
|
|
|
|
|
|
|
|
/// Verifies a dynamic_tensor_from_elements op: operand count matches the
/// number of dynamic dimensions, the body takes one index argument per result
/// dimension, and the body yields a value of the result's element type.
static LogicalResult verify(DynamicTensorFromElementsOp op) {
  // Ensure that the tensor type has as many dynamic dimensions as are specified
  // by the operands.
  RankedTensorType resultTy = op.getType().cast<RankedTensorType>();
  if (op.getNumOperands() != resultTy.getNumDynamicDims())
    return op.emitError("must have as many index operands as dynamic extents "
                        "in the result type");

  // Ensure that region arguments span the index space.
  if (!llvm::all_of(op.body().getArgumentTypes(),
                    [](Type ty) { return ty.isIndex(); }))
    return op.emitError("all body arguments must be index");
  if (op.body().getNumArguments() != resultTy.getRank())
    return op.emitError("must have one body argument per input dimension");

  // Ensure that the region yields an element of the right type.
  auto yieldOp =
      llvm::cast<YieldOp>(op.body().getBlocks().front().getTerminator());
  if (yieldOp.value().getType() != resultTy.getElementType())
    return op.emitOpError(
        "body must be terminated with a `yield` operation of the tensor "
        "element type");

  return success();
}
|
|
|
|
|
2020-09-09 07:53:13 +00:00
|
|
|
/// Builds a dynamic_tensor_from_elements op and populates its generator
/// region via `bodyBuilder`, which receives one index block argument per
/// dimension of the result tensor type.
void DynamicTensorFromElementsOp::build(
    OpBuilder &b, OperationState &result, Type resultTy,
    ValueRange dynamicExtents,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
  build(b, result, resultTy, dynamicExtents);

  // Build and populate body.
  OpBuilder::InsertionGuard guard(b);
  Region *bodyRegion = result.regions.front().get();
  auto rank = resultTy.cast<RankedTensorType>().getRank();
  SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
  Block *bodyBlock =
      b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes);
  bodyBuilder(b, result.location, bodyBlock->getArguments());
}
|
|
|
|
|
2020-09-14 11:54:55 +02:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
/// Canonicalizes dynamic_tensor_from_elements operations with a constant
/// operand into the equivalent operation with the operand expressed in the
/// result type, instead. We also insert a type cast to make sure that the
/// resulting IR is still well-typed.
struct StaticDynamicTensorFromElements
    : public OpRewritePattern<DynamicTensorFromElementsOp> {
  using OpRewritePattern<DynamicTensorFromElementsOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DynamicTensorFromElementsOp tensorFromElements,
                                PatternRewriter &rewriter) const final {
    auto resultType =
        tensorFromElements.getResult().getType().cast<RankedTensorType>();

    // Nothing to do if the shape is already fully static.
    if (resultType.hasStaticShape())
      return failure();

    // Walk the shape; `operandsIt` tracks the extent operand for the current
    // dynamic dimension (static dimensions have no operand).
    SmallVector<Value, 4> newOperands;
    SmallVector<int64_t, 4> newShape;
    auto operandsIt = tensorFromElements.dynamicExtents().begin();

    for (int64_t dim : resultType.getShape()) {
      if (dim != RankedTensorType::kDynamicSize) {
        // Static dimension: keep it, no operand to consume.
        newShape.push_back(dim);
        continue;
      }
      APInt index;
      if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
        // Non-constant extent: stays dynamic and keeps its operand.
        newShape.push_back(RankedTensorType::kDynamicSize);
        newOperands.push_back(*operandsIt++);
        continue;
      }
      // Constant extent: fold it into the shape and drop the operand.
      newShape.push_back(index.getSExtValue());
      operandsIt++;
    }

    // No extent was folded; report match failure to avoid an infinite loop.
    if (newOperands.size() == tensorFromElements.dynamicExtents().size())
      return failure();

    // Rebuild with the more-static type and cast back to the original type
    // so existing uses remain well-typed.
    auto loc = tensorFromElements.getLoc();
    auto newOp = rewriter.create<DynamicTensorFromElementsOp>(
        loc, RankedTensorType::get(newShape, resultType.getElementType()),
        newOperands);
    rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
                                newOp.body().begin());
    rewriter.replaceOpWithNewOp<TensorCastOp>(tensorFromElements, resultType,
                                              newOp);
    return success();
  }
};
|
|
|
|
|
|
|
|
/// Canonicalizes the pattern of the form
///
/// %tensor = dynamic_tensor_from_elements %x {
///   ^bb0(%arg0: index):  // no predecessors
///   <computation>
///   yield %1 : index
/// } : tensor<?xindex>
/// %extracted_element = extract_element %tensor[%c0] : tensor<?xi32>
///
/// to just <computation> with %arg0 replaced by %c0. We only do this if the
/// dynamic_tensor_from_elements operation has no side-effects.
struct ExtractElementFromDynamicTensorFromElements
    : public OpRewritePattern<ExtractElementOp> {
  using OpRewritePattern<ExtractElementOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ExtractElementOp extract,
                                PatternRewriter &rewriter) const final {
    // Only fire when the aggregate is produced by a side-effect-free
    // dynamic_tensor_from_elements (cloning effectful ops would duplicate
    // their effects).
    auto tensorFromElements =
        extract.aggregate().getDefiningOp<DynamicTensorFromElementsOp>();
    if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
      return failure();

    // Clone the body at the extraction site, substituting the extract's
    // indices for the body's index block arguments.
    BlockAndValueMapping mapping;
    Block *body = tensorFromElements.getBody();
    mapping.map(body->getArguments(), extract.indices());
    for (auto &op : body->without_terminator())
      rewriter.clone(op, mapping);

    auto yield = cast<YieldOp>(body->getTerminator());

    // The yielded value (remapped into the clone) replaces the extract.
    rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
    return success();
  }
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
/// Registers the canonicalization patterns defined above.
void DynamicTensorFromElementsOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<ExtractElementFromDynamicTensorFromElements,
                 StaticDynamicTensorFromElements>(context);
}
|
|
|
|
|
2018-08-23 09:58:23 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ExtractElementOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-10 15:26:23 -07:00
|
|
|
/// Verifies an extract_element op: when the aggregate has a known rank, the
/// number of index operands (all operands except the aggregate itself) must
/// equal that rank. Unranked aggregates accept any number of indices.
static LogicalResult verify(ExtractElementOp op) {
  auto aggregateType = op.getAggregate().getType().cast<ShapedType>();
  if (!aggregateType.hasRank())
    return success();
  int64_t numIndices = op.getNumOperands() - 1;
  if (numIndices != aggregateType.getRank())
    return op.emitOpError("incorrect number of indices for extract_element");
  return success();
}
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Folds extract_element when the aggregate (and, when needed, all indices)
/// are compile-time constants, returning the extracted element attribute.
OpFoldResult ExtractElementOp::fold(ArrayRef<Attribute> operands) {
  assert(!operands.empty() && "extract_element takes at least one operand");

  // The aggregate operand must be a known constant.
  Attribute aggregate = operands.front();
  if (!aggregate)
    return {};

  // If this is a splat elements attribute, simply return the value. All of the
  // elements of a splat attribute are the same.
  if (auto splatAggregate = aggregate.dyn_cast<SplatElementsAttr>())
    return splatAggregate.getSplatValue();

  // Otherwise, collect the constant indices into the aggregate.
  SmallVector<uint64_t, 8> indices;
  for (Attribute indice : llvm::drop_begin(operands, 1)) {
    // Every index must itself be a constant integer for the fold to apply.
    if (!indice || !indice.isa<IntegerAttr>())
      return {};
    indices.push_back(indice.cast<IntegerAttr>().getInt());
  }

  // If this is an elements attribute, query the value at the given indices.
  // isValidIndex guards against out-of-bounds or wrong-rank index lists.
  auto elementsAttr = aggregate.dyn_cast<ElementsAttr>();
  if (elementsAttr && elementsAttr.isValidIndex(indices))
    return elementsAttr.getValue(indices);
  return {};
}
|
|
|
|
|
2020-05-28 13:36:40 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// TensorFromElementsOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-09-10 22:04:58 -07:00
|
|
|
/// Builds a tensor_from_elements op producing a statically shaped 1-D tensor
/// with one entry of `elementType` per value in `elements`.
void TensorFromElementsOp::build(OpBuilder &builder, OperationState &result,
                                 Type elementType, ValueRange elements) {
  int64_t numElements = static_cast<int64_t>(elements.size());
  auto resultTy = RankedTensorType::get({numElements}, elementType);
  result.addOperands(elements);
  result.addTypes(resultTy);
}
|
|
|
|
|
2020-09-09 07:44:38 +00:00
|
|
|
/// Convenience builder that infers the element type from the first element.
/// Requires a non-empty element list.
void TensorFromElementsOp::build(OpBuilder &builder, OperationState &result,
                                 ValueRange elements) {
  assert(!elements.empty() && "expected at least one element");
  build(builder, result, elements.front().getType(), elements);
}
|
|
|
|
|
2020-05-28 13:36:40 +02:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Canonicalizes the pattern of the form
|
|
|
|
//
|
|
|
|
// %tensor = "tensor_from_elements(%element) : (i32) -> tensor<1xi32>
|
|
|
|
// %extracted_element = extract_element %tensor[%c0] : tensor<1xi32>
|
|
|
|
//
|
|
|
|
// to just %element.
|
|
|
|
struct ExtractElementFromTensorFromElements
|
|
|
|
: public OpRewritePattern<ExtractElementOp> {
|
|
|
|
using OpRewritePattern<ExtractElementOp>::OpRewritePattern;
|
|
|
|
|
|
|
|
LogicalResult matchAndRewrite(ExtractElementOp extract,
|
|
|
|
PatternRewriter &rewriter) const final {
|
|
|
|
if (extract.indices().size() != 1)
|
|
|
|
return failure();
|
|
|
|
|
2020-09-14 11:54:55 +02:00
|
|
|
auto tensorFromElements = dyn_cast_or_null<TensorFromElementsOp>(
|
2020-06-16 21:04:57 +00:00
|
|
|
extract.aggregate().getDefiningOp());
|
2020-09-14 11:54:55 +02:00
|
|
|
if (tensorFromElements == nullptr)
|
2020-05-28 13:36:40 +02:00
|
|
|
return failure();
|
|
|
|
|
|
|
|
APInt index;
|
|
|
|
if (!matchPattern(*extract.indices().begin(), m_ConstantInt(&index)))
|
|
|
|
return failure();
|
|
|
|
rewriter.replaceOp(extract,
|
2020-09-14 11:54:55 +02:00
|
|
|
tensorFromElements.getOperand(index.getZExtValue()));
|
2020-05-28 13:36:40 +02:00
|
|
|
return success();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
/// Registers the extract_element(tensor_from_elements) folding pattern.
void TensorFromElementsOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<ExtractElementFromTensorFromElements>(context);
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// FPExtOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// fpext is valid between float types when the target is strictly wider, and
/// elementwise between vectors of such types.
bool FPExtOp::areCastCompatible(Type a, Type b) {
  auto srcFloat = a.dyn_cast<FloatType>();
  auto dstFloat = b.dyn_cast<FloatType>();
  if (srcFloat && dstFloat)
    return srcFloat.getWidth() < dstFloat.getWidth();
  // Defer to the vector helper, which recurses on the element types.
  return areVectorCastSimpleCompatible(a, b, areCastCompatible);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2020-05-11 01:25:45 -07:00
|
|
|
// FPToSIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// fptosi casts any float type to a signless integer type, elementwise for
/// vectors.
bool FPToSIOp::areCastCompatible(Type a, Type b) {
  bool scalarCase = a.isa<FloatType>() && b.isSignlessInteger();
  return scalarCase || areVectorCastSimpleCompatible(a, b, areCastCompatible);
}
|
|
|
|
|
2020-08-19 22:45:18 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// FPToUIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// fptoui casts any float type to a signless integer type, elementwise for
/// vectors.
bool FPToUIOp::areCastCompatible(Type a, Type b) {
  bool scalarCase = a.isa<FloatType>() && b.isSignlessInteger();
  return scalarCase || areVectorCastSimpleCompatible(a, b, areCastCompatible);
}
|
|
|
|
|
2020-05-11 01:25:45 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-03-04 09:44:36 -08:00
|
|
|
// FPTruncOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// fptrunc is valid between float types when the target is strictly
/// narrower, and elementwise between vectors of such types.
bool FPTruncOp::areCastCompatible(Type a, Type b) {
  auto srcFloat = a.dyn_cast<FloatType>();
  auto dstFloat = b.dyn_cast<FloatType>();
  if (srcFloat && dstFloat)
    return srcFloat.getWidth() > dstFloat.getWidth();
  // Defer to the vector helper, which recurses on the element types.
  return areVectorCastSimpleCompatible(a, b, areCastCompatible);
}
|
|
|
|
|
2020-11-02 11:21:29 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// GlobalMemrefOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Custom printer for the type-and-initializer portion of global_memref:
///   <type>                     (external declaration)
///   <type> = uninitialized     (allocated but not initialized)
///   <type> = <elements-attr>   (initialized; attr type elided)
static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p,
                                                   GlobalMemrefOp op,
                                                   TypeAttr type,
                                                   Attribute initialValue) {
  p << type;
  if (!op.isExternal()) {
    p << " = ";
    if (op.isUninitialized())
      p << "uninitialized";
    else
      // The attribute's type is implied by the memref type; don't repeat it.
      p.printAttributeWithoutType(initialValue);
  }
}
|
|
|
|
|
|
|
|
/// Custom parser for the type-and-initializer portion of global_memref.
/// Accepts `<type>`, `<type> = uninitialized`, or `<type> = <elements-attr>`;
/// an `uninitialized` initializer is encoded as a UnitAttr.
static ParseResult
parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
                                       Attribute &initialValue) {
  Type type;
  if (parser.parseType(type))
    return failure();

  // Globals must be static-shaped memrefs; reject anything else early.
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType || !memrefType.hasStaticShape())
    return parser.emitError(parser.getNameLoc())
           << "type should be static shaped memref, but got " << type;
  typeAttr = TypeAttr::get(type);

  // No `=` means this is an external declaration with no initializer.
  if (parser.parseOptionalEqual())
    return success();

  if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
    // UnitAttr marks "allocated but uninitialized".
    initialValue = UnitAttr::get(parser.getBuilder().getContext());
    return success();
  }

  // The initializer attribute is typed as the tensor equivalent of the
  // memref type.
  Type tensorType = getTensorTypeFromMemRefType(memrefType);
  if (parser.parseAttribute(initialValue, tensorType))
    return failure();
  if (!initialValue.isa<ElementsAttr>())
    return parser.emitError(parser.getNameLoc())
           << "initial value should be a unit or elements attribute";
  return success();
}
|
|
|
|
|
|
|
|
/// Verifies a global_memref op: the declared type must be a static-shaped
/// memref, and any initializer must be a UnitAttr ("uninitialized") or an
/// ElementsAttr whose type matches the memref's tensor equivalent.
static LogicalResult verify(GlobalMemrefOp op) {
  auto memrefType = op.type().dyn_cast<MemRefType>();
  if (!memrefType || !memrefType.hasStaticShape())
    return op.emitOpError("type should be static shaped memref, but got ")
           << op.type();

  // Verify that the initial value, if present, is either a unit attribute or
  // an elements attribute.
  if (op.initial_value().hasValue()) {
    Attribute initValue = op.initial_value().getValue();
    if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
      return op.emitOpError("initial value should be a unit or elements "
                            "attribute, but got ")
             << initValue;

    // Check that the type of the initial value is compatible with the type of
    // the global variable.
    if (initValue.isa<ElementsAttr>()) {
      Type initType = initValue.getType();
      Type tensorType = getTensorTypeFromMemRefType(memrefType);
      if (initType != tensorType)
        return op.emitOpError("initial value expected to be of type ")
               << tensorType << ", but was of type " << initType;
    }
  }

  // TODO: verify visibility for declarations.
  return success();
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// GetGlobalMemrefOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Checks that the symbol referenced by get_global_memref resolves to a
/// global_memref op whose type matches this op's result type.
LogicalResult
GetGlobalMemrefOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
  // Verify that the result type is same as the type of the referenced
  // global_memref op.
  auto global =
      symbolTable.lookupNearestSymbolFrom<GlobalMemrefOp>(*this, nameAttr());
  if (!global)
    return emitOpError("'")
           << name() << "' does not reference a valid global memref";

  Type resultType = result().getType();
  if (global.type() != resultType)
    return emitOpError("result type ")
           << resultType << " does not match type " << global.type()
           << " of the global memref @" << name();
  return success();
}
|
|
|
|
|
2019-06-17 11:35:05 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// IndexCastOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Index cast is applicable from index to integer and backwards.
|
|
|
|
bool IndexCastOp::areCastCompatible(Type a, Type b) {
|
2020-06-10 17:18:33 -07:00
|
|
|
if (a.isa<ShapedType>() && b.isa<ShapedType>()) {
|
|
|
|
auto aShaped = a.cast<ShapedType>();
|
|
|
|
auto bShaped = b.cast<ShapedType>();
|
|
|
|
|
|
|
|
return (aShaped.getShape() == bShaped.getShape()) &&
|
|
|
|
areCastCompatible(aShaped.getElementType(),
|
|
|
|
bShaped.getElementType());
|
|
|
|
}
|
|
|
|
|
2020-01-10 14:48:24 -05:00
|
|
|
return (a.isIndex() && b.isSignlessInteger()) ||
|
|
|
|
(a.isSignlessInteger() && b.isIndex());
|
2019-06-17 11:35:05 -07:00
|
|
|
}
|
|
|
|
|
2019-12-04 16:15:10 -08:00
|
|
|
/// Folds round-trip index casts and constant operands.
OpFoldResult IndexCastOp::fold(ArrayRef<Attribute> cstOperands) {
  // Fold IndexCast(IndexCast(x)) -> x
  auto cast = getOperand().getDefiningOp<IndexCastOp>();
  if (cast && cast.getOperand().getType() == getType())
    return cast.getOperand();

  // Fold IndexCast(constant) -> constant
  // A little hack because we go through int. Otherwise, the size
  // of the constant might need to change.
  if (auto value = cstOperands[0].dyn_cast_or_null<IntegerAttr>())
    return IntegerAttr::get(getType(), value.getInt());

  return {};
}
|
|
|
|
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// LoadOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-24 18:01:38 -07:00
|
|
|
/// Verifies a load op: one memref operand plus exactly one index per
/// dimension of the memref.
static LogicalResult verify(LoadOp op) {
  auto expectedNumOperands = 1 + op.getMemRefType().getRank();
  if (op.getNumOperands() != expectedNumOperands)
    return op.emitOpError("incorrect number of indices for load");
  return success();
}
|
|
|
|
|
2019-12-13 14:52:39 -08:00
|
|
|
/// load(memrefcast) -> load: folds a memref_cast feeding the memref operand
/// in place. Returns the op's own result to signal the in-place change.
OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
  if (failed(foldMemRefCast(*this)))
    return OpFoldResult();
  return getResult();
}
|
|
|
|
|
2018-10-22 09:00:03 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MemRefCastOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-07-08 13:44:36 +02:00
|
|
|
// ViewLikeOpInterface: the cast's source is the viewed memref.
Value MemRefCastOp::getViewSource() { return source(); }
|
|
|
|
|
2019-04-27 20:55:38 -07:00
|
|
|
/// Returns true if `a` can be memref_cast to `b`. Two ranked memrefs are
/// compatible when element type, memory space, and rank match, and every
/// static dimension/stride/offset agrees (dynamic matches anything). A
/// ranked and an unranked memref are compatible when element type and memory
/// space match; unranked-to-unranked is not supported.
bool MemRefCastOp::areCastCompatible(Type a, Type b) {
  auto aT = a.dyn_cast<MemRefType>();
  auto bT = b.dyn_cast<MemRefType>();

  auto uaT = a.dyn_cast<UnrankedMemRefType>();
  auto ubT = b.dyn_cast<UnrankedMemRefType>();

  if (aT && bT) {
    // Ranked -> ranked case.
    if (aT.getElementType() != bT.getElementType())
      return false;
    if (aT.getAffineMaps() != bT.getAffineMaps()) {
      // Differing layout maps are allowed only when both sides have strided
      // layouts of the same arity with pairwise-compatible values.
      int64_t aOffset, bOffset;
      SmallVector<int64_t, 4> aStrides, bStrides;
      if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
          failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
          aStrides.size() != bStrides.size())
        return false;

      // Strides along a dimension/offset are compatible if the value in the
      // source memref is static and the value in the target memref is the
      // same. They are also compatible if either one is dynamic (see
      // description of MemRefCastOp for details).
      auto checkCompatible = [](int64_t a, int64_t b) {
        return (a == MemRefType::getDynamicStrideOrOffset() ||
                b == MemRefType::getDynamicStrideOrOffset() || a == b);
      };
      if (!checkCompatible(aOffset, bOffset))
        return false;
      for (auto aStride : enumerate(aStrides))
        if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
          return false;
    }
    if (aT.getMemorySpace() != bT.getMemorySpace())
      return false;

    // They must have the same rank, and any specified dimensions must match.
    if (aT.getRank() != bT.getRank())
      return false;

    for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
      int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
      // -1 encodes a dynamic dimension, which is compatible with anything.
      if (aDim != -1 && bDim != -1 && aDim != bDim)
        return false;
    }
    return true;
  } else {
    // At least one side is unranked; each side must still be some memref.
    if (!aT && !uaT)
      return false;
    if (!bT && !ubT)
      return false;
    // Unranked to unranked casting is unsupported
    if (uaT && ubT)
      return false;

    auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
    auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
    if (aEltType != bEltType)
      return false;

    auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
    auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
    if (aMemSpace != bMemSpace)
      return false;

    return true;
  }

  return false;
}
|
2018-10-22 09:00:03 -07:00
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Folds trivial casts (via the generic cast folder) and, failing that,
/// memref_cast chains into this op's operand.
OpFoldResult MemRefCastOp::fold(ArrayRef<Attribute> operands) {
  Value folded = impl::foldCastOp(*this);
  if (folded)
    return folded;
  if (failed(foldMemRefCast(*this)))
    return Value();
  return getResult();
}
|
2018-10-22 09:00:03 -07:00
|
|
|
|
2020-10-22 14:48:52 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MemRefReinterpretCastOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-10-28 21:08:58 +01:00
|
|
|
/// Builds a memref_reinterpret_cast from explicit static and dynamic
/// offset/sizes/strides components; the static values are attached as
/// i64 array attributes.
void mlir::MemRefReinterpretCastOp::build(
    OpBuilder &b, OperationState &result, MemRefType resultType, Value source,
    int64_t staticOffset, ArrayRef<int64_t> staticSizes,
    ArrayRef<int64_t> staticStrides, ValueRange offset, ValueRange sizes,
    ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  build(b, result, resultType, source, offset, sizes, strides,
        b.getI64ArrayAttr(staticOffset), b.getI64ArrayAttr(staticSizes),
        b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}
|
|
|
|
|
|
|
|
/// Build a MemRefReinterpretCastOp with all dynamic entries: `staticOffsets`,
/// `staticSizes` and `staticStrides` are automatically filled with
/// source-memref-rank sentinel values that encode dynamic entries.
void mlir::MemRefReinterpretCastOp::build(OpBuilder &b, OperationState &result,
                                          MemRefType resultType, Value source,
                                          Value offset, ValueRange sizes,
                                          ValueRange strides,
                                          ArrayRef<NamedAttribute> attrs) {
  unsigned rank = resultType.getRank();
  // Sentinel-filled static arrays: every entry is dynamic.
  SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
  SmallVector<int64_t, 4> staticStridesVector(
      rank, ShapedType::kDynamicStrideOrOffset);
  build(b, result, resultType, source,
        /*staticOffset=*/ShapedType::kDynamicStrideOrOffset, staticSizesVector,
        staticStridesVector, offset, sizes, strides, attrs);
}
|
|
|
|
|
2020-10-22 14:48:52 +02:00
|
|
|
/// Print of the form:
/// ```
///   `name` ssa-name to
///       offset: `[` offset `]`
///       sizes: `[` size-list `]`
///       strides:`[` stride-list `]`
///   `:` any-memref-type to strided-memref-type
/// ```
static void print(OpAsmPrinter &p, MemRefReinterpretCastOp op) {
  // Strip the "std." dialect prefix from the printed op name.
  int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
  p << op.getOperationName().drop_front(stdDotLen) << " " << op.source()
    << " to offset: ";
  // Each list prints SSA operands for dynamic entries and integers for
  // static ones.
  printListOfOperandsOrIntegers(p, op.offsets(), op.static_offsets(),
                                ShapedType::isDynamicStrideOrOffset);
  p << ", sizes: ";
  printListOfOperandsOrIntegers(p, op.sizes(), op.static_sizes(),
                                ShapedType::isDynamic);
  p << ", strides: ";
  printListOfOperandsOrIntegers(p, op.strides(), op.static_strides(),
                                ShapedType::isDynamicStrideOrOffset);
  // Elide attributes that are re-derived by the custom parser.
  p.printOptionalAttrDict(
      op.getAttrs(),
      /*elidedAttrs=*/{MemRefReinterpretCastOp::getOperandSegmentSizeAttr(),
                       MemRefReinterpretCastOp::getStaticOffsetsAttrName(),
                       MemRefReinterpretCastOp::getStaticSizesAttrName(),
                       MemRefReinterpretCastOp::getStaticStridesAttrName()});
  p << ": " << op.source().getType() << " to " << op.getType();
}
|
|
|
|
|
|
|
|
/// Parse of the form:
/// ```
///   `name` ssa-name to
///       offset: `[` offset `]`
///       sizes: `[` size-list `]`
///       strides:`[` stride-list `]`
///   `:` any-memref-type to strided-memref-type
/// ```
static ParseResult parseMemRefReinterpretCastOp(OpAsmParser &parser,
                                                OperationState &result) {
  // Parse `operand` and `offset`.
  OpAsmParser::OperandType operand;
  if (parser.parseOperand(operand))
    return failure();

  // Parse offset. Mixed static/dynamic entries: SSA operands are collected
  // into `offset`, integers go into the static_offsets attribute.
  SmallVector<OpAsmParser::OperandType, 1> offset;
  if (parser.parseKeyword("to") || parser.parseKeyword("offset") ||
      parser.parseColon() ||
      parseListOfOperandsOrIntegers(
          parser, result, MemRefReinterpretCastOp::getStaticOffsetsAttrName(),
          ShapedType::kDynamicStrideOrOffset, offset) ||
      parser.parseComma())
    return failure();

  // Parse `sizes`.
  SmallVector<OpAsmParser::OperandType, 4> sizes;
  if (parser.parseKeyword("sizes") || parser.parseColon() ||
      parseListOfOperandsOrIntegers(
          parser, result, MemRefReinterpretCastOp::getStaticSizesAttrName(),
          ShapedType::kDynamicSize, sizes) ||
      parser.parseComma())
    return failure();

  // Parse `strides`.
  SmallVector<OpAsmParser::OperandType, 4> strides;
  if (parser.parseKeyword("strides") || parser.parseColon() ||
      parseListOfOperandsOrIntegers(
          parser, result, MemRefReinterpretCastOp::getStaticStridesAttrName(),
          ShapedType::kDynamicStrideOrOffset, strides))
    return failure();

  // Handle segment sizes. The op has variadic operand groups (source,
  // offsets, sizes, strides), so record how many operands each group got.
  auto b = parser.getBuilder();
  SmallVector<int, 4> segmentSizes = {1, static_cast<int>(offset.size()),
                                      static_cast<int>(sizes.size()),
                                      static_cast<int>(strides.size())};
  result.addAttribute(MemRefReinterpretCastOp::getOperandSegmentSizeAttr(),
                      b.getI32VectorAttr(segmentSizes));

  // Parse types and resolve. All dynamic offset/size/stride operands are of
  // index type.
  Type indexType = b.getIndexType();
  Type operandType, resultType;
  return failure(
      (parser.parseOptionalAttrDict(result.attributes) ||
       parser.parseColonType(operandType) || parser.parseKeyword("to") ||
       parser.parseType(resultType) ||
       parser.resolveOperand(operand, operandType, result.operands) ||
       parser.resolveOperands(offset, indexType, result.operands) ||
       parser.resolveOperands(sizes, indexType, result.operands) ||
       parser.resolveOperands(strides, indexType, result.operands) ||
       parser.addTypeToList(resultType, result.types)));
}
|
|
|
|
|
|
|
|
/// Verifies a memref_reinterpret_cast op: source/result memory space and
/// element type must match; the dynamic operands must be consistent with the
/// static offset/size/stride attributes; and when the result type carries an
/// explicit layout, its shape, offset and strides must equal the values
/// encoded in the attributes.
static LogicalResult verify(MemRefReinterpretCastOp op) {
  // The source and result memrefs should be in the same memory space.
  auto srcType = op.source().getType().cast<BaseMemRefType>();
  auto resultType = op.getType().cast<MemRefType>();
  if (srcType.getMemorySpace() != resultType.getMemorySpace())
    return op.emitError("different memory spaces specified for source type ")
           << srcType << " and result memref type " << resultType;
  if (srcType.getElementType() != resultType.getElementType())
    return op.emitError("different element types specified for source type ")
           << srcType << " and result memref type " << resultType;

  // Verify that dynamic and static offset/sizes/strides arguments/attributes
  // are consistent.
  if (failed(verifyOpWithOffsetSizesAndStridesPart(
          op, "offset", 1, op.getStaticOffsetsAttrName(), op.static_offsets(),
          ShapedType::isDynamicStrideOrOffset, op.offsets())))
    return failure();
  unsigned resultRank = op.getResultRank();
  if (failed(verifyOpWithOffsetSizesAndStridesPart(
          op, "size", resultRank, op.getStaticSizesAttrName(),
          op.static_sizes(), ShapedType::isDynamic, op.sizes())))
    return failure();
  if (failed(verifyOpWithOffsetSizesAndStridesPart(
          op, "stride", resultRank, op.getStaticStridesAttrName(),
          op.static_strides(), ShapedType::isDynamicStrideOrOffset,
          op.strides())))
    return failure();

  // Match sizes in result memref type and in static_sizes attribute.
  for (auto &en :
       llvm::enumerate(llvm::zip(resultType.getShape(),
                                 extractFromI64ArrayAttr(op.static_sizes())))) {
    int64_t resultSize = std::get<0>(en.value());
    int64_t expectedSize = std::get<1>(en.value());
    if (resultSize != expectedSize)
      return op.emitError("expected result type with size = ")
             << expectedSize << " instead of " << resultSize
             << " in dim = " << en.index();
  }

  // Match offset and strides in static_offset and static_strides attributes if
  // result memref type has an affine map specified.
  if (!resultType.getAffineMaps().empty()) {
    int64_t resultOffset;
    SmallVector<int64_t, 4> resultStrides;
    if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
      return failure();

    // Match offset in result memref type and in static_offsets attribute.
    int64_t expectedOffset =
        extractFromI64ArrayAttr(op.static_offsets()).front();
    if (resultOffset != expectedOffset)
      // Print the expected value first and the actual value second, so the
      // message reads the same way as the size and stride diagnostics below.
      return op.emitError("expected result type with offset = ")
             << expectedOffset << " instead of " << resultOffset;

    // Match strides in result memref type and in static_strides attribute.
    for (auto &en : llvm::enumerate(llvm::zip(
             resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
      int64_t resultStride = std::get<0>(en.value());
      int64_t expectedStride = std::get<1>(en.value());
      if (resultStride != expectedStride)
        return op.emitError("expected result type with stride = ")
               << expectedStride << " instead of " << resultStride
               << " in dim = " << en.index();
    }
  }
  return success();
}
|
|
|
|
|
2020-10-21 21:05:06 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MemRefReshapeOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Checks that a memref_reshape op reshapes between compatible types:
/// matching element types, identity layouts on both sides, and a shape
/// operand whose length agrees with a statically-ranked result.
static LogicalResult verify(MemRefReshapeOp op) {
  Type srcType = op.source().getType();
  Type dstType = op.result().getType();

  // Element types of the source and destination must agree.
  if (srcType.cast<ShapedType>().getElementType() !=
      dstType.cast<ShapedType>().getElementType())
    return op.emitOpError("element types of source and destination memref "
                          "types should be the same");

  // A memref source must carry the default (identity) layout.
  auto srcMemRefType = srcType.dyn_cast<MemRefType>();
  if (srcMemRefType && !srcMemRefType.getAffineMaps().empty())
    return op.emitOpError(
        "source memref type should have identity affine map");

  int64_t numShapeElements =
      op.shape().getType().cast<MemRefType>().getDimSize(0);
  if (auto dstMemRefType = dstType.dyn_cast<MemRefType>()) {
    // Same identity-layout restriction applies to the result.
    if (!dstMemRefType.getAffineMaps().empty())
      return op.emitOpError(
          "result memref type should have identity affine map");
    // A statically-ranked result requires a statically-sized shape operand
    // whose length equals the result rank.
    if (numShapeElements == ShapedType::kDynamicSize)
      return op.emitOpError("cannot use shape operand with dynamic length to "
                            "reshape to statically-ranked memref type");
    if (numShapeElements != dstMemRefType.getRank())
      return op.emitOpError(
          "length of shape operand differs from the result's memref rank");
  }
  return success();
}
|
|
|
|
|
2018-09-26 10:07:16 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MulFOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Constant-folds mulf when both operands are constant floats.
OpFoldResult MulFOp::fold(ArrayRef<Attribute> operands) {
  auto mul = [](APFloat lhs, APFloat rhs) { return lhs * rhs; };
  return constFoldBinaryOp<FloatAttr>(operands, mul);
}
|
|
|
|
|
2018-10-03 09:43:13 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// MulIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Folds muli: multiplication by zero yields zero, by one yields the other
/// operand, and two constants are folded directly.
OpFoldResult MulIOp::fold(ArrayRef<Attribute> operands) {
  // muli(x, 0) -> 0
  if (matchPattern(rhs(), m_Zero()))
    return rhs();
  // muli(x, 1) -> x
  if (matchPattern(rhs(), m_One()))
    return getOperand(0);

  // TODO: Handle the overflow case.
  auto mul = [](APInt lhs, APInt rhs) { return lhs * rhs; };
  return constFoldBinaryOp<IntegerAttr>(operands, mul);
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// OrOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Folds or: identity with zero, idempotence with equal operands, and
/// bitwise-or of two constants.
OpFoldResult OrOp::fold(ArrayRef<Attribute> operands) {
  // or(x, 0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();
  // or(x, x) -> x
  if (lhs() == rhs())
    return rhs();

  auto bitwiseOr = [](APInt lhs, APInt rhs) { return lhs | rhs; };
  return constFoldBinaryOp<IntegerAttr>(operands, bitwiseOr);
}
|
|
|
|
|
2019-12-18 09:59:37 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// PrefetchOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Prints a prefetch op: memref and indices, then the read/write, locality
/// and cache-type specifiers as keywords (the backing attributes are elided
/// from the attribute dictionary), and finally the memref type.
static void print(OpAsmPrinter &p, PrefetchOp op) {
  p << PrefetchOp::getOperationName() << " " << op.memref() << '[';
  p.printOperands(op.indices());
  p << ']';
  p << ", " << (op.isWrite() ? "write" : "read");
  p << ", locality<" << op.localityHint() << ">";
  p << ", " << (op.isDataCache() ? "data" : "instr");
  p.printOptionalAttrDict(
      op.getAttrs(),
      /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
  p << " : " << op.getMemRefType();
}
|
|
|
|
|
|
|
|
/// Parses a prefetch op of the form:
///   prefetch %memref[%i, %j], <read|write>, locality<N>, <data|instr> : type
/// The read/write and data/instr keywords are validated and converted into
/// the `isWrite` and `isDataCache` boolean attributes.
static ParseResult parsePrefetchOp(OpAsmParser &parser,
                                   OperationState &result) {
  OpAsmParser::OperandType memrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> indexInfo;
  IntegerAttr localityHint;
  MemRefType type;
  StringRef readOrWrite, cacheType;

  auto indexTy = parser.getBuilder().getIndexType();
  auto i32Type = parser.getBuilder().getIntegerType(32);
  // Parse the whole syntax in one short-circuiting chain; the locality hint
  // is parsed directly into result.attributes as an i32 attribute.
  if (parser.parseOperand(memrefInfo) ||
      parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
      parser.parseComma() || parser.parseKeyword("locality") ||
      parser.parseLess() ||
      parser.parseAttribute(localityHint, i32Type, "localityHint",
                            result.attributes) ||
      parser.parseGreater() || parser.parseComma() ||
      parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
      parser.resolveOperand(memrefInfo, type, result.operands) ||
      parser.resolveOperands(indexInfo, indexTy, result.operands))
    return failure();

  // The rw specifier keyword becomes the boolean `isWrite` attribute.
  if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
    return parser.emitError(parser.getNameLoc(),
                            "rw specifier has to be 'read' or 'write'");
  result.addAttribute(
      PrefetchOp::getIsWriteAttrName(),
      parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));

  // The cache-type keyword becomes the boolean `isDataCache` attribute.
  if (!cacheType.equals("data") && !cacheType.equals("instr"))
    return parser.emitError(parser.getNameLoc(),
                            "cache type has to be 'data' or 'instr'");

  result.addAttribute(
      PrefetchOp::getIsDataCacheAttrName(),
      parser.getBuilder().getBoolAttr(cacheType.equals("data")));

  return success();
}
|
|
|
|
|
|
|
|
/// Verifies that a prefetch op supplies one index per memref dimension
/// (operands are the memref followed by its indices).
static LogicalResult verify(PrefetchOp op) {
  unsigned expectedNumOperands = 1 + op.getMemRefType().getRank();
  if (op.getNumOperands() != expectedNumOperands)
    return op.emitOpError("too few indices");
  return success();
}
|
|
|
|
|
|
|
|
/// Folding hook for prefetch; delegates to foldMemRefCast so that a prefetch
/// through a memref_cast operates on the cast's source operand instead.
LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  // prefetch(memrefcast) -> prefetch
  return foldMemRefCast(*this);
}
|
|
|
|
|
2019-05-29 09:22:30 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// RankOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Folds rank to an index constant when the operand's rank is statically
/// known; otherwise returns a null attribute.
OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
  auto shapedType = getOperand().getType().dyn_cast<ShapedType>();
  if (!shapedType || !shapedType.hasRank())
    return IntegerAttr();
  return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
}
|
|
|
|
|
2018-11-28 07:08:55 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2019-03-01 16:58:00 -08:00
|
|
|
// ReturnOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-13 11:53:14 -07:00
|
|
|
/// Verifies that a std.return matches the enclosing function's signature in
/// both the number of operands and their types.
static LogicalResult verify(ReturnOp op) {
  auto function = cast<FuncOp>(op.getParentOp());
  const auto &resultTypes = function.getType().getResults();

  // The number of returned values must match the signature.
  if (op.getNumOperands() != resultTypes.size())
    return op.emitOpError("has ")
           << op.getNumOperands() << " operands, but enclosing function (@"
           << function.getName() << ") returns " << resultTypes.size();

  // Each returned value must have exactly the declared result type.
  for (unsigned i = 0, e = resultTypes.size(); i != e; ++i) {
    Type operandType = op.getOperand(i).getType();
    if (operandType != resultTypes[i])
      return op.emitError()
             << "type of return operand " << i << " (" << operandType
             << ") doesn't match function result type (" << resultTypes[i]
             << ")"
             << " in function @" << function.getName();
  }

  return success();
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2018-11-28 07:08:55 -08:00
|
|
|
// SelectOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
2018-11-28 15:09:39 -08:00
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Folds select when the condition is a known constant boolean.
OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
  auto cond = getCondition();

  // select true, %0, %1 => %0
  if (matchPattern(cond, m_One()))
    return getTrueValue();
  // select false, %0, %1 => %1
  if (matchPattern(cond, m_Zero()))
    return getFalseValue();

  // Non-constant condition: nothing to fold.
  return nullptr;
}
|
|
|
|
|
2020-04-23 04:40:42 -07:00
|
|
|
/// Prints a select op. A shaped (vector/tensor) condition type is printed
/// explicitly before the result type; a scalar i1 condition is implied.
static void print(OpAsmPrinter &p, SelectOp op) {
  p << "select " << op.getOperands();
  p.printOptionalAttrDict(op.getAttrs());
  p << " : ";
  ShapedType maskType = op.getCondition().getType().dyn_cast<ShapedType>();
  if (maskType)
    p << maskType << ", ";
  p << op.getType();
}
|
|
|
|
|
|
|
|
/// Parses a select op: three operands, optional attributes, then either a
/// single result type (scalar i1 condition) or `condition-type, result-type`
/// for masked vector/tensor selects.
static ParseResult parseSelectOp(OpAsmParser &parser, OperationState &result) {
  SmallVector<OpAsmParser::OperandType, 3> operandInfos;
  Type resultType;
  if (parser.parseOperandList(operandInfos, /*requiredOperandCount=*/3) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(resultType))
    return failure();

  // Default condition type is scalar i1; an explicit shaped condition type is
  // signalled by a comma, in which case the type parsed so far was the
  // condition and the real result type follows.
  Type conditionType = parser.getBuilder().getI1Type();
  if (succeeded(parser.parseOptionalComma())) {
    conditionType = resultType;
    if (parser.parseType(resultType))
      return failure();
  }

  result.addTypes(resultType);
  return parser.resolveOperands(operandInfos,
                                {conditionType, resultType, resultType},
                                parser.getNameLoc(), result.operands);
}
|
|
|
|
|
|
|
|
/// Verifies a select op: the condition is either a scalar signless i1, or —
/// when the result is a vector/tensor — an i1 mask with the result's shape.
static LogicalResult verify(SelectOp op) {
  Type conditionType = op.getCondition().getType();
  if (conditionType.isSignlessInteger(1))
    return success();

  // A non-scalar condition is only allowed for shaped results.
  Type resultType = op.getType();
  if (!resultType.isa<TensorType, VectorType>())
    return op.emitOpError()
           << "expected condition to be a signless i1, but got "
           << conditionType;

  // The mask must have exactly the i1-typed shape of the result.
  Type expectedConditionType = getI1SameShape(resultType);
  if (conditionType != expectedConditionType)
    return op.emitOpError()
           << "expected condition type to have the same shape "
              "as the result type, expected "
           << expectedConditionType << ", but got " << conditionType;
  return success();
}
|
|
|
|
|
2019-09-21 16:14:07 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SignExtendIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Verifies sexti: both scalar (element) types must be non-index integers,
/// and the result must be strictly wider than the operand.
static LogicalResult verify(SignExtendIOp op) {
  // Work on the scalar types; for shaped operands this is the element type.
  Type srcType = getElementTypeOrSelf(op.getOperand().getType());
  Type dstType = getElementTypeOrSelf(op.getType());

  // For now, index is forbidden for the source and the destination type.
  if (srcType.isa<IndexType>())
    return op.emitError() << srcType << " is not a valid operand type";
  if (dstType.isa<IndexType>())
    return op.emitError() << dstType << " is not a valid result type";

  // Sign-extension must strictly widen the integer.
  unsigned srcWidth = srcType.cast<IntegerType>().getWidth();
  unsigned dstWidth = dstType.cast<IntegerType>().getWidth();
  if (srcWidth >= dstWidth)
    return op.emitError("result type ")
           << dstType << " must be wider than operand type " << srcType;

  return success();
}
|
|
|
|
|
2019-09-24 12:44:11 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-03-04 09:44:36 -08:00
|
|
|
// SignedDivIOp
|
2019-09-24 12:44:11 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
/// Folds divi_signed: constant operands are divided with overflow checking,
/// and division by (a splat of) one folds to the dividend.
OpFoldResult SignedDivIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "binary operation takes two operands");

  // Constant-fold, but give up if the division would overflow (INT_MIN / -1)
  // or divide by zero.
  bool overflowOrDiv0 = false;
  auto result = constFoldBinaryOp<IntegerAttr>(operands, [&](APInt a, APInt b) {
    if (overflowOrDiv0 || !b) {
      overflowOrDiv0 = true;
      return a;
    }
    return a.sdiv_ov(b, overflowOrDiv0);
  });

  // Fold out division by one. Assumes all tensors of all ones are splats.
  if (auto rhsAttr = operands[1].dyn_cast_or_null<IntegerAttr>()) {
    if (rhsAttr.getValue() == 1)
      return lhs();
  } else if (auto rhsAttr =
                 operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
    if (rhsAttr.getSplatValue<IntegerAttr>().getValue() == 1)
      return lhs();
  }

  return overflowOrDiv0 ? Attribute() : result;
}
|
|
|
|
|
2020-11-04 13:51:10 -05:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SignedFloorDivIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Returns (a - 1) / b + 1, i.e. the ceiling of a / b for positive inputs,
/// reporting any signed overflow through `overflow`.
static APInt signedCeilNonnegInputs(APInt a, APInt b, bool &overflow) {
  APInt one(a.getBitWidth(), 1, /*isSigned=*/true);
  APInt quotient = a.ssub_ov(one, overflow).sdiv_ov(b, overflow);
  return quotient.sadd_ov(one, overflow);
}
|
|
|
|
|
|
|
|
/// Folds floordivi_signed. Constant operands are folded with a sign-based
/// case analysis that rounds the quotient toward negative infinity, using
/// overflow-checked APInt arithmetic throughout; division by (a splat of)
/// one folds to the dividend.
OpFoldResult SignedFloorDivIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "binary operation takes two operands");

  // Don't fold if it would overflow or if it requires a division by zero.
  bool overflowOrDiv0 = false;
  auto result = constFoldBinaryOp<IntegerAttr>(operands, [&](APInt a, APInt b) {
    if (overflowOrDiv0 || !b) {
      overflowOrDiv0 = true;
      return a;
    }
    unsigned bits = a.getBitWidth();
    APInt zero = APInt::getNullValue(bits);
    if (a.sge(zero) && b.sgt(zero)) {
      // Both positive (or a is zero), return a / b.
      // Truncating division already rounds toward -inf here.
      return a.sdiv_ov(b, overflowOrDiv0);
    } else if (a.sle(zero) && b.slt(zero)) {
      // Both negative (or a is zero), return -a / -b.
      APInt posA = zero.ssub_ov(a, overflowOrDiv0);
      APInt posB = zero.ssub_ov(b, overflowOrDiv0);
      return posA.sdiv_ov(posB, overflowOrDiv0);
    } else if (a.slt(zero) && b.sgt(zero)) {
      // A is negative, b is positive, return - ceil(-a, b).
      APInt posA = zero.ssub_ov(a, overflowOrDiv0);
      APInt ceil = signedCeilNonnegInputs(posA, b, overflowOrDiv0);
      return zero.ssub_ov(ceil, overflowOrDiv0);
    } else {
      // A is positive, b is negative, return - ceil(a, -b).
      APInt posB = zero.ssub_ov(b, overflowOrDiv0);
      APInt ceil = signedCeilNonnegInputs(a, posB, overflowOrDiv0);
      return zero.ssub_ov(ceil, overflowOrDiv0);
    }
  });

  // Fold out floor division by one. Assumes all tensors of all ones are
  // splats.
  if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
    if (rhs.getValue() == 1)
      return lhs();
  } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
    if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
      return lhs();
  }

  return overflowOrDiv0 ? Attribute() : result;
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SignedCeilDivIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Folds ceildivi_signed. Constant operands are folded with a sign-based
/// case analysis that rounds the quotient toward positive infinity, using
/// overflow-checked APInt arithmetic throughout; division by (a splat of)
/// one folds to the dividend.
OpFoldResult SignedCeilDivIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "binary operation takes two operands");

  // Don't fold if it would overflow or if it requires a division by zero.
  bool overflowOrDiv0 = false;
  auto result = constFoldBinaryOp<IntegerAttr>(operands, [&](APInt a, APInt b) {
    if (overflowOrDiv0 || !b) {
      overflowOrDiv0 = true;
      return a;
    }
    unsigned bits = a.getBitWidth();
    APInt zero = APInt::getNullValue(bits);
    if (a.sgt(zero) && b.sgt(zero)) {
      // Both positive, return ceil(a, b).
      return signedCeilNonnegInputs(a, b, overflowOrDiv0);
    } else if (a.slt(zero) && b.slt(zero)) {
      // Both negative, return ceil(-a, -b).
      APInt posA = zero.ssub_ov(a, overflowOrDiv0);
      APInt posB = zero.ssub_ov(b, overflowOrDiv0);
      return signedCeilNonnegInputs(posA, posB, overflowOrDiv0);
    } else if (a.slt(zero) && b.sgt(zero)) {
      // A is negative, b is positive, return - ( -a / b).
      // Truncating division of the mixed-sign case already rounds up.
      APInt posA = zero.ssub_ov(a, overflowOrDiv0);
      APInt div = posA.sdiv_ov(b, overflowOrDiv0);
      return zero.ssub_ov(div, overflowOrDiv0);
    } else {
      // A is positive (or zero), b is negative, return - (a / -b).
      APInt posB = zero.ssub_ov(b, overflowOrDiv0);
      APInt div = a.sdiv_ov(posB, overflowOrDiv0);
      return zero.ssub_ov(div, overflowOrDiv0);
    }
  });

  // Fold out ceil division by one. Assumes all tensors of all ones are
  // splats.
  if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
    if (rhs.getValue() == 1)
      return lhs();
  } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
    if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
      return lhs();
  }

  return overflowOrDiv0 ? Attribute() : result;
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SignedRemIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
2019-09-24 12:44:11 -07:00
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
/// Folds remi_signed: x % 1 is zero, division by zero is not folded, and two
/// integer constants fold via APInt::srem.
OpFoldResult SignedRemIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "remi_signed takes two operands");

  // The divisor must be a known integer constant to fold anything.
  auto rhsAttr = operands.back().dyn_cast_or_null<IntegerAttr>();
  if (!rhsAttr)
    return {};
  APInt rhsValue = rhsAttr.getValue();

  // x % 1 = 0
  if (rhsValue.isOneValue())
    return IntegerAttr::get(rhsAttr.getType(),
                            APInt(rhsValue.getBitWidth(), 0));

  // Don't fold if it requires division by zero.
  if (rhsValue.isNullValue())
    return {};

  // Fold completely only when the dividend is constant too.
  auto lhsAttr = operands.front().dyn_cast_or_null<IntegerAttr>();
  if (!lhsAttr)
    return {};
  return IntegerAttr::get(lhsAttr.getType(),
                          lhsAttr.getValue().srem(rhsValue));
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SIToFPOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// sitofp is applicable from integer types to float types.
|
|
|
|
// sitofp accepts a signless integer source and a float destination;
// otherwise it defers to the shared vector-compatibility helper.
bool SIToFPOp::areCastCompatible(Type a, Type b) {
  bool scalarOk = a.isSignlessInteger() && b.isa<FloatType>();
  return scalarOk || areVectorCastSimpleCompatible(a, b, areCastCompatible);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SplatOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Verifies that a splat's scalar operand matches the element type of its
/// shaped result.
static LogicalResult verify(SplatOp op) {
  // TODO: we could replace this by a trait.
  Type elementType = op.getType().cast<ShapedType>().getElementType();
  if (op.getOperand().getType() != elementType)
    return op.emitError("operand should be of elemental type of result type");
  return success();
}
|
|
|
|
|
|
|
|
// Constant folding hook for SplatOp.
|
|
|
|
/// Constant folding hook for SplatOp: a constant integer or float operand
/// folds to a SplatElementsAttr of the result's shaped type.
OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 1 && "splat takes one operand");

  // Only integer and float constants are folded.
  Attribute value = operands.front();
  if (!value || !value.isa<IntegerAttr, FloatAttr>())
    return {};

  auto resultType = getType().cast<ShapedType>();
  assert(resultType.getElementType() == value.getType() &&
         "incorrect input attribute type for folding");

  // SplatElementsAttr::get treats a single value for the second argument as
  // a splat.
  return SplatElementsAttr::get(resultType, {value});
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// StoreOp
|
2018-08-09 12:28:58 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-24 18:01:38 -07:00
|
|
|
/// Verifies that a store supplies one index per memref dimension (operands
/// are the stored value, the memref, and the indices).
static LogicalResult verify(StoreOp op) {
  int64_t expectedNumOperands = 2 + op.getMemRefType().getRank();
  if (op.getNumOperands() != expectedNumOperands)
    return op.emitOpError("store index operand count not equal to memref rank");
  return success();
}
|
|
|
|
|
2019-12-13 14:52:39 -08:00
|
|
|
/// Folding hook for store; delegates to foldMemRefCast so that a store
/// through a memref_cast operates on the cast's source operand instead.
LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
                            SmallVectorImpl<OpFoldResult> &results) {
  /// store(memrefcast) -> store
  return foldMemRefCast(*this);
}
|
|
|
|
|
2018-10-03 09:43:13 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SubFOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Constant-folds subf when both operands are constant floats.
OpFoldResult SubFOp::fold(ArrayRef<Attribute> operands) {
  auto sub = [](APFloat lhs, APFloat rhs) { return lhs - rhs; };
  return constFoldBinaryOp<FloatAttr>(operands, sub);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// SubIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2019-05-16 12:51:45 -07:00
|
|
|
/// Folds subi: equal operands give zero, a zero subtrahend gives the other
/// operand, and two constants are folded directly.
OpFoldResult SubIOp::fold(ArrayRef<Attribute> operands) {
  // subi(x,x) -> 0
  if (getOperand(0) == getOperand(1))
    return Builder(getContext()).getZeroAttr(getType());
  // subi(x,0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();

  auto sub = [](APInt lhs, APInt rhs) { return lhs - rhs; };
  return constFoldBinaryOp<IntegerAttr>(operands, sub);
}
|
2018-10-25 16:44:04 -07:00
|
|
|
|
2019-04-08 00:00:46 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-08-19 22:45:18 +02:00
|
|
|
// UIToFPOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// uitofp is applicable from integer types to float types.
|
|
|
|
// uitofp accepts a signless integer source and a float destination;
// otherwise it defers to the shared vector-compatibility helper.
bool UIToFPOp::areCastCompatible(Type a, Type b) {
  bool scalarOk = a.isSignlessInteger() && b.isa<FloatType>();
  return scalarOk || areVectorCastSimpleCompatible(a, b, areCastCompatible);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
2020-03-04 09:44:36 -08:00
|
|
|
// SubViewOp
|
2018-10-25 16:44:04 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-05-12 17:17:34 -04:00
|
|
|
namespace {
/// Helpers to write more idiomatic operations.
namespace saturated_arith {
/// Thin wrapper around an int64_t stride/offset value that makes the
/// "dynamic" sentinel (ShapedType::kDynamicStrideOrOffset) absorbing under
/// the arithmetic operators defined below.
struct Wrapper {
  explicit Wrapper(int64_t v) : v(v) {}
  operator int64_t() { return v; }
  int64_t v;
};
/// Addition that propagates the dynamic sentinel: if either side is
/// dynamic, the result is dynamic.
Wrapper operator+(Wrapper a, int64_t b) {
  if (ShapedType::isDynamicStrideOrOffset(a) ||
      ShapedType::isDynamicStrideOrOffset(b))
    return Wrapper(ShapedType::kDynamicStrideOrOffset);
  return Wrapper(a.v + b);
}
/// Multiplication with the same dynamic-sentinel propagation as operator+.
Wrapper operator*(Wrapper a, int64_t b) {
  if (ShapedType::isDynamicStrideOrOffset(a) ||
      ShapedType::isDynamicStrideOrOffset(b))
    return Wrapper(ShapedType::kDynamicStrideOrOffset);
  return Wrapper(a.v * b);
}
} // end namespace saturated_arith
} // end namespace
|
|
|
|
|
|
|
|
/// A subview result type can be fully inferred from the source type and the
|
|
|
|
/// static representation of offsets, sizes and strides. Special sentinels
|
|
|
|
/// encode the dynamic case.
|
2020-10-02 05:32:35 -04:00
|
|
|
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
                                ArrayRef<int64_t> staticOffsets,
                                ArrayRef<int64_t> staticSizes,
                                ArrayRef<int64_t> staticStrides) {
  // All three static lists must have one entry per source dimension.
  unsigned rank = sourceMemRefType.getRank();
  (void)rank;
  assert(staticOffsets.size() == rank &&
         "unexpected staticOffsets size mismatch");
  assert(staticSizes.size() == rank && "unexpected staticSizes size mismatch");
  assert(staticStrides.size() == rank &&
         "unexpected staticStrides size mismatch");

  // Extract source offset and strides.
  int64_t sourceOffset;
  SmallVector<int64_t, 4> sourceStrides;
  auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
  assert(succeeded(res) && "SubViewOp expected strided memref type");
  (void)res;

  // Compute target offset whose value is:
  //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
  // The saturated_arith wrappers propagate the dynamic sentinel: if any
  // contributing value is dynamic, the resulting offset is dynamic.
  int64_t targetOffset = sourceOffset;
  for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
    auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it);
    using namespace saturated_arith;
    targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride;
  }

  // Compute target stride whose value is:
  //   `sourceStrides_i * staticStrides_i`.
  // Again, dynamic strides stay dynamic through the saturated wrappers.
  SmallVector<int64_t, 4> targetStrides;
  targetStrides.reserve(staticOffsets.size());
  for (auto it : llvm::zip(sourceStrides, staticStrides)) {
    auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
    using namespace saturated_arith;
    targetStrides.push_back(Wrapper(sourceStride) * staticStride);
  }

  // The type is now known: shape from the static sizes, layout from the
  // computed strides and offset, element type and memory space carried over
  // from the source.
  return MemRefType::get(
      staticSizes, sourceMemRefType.getElementType(),
      makeStridedLinearLayoutMap(targetStrides, targetOffset,
                                 sourceMemRefType.getContext()),
      sourceMemRefType.getMemorySpace());
}
|
|
|
|
|
|
|
|
/// Print SubViewOp in the form:
|
|
|
|
/// ```
|
|
|
|
/// subview ssa-name `[` offset-list `]` `[` size-list `]` `[` stride-list `]`
|
|
|
|
/// `:` strided-memref-type `to` strided-memref-type
|
|
|
|
/// ```
|
2020-10-02 05:32:35 -04:00
|
|
|
template <typename OpType>
static void printOpWithOffsetsSizesAndStrides(
    OpAsmPrinter &p, OpType op,
    llvm::function_ref<void(OpAsmPrinter &p, OpType op)> printExtraOperands =
        [](OpAsmPrinter &p, OpType op) {},
    StringRef resultTypeKeyword = "to") {
  // Print the op name with the "std." dialect prefix stripped (namespace
  // length + 1 for the '.' separator).
  int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
  p << op.getOperation()->getName().getStringRef().drop_front(stdDotLen) << ' ';
  p << op.source();
  // Hook for ops that carry an additional operand (no-op by default).
  printExtraOperands(p, op);
  // Each bracketed list mixes SSA operands and static integers; entries
  // matching the given dynamic predicate are printed from the operand list.
  printListOfOperandsOrIntegers(p, op.offsets(), op.static_offsets(),
                                ShapedType::isDynamicStrideOrOffset);
  p << ' ';
  printListOfOperandsOrIntegers(p, op.sizes(), op.static_sizes(),
                                ShapedType::isDynamic);
  p << ' ';
  printListOfOperandsOrIntegers(p, op.strides(), op.static_strides(),
                                ShapedType::isDynamicStrideOrOffset);
  p << ' ';
  // Elide the attributes already rendered by the custom syntax above.
  p.printOptionalAttrDict(op.getAttrs(),
                          /*elidedAttrs=*/{OpType::getSpecialAttrNames()});
  p << " : " << op.getSourceType() << " " << resultTypeKeyword << " "
    << op.getType();
}
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// Print a SubViewOp via the shared offsets/sizes/strides printer, using the
/// defaults (no extra operands, "to" result-type keyword).
static void print(OpAsmPrinter &p, SubViewOp op) {
  printOpWithOffsetsSizesAndStrides<SubViewOp>(p, op);
}
|
|
|
|
|
2020-10-02 06:30:56 -04:00
|
|
|
/// Parse of the form:
|
2020-05-12 17:17:34 -04:00
|
|
|
/// ```
|
2020-10-02 06:30:56 -04:00
|
|
|
/// `name` ssa-name (extra-operands)?
|
|
|
|
/// `[` offset-list `]` `[` size-list `]` `[` stride-list `]`
|
|
|
|
/// `:` strided-memref-type `resultTypeKeyword` strided-memref-type
|
2020-05-12 17:17:34 -04:00
|
|
|
/// ```
|
2020-10-02 05:32:35 -04:00
|
|
|
template <typename OpType>
static ParseResult parseOpWithOffsetsSizesAndStrides(
    OpAsmParser &parser, OperationState &result,
    std::function<ParseResult(OpAsmParser &p,
                              OpAsmParser::OperandType &dstInfo)>
        parseExtraOperand = nullptr,
    StringRef resultTypeKeyword = "to") {
  OpAsmParser::OperandType srcInfo, dstInfo;
  SmallVector<OpAsmParser::OperandType, 4> offsetsInfo, sizesInfo, stridesInfo;
  // Offsets/sizes/strides operands are always of index type.
  auto indexType = parser.getBuilder().getIndexType();
  Type srcType, dstType;
  if (parser.parseOperand(srcInfo))
    return failure();
  // Optionally parse one extra operand (e.g. a destination) right after the
  // source.
  if (parseExtraOperand && parseExtraOperand(parser, dstInfo))
    return failure();
  // Parse the three bracketed lists; static entries are collected into the
  // corresponding attribute (dynamic entries use the given sentinel), SSA
  // entries into the *Info vectors.
  if (parseListOfOperandsOrIntegers(
          parser, result, OpType::getStaticOffsetsAttrName(),
          ShapedType::kDynamicStrideOrOffset, offsetsInfo) ||
      parseListOfOperandsOrIntegers(parser, result,
                                    OpType::getStaticSizesAttrName(),
                                    ShapedType::kDynamicSize, sizesInfo) ||
      parseListOfOperandsOrIntegers(
          parser, result, OpType::getStaticStridesAttrName(),
          ShapedType::kDynamicStrideOrOffset, stridesInfo))
    return failure();

  // Handle segment sizes.
  auto b = parser.getBuilder();
  SmallVector<int, 4> segmentSizes = {1, static_cast<int>(offsetsInfo.size()),
                                      static_cast<int>(sizesInfo.size()),
                                      static_cast<int>(stridesInfo.size())};
  // If we parse an extra operand it needs to appear in the segmentSizes.
  // (Both the source and the extra operand segments have size 1, so the
  // insertion position among the leading entries is interchangeable.)
  if (parseExtraOperand)
    segmentSizes.insert(segmentSizes.begin(), 1);
  result.addAttribute(OpType::getOperandSegmentSizeAttr(),
                      b.getI32VectorAttr(segmentSizes));

  // NOTE: operand resolution order below must match the segment order:
  // source, (extra operand), offsets, sizes, strides.
  return failure(
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(srcType) ||
      parser.parseKeywordType(resultTypeKeyword.str().c_str(), dstType) ||
      parser.resolveOperand(srcInfo, srcType, result.operands) ||
      (parseExtraOperand &&
       parser.resolveOperand(dstInfo, dstType, result.operands)) ||
      parser.resolveOperands(offsetsInfo, indexType, result.operands) ||
      parser.resolveOperands(sizesInfo, indexType, result.operands) ||
      parser.resolveOperands(stridesInfo, indexType, result.operands) ||
      parser.addTypeToList(dstType, result.types));
}
|
2019-03-20 17:25:34 -07:00
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// Parse a SubViewOp with the shared offsets/sizes/strides parser defaults
/// (no extra operand, "to" result-type keyword).
static ParseResult parseSubViewOp(OpAsmParser &parser, OperationState &result) {
  ParseResult status =
      parseOpWithOffsetsSizesAndStrides<SubViewOp>(parser, result);
  return status;
}
|
|
|
|
|
[mlir] Revisit std.subview handling of static information.
Summary:
The main objective of this revision is to change the way static information is represented, propagated and canonicalized in the SubViewOp.
In the current implementation the issue is that canonicalization may strictly lose information because static offsets are combined in irrecoverable ways into the result type, in order to fit the strided memref representation.
The core semantics of the op do not change but the parser and printer do: the op always requires `rank` offsets, sizes and strides. These quantities can now be either SSA values or static integer attributes.
The result type is automatically deduced from the static information and more powerful canonicalizations (as powerful as the representation with sentinel `?` values allows). Previously static information was inferred on a best-effort basis from looking at the source and destination type.
Relevant tests are rewritten to use the idiomatic `offset: x, strides : [...]`-form. Bugs are corrected along the way that were not trivially visible in flattened strided memref form.
It is an open question, and a longer discussion, whether a better result type representation would be a nicer alternative. For now, the subview op carries the required semantic.
Reviewers: ftynse, mravishankar, antiagainst, rriddle!, andydavis1, timshen, asaadaldien, stellaraccident
Reviewed By: mravishankar
Subscribers: aartbik, bondhugula, mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, liufengdb, stephenneuendorffer, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D79662
2020-05-11 17:38:20 -04:00
|
|
|
void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
|
2020-05-12 17:17:34 -04:00
|
|
|
ArrayRef<int64_t> staticOffsets,
|
|
|
|
ArrayRef<int64_t> staticSizes,
|
|
|
|
ArrayRef<int64_t> staticStrides, ValueRange offsets,
|
|
|
|
ValueRange sizes, ValueRange strides,
|
[mlir] Revisit std.subview handling of static information.
Summary:
The main objective of this revision is to change the way static information is represented, propagated and canonicalized in the SubViewOp.
In the current implementation the issue is that canonicalization may strictly lose information because static offsets are combined in irrecoverable ways into the result type, in order to fit the strided memref representation.
The core semantics of the op do not change but the parser and printer do: the op always requires `rank` offsets, sizes and strides. These quantities can now be either SSA values or static integer attributes.
The result type is automatically deduced from the static information and more powerful canonicalizations (as powerful as the representation with sentinel `?` values allows). Previously static information was inferred on a best-effort basis from looking at the source and destination type.
Relevant tests are rewritten to use the idiomatic `offset: x, strides : [...]`-form. Bugs are corrected along the way that were not trivially visible in flattened strided memref form.
It is an open question, and a longer discussion, whether a better result type representation would be a nicer alternative. For now, the subview op carries the required semantic.
Reviewers: ftynse, mravishankar, antiagainst, rriddle!, andydavis1, timshen, asaadaldien, stellaraccident
Reviewed By: mravishankar
Subscribers: aartbik, bondhugula, mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, liufengdb, stephenneuendorffer, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D79662
2020-05-11 17:38:20 -04:00
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
2020-05-12 17:17:34 -04:00
|
|
|
auto sourceMemRefType = source.getType().cast<MemRefType>();
|
2020-10-02 05:32:35 -04:00
|
|
|
auto resultType = inferResultType(sourceMemRefType, staticOffsets,
|
|
|
|
staticSizes, staticStrides);
|
2020-05-12 17:17:34 -04:00
|
|
|
build(b, result, resultType, source, offsets, sizes, strides,
|
|
|
|
b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes),
|
|
|
|
b.getI64ArrayAttr(staticStrides));
|
2020-05-12 15:18:50 +02:00
|
|
|
result.addAttributes(attrs);
|
[mlir] Revisit std.subview handling of static information.
Summary:
The main objective of this revision is to change the way static information is represented, propagated and canonicalized in the SubViewOp.
In the current implementation the issue is that canonicalization may strictly lose information because static offsets are combined in irrecoverable ways into the result type, in order to fit the strided memref representation.
The core semantics of the op do not change but the parser and printer do: the op always requires `rank` offsets, sizes and strides. These quantities can now be either SSA values or static integer attributes.
The result type is automatically deduced from the static information and more powerful canonicalizations (as powerful as the representation with sentinel `?` values allows). Previously static information was inferred on a best-effort basis from looking at the source and destination type.
Relevant tests are rewritten to use the idiomatic `offset: x, strides : [...]`-form. Bugs are corrected along the way that were not trivially visible in flattened strided memref form.
It is an open question, and a longer discussion, whether a better result type representation would be a nicer alternative. For now, the subview op carries the required semantic.
Reviewers: ftynse, mravishankar, antiagainst, rriddle!, andydavis1, timshen, asaadaldien, stellaraccident
Reviewed By: mravishankar
Subscribers: aartbik, bondhugula, mehdi_amini, rriddle, jpienaar, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, liufengdb, stephenneuendorffer, Joonsoo, bader, grosul1, frgossen, Kayjukh, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D79662
2020-05-11 17:38:20 -04:00
|
|
|
}
|
|
|
|
|
2020-05-12 17:17:34 -04:00
|
|
|
/// Build a SubViewOp with all dynamic entries: `staticOffsets`, `staticSizes`
|
|
|
|
/// and `staticStrides` are automatically filled with source-memref-rank
|
|
|
|
/// sentinel values that encode dynamic entries.
|
|
|
|
void mlir::SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
                            ValueRange offsets, ValueRange sizes,
                            ValueRange strides,
                            ArrayRef<NamedAttribute> attrs) {
  // Every static entry is a dynamic sentinel; the actual values are the SSA
  // operands.
  unsigned rank = source.getType().cast<MemRefType>().getRank();
  SmallVector<int64_t, 4> allDynamicOffsets(rank,
                                            ShapedType::kDynamicStrideOrOffset);
  SmallVector<int64_t, 4> allDynamicSizes(rank, ShapedType::kDynamicSize);
  SmallVector<int64_t, 4> allDynamicStrides(rank,
                                            ShapedType::kDynamicStrideOrOffset);
  build(b, result, source, allDynamicOffsets, allDynamicSizes,
        allDynamicStrides, offsets, sizes, strides, attrs);
}
|
|
|
|
|
2020-09-30 07:42:43 +00:00
|
|
|
/// Build a SubViewOp as above but with custom result type.
|
|
|
|
void mlir::SubViewOp::build(OpBuilder &b, OperationState &result,
                            MemRefType resultType, Value source,
                            ArrayRef<int64_t> staticOffsets,
                            ArrayRef<int64_t> staticSizes,
                            ArrayRef<int64_t> staticStrides, ValueRange offsets,
                            ValueRange sizes, ValueRange strides,
                            ArrayRef<NamedAttribute> attrs) {
  // Wrap the static lists as I64 array attributes before forwarding to the
  // base builder, then append any extra attributes.
  auto staticOffsetsAttr = b.getI64ArrayAttr(staticOffsets);
  auto staticSizesAttr = b.getI64ArrayAttr(staticSizes);
  auto staticStridesAttr = b.getI64ArrayAttr(staticStrides);
  build(b, result, resultType, source, offsets, sizes, strides,
        staticOffsetsAttr, staticSizesAttr, staticStridesAttr);
  result.addAttributes(attrs);
}
|
|
|
|
|
|
|
|
/// Build a SubViewOp as above but with custom result type.
|
|
|
|
void mlir::SubViewOp::build(OpBuilder &b, OperationState &result,
                            MemRefType resultType, Value source,
                            ValueRange offsets, ValueRange sizes,
                            ValueRange strides,
                            ArrayRef<NamedAttribute> attrs) {
  // Fill every static slot with its dynamic sentinel; the actual values are
  // the SSA operands.
  unsigned rank = source.getType().cast<MemRefType>().getRank();
  SmallVector<int64_t, 4> allDynamicOffsets(rank,
                                            ShapedType::kDynamicStrideOrOffset);
  SmallVector<int64_t, 4> allDynamicSizes(rank, ShapedType::kDynamicSize);
  SmallVector<int64_t, 4> allDynamicStrides(rank,
                                            ShapedType::kDynamicStrideOrOffset);
  build(b, result, resultType, source, allDynamicOffsets, allDynamicSizes,
        allDynamicStrides, offsets, sizes, strides, attrs);
}
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// For ViewLikeOpInterface.
|
|
|
|
Value SubViewOp::getViewSource() {
  // A subview aliases the memref given as its `source` operand.
  return source();
}
|
|
|
|
|
2020-10-08 13:28:55 +00:00
|
|
|
/// Greedily match `reducedShape` as a subsequence of `originalShape` and
/// return, per original dimension, whether it is kept. Any dropped dimension
/// must have extent 1; otherwise (or if some reduced dimension is left
/// unmatched) no mask exists and llvm::None is returned.
llvm::Optional<SmallVector<bool, 4>>
mlir::computeRankReductionMask(ArrayRef<int64_t> originalShape,
                               ArrayRef<int64_t> reducedShape) {
  size_t fullRank = originalShape.size(), smallRank = reducedShape.size();
  SmallVector<bool, 4> keep(fullRank);
  unsigned next = 0; // next entry of reducedShape to match
  for (unsigned dim = 0; dim < fullRank; ++dim) {
    // Greedily consume a matching reduced dimension.
    bool matches = next < smallRank && originalShape[dim] == reducedShape[next];
    keep[dim] = matches;
    if (matches) {
      ++next;
      continue;
    }
    // Only unit dimensions may be dropped.
    if (originalShape[dim] != 1)
      return {};
  }
  // Every reduced dimension must have been consumed.
  if (next != smallRank)
    return {};
  return keep;
}
|
|
|
|
|
2020-10-08 07:38:41 +00:00
|
|
|
/// Outcome of checking a (possibly rank-reduced) subview result type against
/// the type inferred from the op's static offsets/sizes/strides. Scoped enum:
/// all uses are already qualified, and this prevents implicit conversion to
/// int.
enum class SubViewVerificationResult {
  Success,
  RankTooLarge,
  SizeMismatch,
  StrideMismatch,
  ElemTypeMismatch,
  MemSpaceMismatch,
  AffineMapMismatch
};
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// Checks if `original` Type type can be rank reduced to `reduced` type.
|
2020-09-30 07:42:43 +00:00
|
|
|
/// This function is slight variant of `is subsequence` algorithm where
|
|
|
|
/// not matching dimension must be 1.
|
2020-10-08 07:38:41 +00:00
|
|
|
static SubViewVerificationResult isRankReducedType(Type originalType,
                                                   Type reducedType) {
  // Identical types are trivially compatible.
  if (originalType == reducedType)
    return SubViewVerificationResult::Success;
  // Rank reduction is only checked between two ranked tensors or two
  // memrefs; any other combination is accepted here.
  if (!originalType.isa<RankedTensorType>() && !originalType.isa<MemRefType>())
    return SubViewVerificationResult::Success;
  if (originalType.isa<RankedTensorType>() &&
      !reducedType.isa<RankedTensorType>())
    return SubViewVerificationResult::Success;
  if (originalType.isa<MemRefType>() && !reducedType.isa<MemRefType>())
    return SubViewVerificationResult::Success;

  ShapedType originalShapedType = originalType.cast<ShapedType>();
  ShapedType reducedShapedType = reducedType.cast<ShapedType>();

  // Rank and size logic is valid for all ShapedTypes.
  ArrayRef<int64_t> originalShape = originalShapedType.getShape();
  ArrayRef<int64_t> reducedShape = reducedShapedType.getShape();
  unsigned originalRank = originalShape.size(),
           reducedRank = reducedShape.size();
  if (reducedRank > originalRank)
    return SubViewVerificationResult::RankTooLarge;

  // Compute which original dimensions are kept (only unit dimensions may be
  // dropped).
  auto optionalMask = computeRankReductionMask(originalShape, reducedShape);

  // Sizes cannot be matched in case empty vector is returned.
  if (!optionalMask.hasValue())
    return SubViewVerificationResult::SizeMismatch;

  // We are done for the tensor case.
  if (originalType.isa<RankedTensorType>())
    return SubViewVerificationResult::Success;

  // Strided layout logic is relevant for MemRefType only.
  MemRefType original = originalType.cast<MemRefType>();
  MemRefType reduced = reducedType.cast<MemRefType>();
  MLIRContext *c = original.getContext();
  int64_t originalOffset, reducedOffset;
  SmallVector<int64_t, 4> originalStrides, reducedStrides, keepStrides;
  SmallVector<bool, 4> keepMask = optionalMask.getValue();
  getStridesAndOffset(original, originalStrides, originalOffset);
  getStridesAndOffset(reduced, reducedStrides, reducedOffset);

  // Filter strides based on the mask and check that they are the same
  // as reduced ones. Each kept dimension must carry the same stride in both
  // types.
  unsigned reducedIdx = 0;
  for (unsigned originalIdx = 0; originalIdx < originalRank; ++originalIdx) {
    if (keepMask[originalIdx]) {
      if (originalStrides[originalIdx] != reducedStrides[reducedIdx++])
        return SubViewVerificationResult::StrideMismatch;
      keepStrides.push_back(originalStrides[originalIdx]);
    }
  }

  if (original.getElementType() != reduced.getElementType())
    return SubViewVerificationResult::ElemTypeMismatch;

  if (original.getMemorySpace() != reduced.getMemorySpace())
    return SubViewVerificationResult::MemSpaceMismatch;

  // If the reduced type carries an explicit layout, it must equal the layout
  // rebuilt from the kept strides and the original offset.
  auto reducedMap = makeStridedLinearLayoutMap(keepStrides, originalOffset, c);
  if (!reduced.getAffineMaps().empty() &&
      reducedMap != reduced.getAffineMaps().front())
    return SubViewVerificationResult::AffineMapMismatch;

  return SubViewVerificationResult::Success;
}
|
|
|
|
|
|
|
|
/// Translate a SubViewVerificationResult into either success() or a
/// diagnostic emitted on `op`, mentioning `expectedType` (the inferred,
/// non-rank-reduced result type) where relevant.
template <typename OpTy>
static LogicalResult produceSubViewErrorMsg(SubViewVerificationResult result,
                                            OpTy op, Type expectedType) {
  auto memrefType = expectedType.cast<ShapedType>();
  switch (result) {
  case SubViewVerificationResult::Success:
    return success();
  case SubViewVerificationResult::RankTooLarge:
    return op.emitError("expected result rank to be smaller or equal to ")
           << "the source rank.";
  case SubViewVerificationResult::SizeMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result sizes)";
  case SubViewVerificationResult::StrideMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result strides)";
  case SubViewVerificationResult::ElemTypeMismatch:
    return op.emitError("expected result element type to be ")
           << memrefType.getElementType();
  case SubViewVerificationResult::MemSpaceMismatch:
    return op.emitError("expected result and source memory spaces to match.");
  case SubViewVerificationResult::AffineMapMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result affine map)";
  }
  // All enumerators are handled above; keep the compiler happy about the
  // switch fallthrough.
  llvm_unreachable("unexpected subview verification result");
}
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
|
2020-05-12 17:17:34 -04:00
|
|
|
/// Verifier for SubViewOp.
|
2020-03-04 09:44:36 -08:00
|
|
|
static LogicalResult verify(SubViewOp op) {
  MemRefType baseType = op.getSourceType();
  MemRefType subViewType = op.getType();

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != subViewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and subview memref type " << subViewType;

  // Verify that the base memref type has a strided layout map.
  if (!isStrided(baseType))
    return op.emitError("base type ") << baseType << " is not strided";

  // Shared checks for the offsets/sizes/strides operand+attribute lists.
  if (failed(verifyOpWithOffsetSizesAndStrides(op)))
    return failure();

  // Verify result type against inferred type.
  auto expectedType = SubViewOp::inferResultType(
      baseType, extractFromI64ArrayAttr(op.static_offsets()),
      extractFromI64ArrayAttr(op.static_sizes()),
      extractFromI64ArrayAttr(op.static_strides()));

  // The declared result type may be a rank-reduced version of the inferred
  // one; translate the comparison outcome into a diagnostic.
  auto result = isRankReducedType(expectedType, subViewType);
  return produceSubViewErrorMsg(result, op, expectedType);
}
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// Stream a Range as `range <offset>:<size>:<stride>`.
raw_ostream &mlir::operator<<(raw_ostream &os, Range &range) {
  os << "range " << range.offset;
  os << ":" << range.size;
  os << ":" << range.stride;
  return os;
}
|
2019-11-07 08:04:33 -08:00
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// Return the list of Range (i.e. offset, size, stride). Each Range
|
2020-05-12 22:21:36 -04:00
|
|
|
/// entry contains either the dynamic value or a ConstantIndexOp constructed
|
|
|
|
/// with `b` at location `loc`.
|
2020-10-02 05:32:35 -04:00
|
|
|
template <typename OpType>
|
|
|
|
static SmallVector<Range, 8> getOrCreateRangesImpl(OpType op, OpBuilder &b,
|
|
|
|
Location loc) {
|
2020-05-12 22:21:36 -04:00
|
|
|
SmallVector<Range, 8> res;
|
2020-10-02 05:32:35 -04:00
|
|
|
unsigned rank = op.getSourceRank();
|
2020-05-12 22:21:36 -04:00
|
|
|
res.reserve(rank);
|
|
|
|
for (unsigned idx = 0; idx < rank; ++idx) {
|
2020-10-02 05:32:35 -04:00
|
|
|
Value offset =
|
|
|
|
op.isDynamicOffset(idx)
|
|
|
|
? op.getDynamicOffset(idx)
|
|
|
|
: b.create<ConstantIndexOp>(loc, op.getStaticOffset(idx));
|
|
|
|
Value size = op.isDynamicSize(idx)
|
|
|
|
? op.getDynamicSize(idx)
|
|
|
|
: b.create<ConstantIndexOp>(loc, op.getStaticSize(idx));
|
|
|
|
Value stride =
|
|
|
|
op.isDynamicStride(idx)
|
|
|
|
? op.getDynamicStride(idx)
|
|
|
|
: b.create<ConstantIndexOp>(loc, op.getStaticStride(idx));
|
2020-05-12 22:21:36 -04:00
|
|
|
res.emplace_back(Range{offset, size, stride});
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
/// Delegate to the shared implementation for ops with offsets/sizes/strides.
SmallVector<Range, 8> SubViewOp::getOrCreateRanges(OpBuilder &b, Location loc) {
  return ::getOrCreateRangesImpl<SubViewOp>(*this, b, loc);
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
namespace {
|
2019-11-07 10:19:54 -08:00
|
|
|
|
2020-05-12 17:17:34 -04:00
|
|
|
/// Take a list of `values` with potential new constant to extract and a list
|
|
|
|
/// of `constantValues` with `values.size()` sentinels that evaluate to true by
|
|
|
|
/// applying `isDynamic`.
|
|
|
|
/// Detects the `values` produced by a ConstantIndexOp and places the new
|
|
|
|
/// constant in place of the corresponding sentinel value.
|
|
|
|
void canonicalizeSubViewPart(SmallVectorImpl<Value> &values,
                             SmallVectorImpl<int64_t> &constantValues,
                             llvm::function_ref<bool(int64_t)> isDynamic) {
  // Fast path: nothing to do unless some operand is a constant index.
  bool hasNewStaticValue = llvm::any_of(
      values, [](Value val) { return matchPattern(val, m_ConstantIndex()); });
  if (hasNewStaticValue) {
    // `cstIdx` walks the static entries; `valIdx` walks the (shrinking)
    // dynamic operand list. Each dynamic sentinel consumes one operand.
    for (unsigned cstIdx = 0, valIdx = 0, e = constantValues.size();
         cstIdx != e; ++cstIdx) {
      // Was already static, skip.
      if (!isDynamic(constantValues[cstIdx]))
        continue;
      // Newly static, move from Value to constant.
      if (matchPattern(values[valIdx], m_ConstantIndex())) {
        constantValues[cstIdx] =
            cast<ConstantIndexOp>(values[valIdx].getDefiningOp()).getValue();
        // Erase for impl. simplicity. Reverse iterator if we really must.
        // `valIdx` is intentionally not incremented: after the erase the next
        // dynamic operand occupies the same index.
        values.erase(std::next(values.begin(), valIdx));
        continue;
      }
      // Remains dynamic: move to the next value.
      ++valIdx;
    }
  }
}
|
2019-11-07 08:04:33 -08:00
|
|
|
|
2020-10-02 05:40:52 -04:00
|
|
|
/// Replace `op` by `newOp`, inserting a MemRefCastOp back to the original
/// result type so existing users keep type-checking.
static void replaceWithNewOp(PatternRewriter &rewriter, SubViewOp op,
                             SubViewOp newOp) {
  Type originalResultType = op.getType();
  rewriter.replaceOpWithNewOp<MemRefCastOp>(op, newOp, originalResultType);
}
|
|
|
|
|
|
|
|
/// Replace `op` by `newOp`, inserting a TensorCastOp back to the original
/// result type so existing users keep type-checking.
static void replaceWithNewOp(PatternRewriter &rewriter, SubTensorOp op,
                             SubTensorOp newOp) {
  Type originalResultType = op.getType();
  rewriter.replaceOpWithNewOp<TensorCastOp>(op, newOp, originalResultType);
}
|
|
|
|
|
2020-05-12 17:17:34 -04:00
|
|
|
/// Pattern to rewrite a subview op with constant arguments.
|
2020-10-02 05:40:52 -04:00
|
|
|
template <typename OpType>
|
|
|
|
class OpWithOffsetSizesAndStridesConstantArgumentFolder final
|
|
|
|
: public OpRewritePattern<OpType> {
|
2020-03-04 09:44:36 -08:00
|
|
|
public:
|
2020-10-02 05:40:52 -04:00
|
|
|
using OpRewritePattern<OpType>::OpRewritePattern;
|
2019-11-07 08:04:33 -08:00
|
|
|
|
2020-10-02 05:40:52 -04:00
|
|
|
LogicalResult matchAndRewrite(OpType op,
|
2020-03-17 20:07:55 -07:00
|
|
|
PatternRewriter &rewriter) const override {
|
2020-05-12 17:17:34 -04:00
|
|
|
// No constant operand, just return;
|
2020-10-02 05:40:52 -04:00
|
|
|
if (llvm::none_of(op.getOperands(), [](Value operand) {
|
2020-05-12 17:17:34 -04:00
|
|
|
return matchPattern(operand, m_ConstantIndex());
|
|
|
|
}))
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2019-11-07 08:04:33 -08:00
|
|
|
|
2020-05-12 17:17:34 -04:00
|
|
|
// At least one of offsets/sizes/strides is a new constant.
|
|
|
|
// Form the new list of operands and constant attributes from the existing.
|
2020-10-02 05:40:52 -04:00
|
|
|
SmallVector<Value, 8> newOffsets(op.offsets());
|
2020-05-12 17:17:34 -04:00
|
|
|
SmallVector<int64_t, 8> newStaticOffsets =
|
2020-10-02 05:40:52 -04:00
|
|
|
extractFromI64ArrayAttr(op.static_offsets());
|
|
|
|
assert(newStaticOffsets.size() == op.getSourceRank());
|
2020-05-12 17:17:34 -04:00
|
|
|
canonicalizeSubViewPart(newOffsets, newStaticOffsets,
|
|
|
|
ShapedType::isDynamicStrideOrOffset);
|
|
|
|
|
2020-10-02 05:40:52 -04:00
|
|
|
SmallVector<Value, 8> newSizes(op.sizes());
|
2020-05-12 17:17:34 -04:00
|
|
|
SmallVector<int64_t, 8> newStaticSizes =
|
2020-10-02 05:40:52 -04:00
|
|
|
extractFromI64ArrayAttr(op.static_sizes());
|
|
|
|
assert(newStaticOffsets.size() == op.getSourceRank());
|
2020-05-12 17:17:34 -04:00
|
|
|
canonicalizeSubViewPart(newSizes, newStaticSizes, ShapedType::isDynamic);
|
|
|
|
|
2020-10-02 05:40:52 -04:00
|
|
|
SmallVector<Value, 8> newStrides(op.strides());
|
2020-05-12 17:17:34 -04:00
|
|
|
SmallVector<int64_t, 8> newStaticStrides =
|
2020-10-02 05:40:52 -04:00
|
|
|
extractFromI64ArrayAttr(op.static_strides());
|
|
|
|
assert(newStaticOffsets.size() == op.getSourceRank());
|
2020-05-12 17:17:34 -04:00
|
|
|
canonicalizeSubViewPart(newStrides, newStaticStrides,
|
|
|
|
ShapedType::isDynamicStrideOrOffset);
|
|
|
|
|
|
|
|
// Create the new op in canonical form.
|
2020-10-02 05:40:52 -04:00
|
|
|
auto newOp = rewriter.create<OpType>(
|
|
|
|
op.getLoc(), op.source(), newStaticOffsets, newStaticSizes,
|
|
|
|
newStaticStrides, newOffsets, newSizes, newStrides);
|
2020-05-12 15:18:50 +02:00
|
|
|
|
2020-10-02 05:40:52 -04:00
|
|
|
replaceWithNewOp(rewriter, op, newOp);
|
2020-05-12 17:17:34 -04:00
|
|
|
|
2020-03-17 20:07:55 -07:00
|
|
|
return success();
|
Canonicalize static alloc followed by memref_cast and std.view
Summary: Rewrite alloc, memref_cast, std.view into allo, std.view by droping memref_cast.
Reviewers: nicolasvasilache
Subscribers: mehdi_amini, rriddle, jpienaar, burmako, shauheen, antiagainst, arpith-jacob, mgester, lucyrfox, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72379
2020-01-07 17:46:40 -08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-11-07 08:04:33 -08:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
2020-05-06 09:05:15 -04:00
|
|
|
/// Determines whether MemRefCastOp casts to a more dynamic version of the
|
|
|
|
/// source memref. This is useful to fold a memref_cast into a consuming op
|
|
|
|
/// and implement canonicalization patterns for ops in different dialects that
|
|
|
|
/// may consume the results of memref_cast operations. Such foldable memref_cast
|
|
|
|
/// operations are typically inserted as `view` and `subview` ops are
|
|
|
|
/// canonicalized, to preserve the type compatibility of their uses.
|
|
|
|
///
|
|
|
|
/// Returns true when all conditions are met:
|
|
|
|
/// 1. source and result are ranked memrefs with strided semantics and same
|
|
|
|
/// element type and rank.
|
|
|
|
/// 2. each of the source's size, offset or stride has more static information
|
|
|
|
/// than the corresponding result's size, offset or stride.
|
|
|
|
///
|
|
|
|
/// Example 1:
|
|
|
|
/// ```mlir
|
|
|
|
/// %1 = memref_cast %0 : memref<8x16xf32> to memref<?x?xf32>
|
|
|
|
/// %2 = consumer %1 ... : memref<?x?xf32> ...
|
|
|
|
/// ```
|
|
|
|
///
|
|
|
|
/// may fold into:
|
|
|
|
///
|
|
|
|
/// ```mlir
|
|
|
|
/// %2 = consumer %0 ... : memref<8x16xf32> ...
|
|
|
|
/// ```
|
|
|
|
///
|
|
|
|
/// Example 2:
|
|
|
|
/// ```
|
|
|
|
/// %1 = memref_cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
|
|
|
|
/// to memref<?x?xf32>
|
|
|
|
/// consumer %1 : memref<?x?xf32> ...
|
|
|
|
/// ```
|
|
|
|
///
|
|
|
|
/// may fold into:
|
|
|
|
///
|
|
|
|
/// ```
|
|
|
|
/// consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
|
|
|
|
/// ```
|
|
|
|
bool mlir::canFoldIntoConsumerOp(MemRefCastOp castOp) {
|
|
|
|
MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
|
|
|
|
MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();
|
|
|
|
|
|
|
|
// Requires ranked MemRefType.
|
|
|
|
if (!sourceType || !resultType)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Requires same elemental type.
|
|
|
|
if (sourceType.getElementType() != resultType.getElementType())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Requires same rank.
|
|
|
|
if (sourceType.getRank() != resultType.getRank())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Only fold casts between strided memref forms.
|
|
|
|
int64_t sourceOffset, resultOffset;
|
|
|
|
SmallVector<int64_t, 4> sourceStrides, resultStrides;
|
|
|
|
if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
|
|
|
|
failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If cast is towards more static sizes along any dimension, don't fold.
|
|
|
|
for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
|
|
|
|
auto ss = std::get<0>(it), st = std::get<1>(it);
|
|
|
|
if (ss != st)
|
|
|
|
if (MemRefType::isDynamic(ss) && !MemRefType::isDynamic(st))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If cast is towards more static offset along any dimension, don't fold.
|
|
|
|
if (sourceOffset != resultOffset)
|
|
|
|
if (MemRefType::isDynamicStrideOrOffset(sourceOffset) &&
|
|
|
|
!MemRefType::isDynamicStrideOrOffset(resultOffset))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If cast is towards more static strides along any dimension, don't fold.
|
|
|
|
for (auto it : llvm::zip(sourceStrides, resultStrides)) {
|
|
|
|
auto ss = std::get<0>(it), st = std::get<1>(it);
|
|
|
|
if (ss != st)
|
|
|
|
if (MemRefType::isDynamicStrideOrOffset(ss) &&
|
|
|
|
!MemRefType::isDynamicStrideOrOffset(st))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-10-05 14:36:19 +00:00
|
|
|
/// Counterpart of `canFoldIntoConsumerOp(MemRefCastOp castOp)` for tensors.
|
|
|
|
/// Determines whether TensorCastOp casts to a more dynamic version of the
|
|
|
|
/// source tensor. This is useful to fold a tensor_cast into a consuming op and
|
|
|
|
/// implement canonicalization patterns for ops in different dialects that may
|
|
|
|
/// consume the results of tensor_cast operations. Such foldable tensor_cast
|
|
|
|
/// operations are typically inserted as `subtensor` ops and are canonicalized,
|
|
|
|
/// to preserve the type compatibility of their uses.
|
|
|
|
///
|
|
|
|
/// Returns true when all conditions are met:
|
|
|
|
/// 1. source and result are ranked tensors with same element type and rank.
|
|
|
|
/// 2. the tensor type has more static information than the result
|
|
|
|
///
|
|
|
|
/// Example:
|
|
|
|
/// ```mlir
|
|
|
|
/// %1 = tensor_cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
|
|
|
|
/// %2 = consumer %1 ... : tensor<?x?xf32> ...
|
|
|
|
/// ```
|
|
|
|
///
|
|
|
|
/// folds into:
|
|
|
|
///
|
|
|
|
/// ```mlir
|
|
|
|
/// %2 = consumer %0 ... : tensor<8x16xf32> ...
|
|
|
|
/// ```
|
|
|
|
bool mlir::canFoldIntoConsumerOp(TensorCastOp castOp) {
|
|
|
|
if (!castOp)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
RankedTensorType sourceType =
|
|
|
|
castOp.source().getType().dyn_cast<RankedTensorType>();
|
|
|
|
RankedTensorType resultType = castOp.getType().dyn_cast<RankedTensorType>();
|
|
|
|
|
|
|
|
// Requires RankedTensorType.
|
|
|
|
if (!sourceType || !resultType)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Requires same elemental type.
|
|
|
|
if (sourceType.getElementType() != resultType.getElementType())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Requires same rank.
|
|
|
|
if (sourceType.getRank() != resultType.getRank())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If cast is towards more static sizes along any dimension, don't fold.
|
|
|
|
for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
|
|
|
|
auto ss = std::get<0>(it), st = std::get<1>(it);
|
|
|
|
if (ss != st)
|
|
|
|
if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-05-19 22:16:15 +02:00
|
|
|
namespace {
/// Pattern to rewrite a subview op with MemRefCast arguments.
/// This essentially pushes memref_cast past its consuming subview when
/// `canFoldIntoConsumerOp` is true.
///
/// Example:
/// ```
///   %0 = memref_cast %V : memref<16x16xf32> to memref<?x?xf32>
///   %1 = subview %0[0, 0][3, 4][1, 1] :
///     memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
/// ```
/// is rewritten into:
/// ```
///   %0 = subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
///   %1 = memref_cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
///     memref<3x4xf32, offset:?, strides:[?, 1]>
/// ```
class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    // Any constant operand, just return to let SubViewOpConstantFolder kick in.
    if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
          return matchPattern(operand, m_ConstantIndex());
        }))
      return failure();

    // Only fire when the subview source is produced by a memref_cast ...
    auto castOp = subViewOp.source().getDefiningOp<MemRefCastOp>();
    if (!castOp)
      return failure();

    // ... and that cast only relaxes static type information.
    if (!canFoldIntoConsumerOp(castOp))
      return failure();

    /// Deduce the resultType of the SubViewOp using `inferSubViewResultType` on
    /// the cast source operand type and the SubViewOp static information. This
    /// is the resulting type if the MemRefCastOp were folded.
    Type resultType = SubViewOp::inferResultType(
        castOp.source().getType().cast<MemRefType>(),
        extractFromI64ArrayAttr(subViewOp.static_offsets()),
        extractFromI64ArrayAttr(subViewOp.static_sizes()),
        extractFromI64ArrayAttr(subViewOp.static_strides()));
    Value newSubView = rewriter.create<SubViewOp>(
        subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
        subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
        subViewOp.static_sizes(), subViewOp.static_strides());
    // Re-cast to the original result type so existing users keep type-checking.
    rewriter.replaceOpWithNewOp<MemRefCastOp>(subViewOp, subViewOp.getType(),
                                              newSubView);
    return success();
  }
};
} // end anonymous namespace
|
2020-05-06 09:05:15 -04:00
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
/// Register SubViewOp canonicalizations: constant-operand folding and
/// memref_cast absorption.
void SubViewOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                            MLIRContext *context) {
  results
      .insert<OpWithOffsetSizesAndStridesConstantArgumentFolder<SubViewOp>>(
          context);
  results.insert<SubViewOpMemRefCastFolder>(context);
}
|
|
|
|
|
2020-10-15 16:27:31 +02:00
|
|
|
/// A rank-0 subview of a rank-0 memref is the identity; forward the source.
OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
  bool isRankZeroIdentity = getResultRank() == 0 && getSourceRank() == 0;
  if (!isRankZeroIdentity)
    return {};
  return getViewSource();
}
|
|
|
|
|
2020-10-02 05:32:35 -04:00
|
|
|
//===----------------------------------------------------------------------===//
// SubTensorOp
//===----------------------------------------------------------------------===//

/// Print a subtensor op using the shared offsets/sizes/strides printer.
static void print(OpAsmPrinter &p, SubTensorOp op) {
  return printOpWithOffsetsSizesAndStrides<SubTensorOp>(p, op);
}
|
|
|
|
|
|
|
|
/// Parse a subtensor op using the shared offsets/sizes/strides parser.
static ParseResult parseSubTensorOp(OpAsmParser &parser,
                                    OperationState &result) {
  return parseOpWithOffsetsSizesAndStrides<SubTensorOp>(parser, result);
}
|
|
|
|
|
|
|
|
/// A subtensor result type can be fully inferred from the source type and the
|
|
|
|
/// static representation of offsets, sizes and strides. Special sentinels
|
|
|
|
/// encode the dynamic case.
|
|
|
|
Type SubTensorOp::inferResultType(RankedTensorType sourceRankedTensorType,
|
|
|
|
ArrayRef<int64_t> staticOffsets,
|
|
|
|
ArrayRef<int64_t> staticSizes,
|
|
|
|
ArrayRef<int64_t> staticStrides) {
|
|
|
|
unsigned rank = sourceRankedTensorType.getRank();
|
|
|
|
(void)rank;
|
|
|
|
assert(staticOffsets.size() == rank &&
|
|
|
|
"unexpected staticOffsets size mismatch");
|
|
|
|
assert(staticSizes.size() == rank && "unexpected staticSizes size mismatch");
|
|
|
|
assert(staticStrides.size() == rank &&
|
|
|
|
"unexpected staticStrides size mismatch");
|
|
|
|
return RankedTensorType::get(staticSizes,
|
|
|
|
sourceRankedTensorType.getElementType());
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Build a SubTensorOp from static offsets/sizes/strides plus the dynamic
/// operands backing any sentinel entries; the result type is inferred from the
/// source tensor type.
void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result,
                              Value source, ArrayRef<int64_t> staticOffsets,
                              ArrayRef<int64_t> staticSizes,
                              ArrayRef<int64_t> staticStrides,
                              ValueRange offsets, ValueRange sizes,
                              ValueRange strides,
                              ArrayRef<NamedAttribute> attrs) {
  auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  auto resultType = inferResultType(sourceRankedTensorType, staticOffsets,
                                    staticSizes, staticStrides);
  build(b, result, resultType, source, offsets, sizes, strides,
        b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes),
        b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}
|
|
|
|
|
|
|
|
/// Build a SubTensorOp with all dynamic entries: `staticOffsets`, `staticSizes`
|
|
|
|
/// and `staticStrides` are automatically filled with sentinel values that
|
|
|
|
/// encode dynamic entries.
|
|
|
|
void mlir::SubTensorOp::build(OpBuilder &b, OperationState &result,
|
|
|
|
Value source, ValueRange offsets,
|
|
|
|
ValueRange sizes, ValueRange strides,
|
|
|
|
ArrayRef<NamedAttribute> attrs) {
|
|
|
|
auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
|
|
|
|
unsigned rank = sourceRankedTensorType.getRank();
|
|
|
|
SmallVector<int64_t, 4> staticOffsetsVector(
|
|
|
|
rank, ShapedType::kDynamicStrideOrOffset);
|
|
|
|
SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
|
|
|
|
SmallVector<int64_t, 4> staticStridesVector(
|
|
|
|
rank, ShapedType::kDynamicStrideOrOffset);
|
|
|
|
build(b, result, source, staticOffsetsVector, staticSizesVector,
|
|
|
|
staticStridesVector, offsets, sizes, strides, attrs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return the loop ranges (offset/size/stride triples) of this subtensor,
/// materializing constants with `b` where needed; delegates to the shared
/// implementation.
SmallVector<Range, 8> SubTensorOp::getOrCreateRanges(OpBuilder &b,
                                                     Location loc) {
  return ::getOrCreateRangesImpl(*this, b, loc);
}
|
|
|
|
|
|
|
|
/// Verifier for SubTensorOp.
static LogicalResult verify(SubTensorOp op) {
  // Shared structural checks on offsets/sizes/strides operands & attributes.
  if (failed(verifyOpWithOffsetSizesAndStrides(op)))
    return failure();

  // Verify result type against inferred type.
  auto expectedType = SubTensorOp::inferResultType(
      op.getSourceType(), extractFromI64ArrayAttr(op.static_offsets()),
      extractFromI64ArrayAttr(op.static_sizes()),
      extractFromI64ArrayAttr(op.static_strides()));
  // The result may be a rank-reduced version of the inferred type; reuse the
  // subview diagnostics for any mismatch.
  auto result = isRankReducedType(expectedType, op.getType());
  return produceSubViewErrorMsg(result, op, expectedType);
}
|
|
|
|
|
2020-10-02 05:40:52 -04:00
|
|
|
/// Register SubTensorOp canonicalizations: fold constant index operands into
/// the op's static attributes.
void SubTensorOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                              MLIRContext *context) {
  using Folder = OpWithOffsetSizesAndStridesConstantArgumentFolder<SubTensorOp>;
  results.insert<Folder>(context);
}
|
|
|
|
|
2020-10-02 06:30:56 -04:00
|
|
|
//===----------------------------------------------------------------------===//
// SubTensorInsertOp
//===----------------------------------------------------------------------===//

/// Print a subtensor_insert op; the extra callback prints the destination
/// operand after the " into " keyword, and the result type is introduced by
/// "into" instead of the default keyword.
static void print(OpAsmPrinter &p, SubTensorInsertOp op) {
  return printOpWithOffsetsSizesAndStrides<SubTensorInsertOp>(
      p, op,
      [](OpAsmPrinter &p, SubTensorInsertOp op) { p << " into " << op.dest(); },
      /*resultTypeKeyword=*/"into");
}
|
|
|
|
|
|
|
|
/// Parse a subtensor_insert op; the callback consumes the "into <dest>" part
/// and "into" also introduces the result type.
static ParseResult parseSubTensorInsertOp(OpAsmParser &parser,
                                          OperationState &result) {
  return parseOpWithOffsetsSizesAndStrides<SubTensorInsertOp>(
      parser, result,
      [](OpAsmParser &parser, OpAsmParser::OperandType &dstInfo) {
        return failure(parser.parseKeyword("into") ||
                       parser.parseOperand(dstInfo));
      },
      "into");
}
|
|
|
|
|
|
|
|
/// Build a SubTensorInsertOp from static offsets/sizes/strides plus the
/// dynamic operands backing any sentinel entries. The result type is always
/// the destination tensor's type.
void mlir::SubTensorInsertOp::build(
    OpBuilder &b, OperationState &result, Value source, Value dest,
    ArrayRef<int64_t> staticOffsets, ArrayRef<int64_t> staticSizes,
    ArrayRef<int64_t> staticStrides, ValueRange offsets, ValueRange sizes,
    ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  build(b, result, dest.getType(), source, dest, offsets, sizes, strides,
        b.getI64ArrayAttr(staticOffsets), b.getI64ArrayAttr(staticSizes),
        b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}
|
|
|
|
|
|
|
|
/// Build a SubTensorInsertOp with all dynamic entries: `staticOffsets`,
/// `staticSizes` and `staticStrides` are automatically filled with
/// source-tensor-rank sentinel values that encode dynamic entries.
void mlir::SubTensorInsertOp::build(OpBuilder &b, OperationState &result,
                                    Value source, Value dest,
                                    ValueRange offsets, ValueRange sizes,
                                    ValueRange strides,
                                    ArrayRef<NamedAttribute> attrs) {
  auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  unsigned rank = sourceRankedTensorType.getRank();
  SmallVector<int64_t, 4> staticOffsetsVector(
      rank, ShapedType::kDynamicStrideOrOffset);
  SmallVector<int64_t, 4> staticSizesVector(rank, ShapedType::kDynamicSize);
  SmallVector<int64_t, 4> staticStridesVector(
      rank, ShapedType::kDynamicStrideOrOffset);
  build(b, result, source, dest, staticOffsetsVector, staticSizesVector,
        staticStridesVector, offsets, sizes, strides, attrs);
}
|
|
|
|
|
|
|
|
/// Return the loop ranges (offset/size/stride triples) of this
/// subtensor_insert, materializing constants with `b` where needed; delegates
/// to the shared implementation.
SmallVector<Range, 8> SubTensorInsertOp::getOrCreateRanges(OpBuilder &b,
                                                           Location loc) {
  return ::getOrCreateRangesImpl(*this, b, loc);
}
|
|
|
|
|
|
|
|
/// Verifier for SubTensorInsertOp.
static LogicalResult verify(SubTensorInsertOp op) {
  // Shared structural checks on offsets/sizes/strides operands & attributes.
  if (failed(verifyOpWithOffsetSizesAndStrides(op)))
    return failure();
  // The result must have exactly the destination tensor's type.
  if (op.getType() != op.dest().getType())
    return op.emitError("expected result type to be ") << op.dest().getType();
  return success();
}
|
|
|
|
|
2019-11-11 10:32:52 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-03-04 09:44:36 -08:00
|
|
|
// TensorCastOp
|
2019-11-11 10:32:52 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
bool TensorCastOp::areCastCompatible(Type a, Type b) {
|
|
|
|
auto aT = a.dyn_cast<TensorType>();
|
|
|
|
auto bT = b.dyn_cast<TensorType>();
|
|
|
|
if (!aT || !bT)
|
|
|
|
return false;
|
2019-11-13 12:09:40 -08:00
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
if (aT.getElementType() != bT.getElementType())
|
|
|
|
return false;
|
2019-11-13 12:09:40 -08:00
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
return succeeded(verifyCompatibleShape(aT, bT));
|
2019-11-20 11:16:37 -08:00
|
|
|
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
/// Delegate to the generic cast folder (folds trivial casts; see
/// `impl::foldCastOp` for the exact conditions).
OpFoldResult TensorCastOp::fold(ArrayRef<Attribute> operands) {
  return impl::foldCastOp(*this);
}
|
|
|
|
|
2020-09-16 10:01:54 +02:00
|
|
|
/// Compute a TensorType that has the joined shape knowledge of the two
|
|
|
|
/// given TensorTypes. The element types need to match.
|
|
|
|
static TensorType joinShapes(TensorType one, TensorType two) {
|
|
|
|
assert(one.getElementType() == two.getElementType());
|
|
|
|
|
|
|
|
if (!one.hasRank())
|
|
|
|
return two;
|
|
|
|
if (!two.hasRank())
|
|
|
|
return one;
|
|
|
|
|
|
|
|
int64_t rank = one.getRank();
|
|
|
|
if (rank != two.getRank())
|
|
|
|
return {};
|
|
|
|
|
|
|
|
SmallVector<int64_t, 4> join;
|
|
|
|
join.reserve(rank);
|
|
|
|
for (int64_t i = 0; i < rank; ++i) {
|
|
|
|
if (one.isDynamicDim(i)) {
|
|
|
|
join.push_back(two.getDimSize(i));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (two.isDynamicDim(i)) {
|
|
|
|
join.push_back(one.getDimSize(i));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (one.getDimSize(i) != two.getDimSize(i))
|
|
|
|
return {};
|
|
|
|
join.push_back(one.getDimSize(i));
|
|
|
|
}
|
|
|
|
return RankedTensorType::get(join, one.getElementType());
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace {

/// Replaces chains of two tensor_cast operations by a single tensor_cast
/// operation if doing so does not remove runtime constraints.
struct ChainedTensorCast : public OpRewritePattern<TensorCastOp> {
  using OpRewritePattern<TensorCastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(TensorCastOp tensorCast,
                                PatternRewriter &rewriter) const final {
    // Only fire when this cast's operand is itself produced by a tensor_cast.
    auto tensorCastOperand =
        tensorCast.getOperand().getDefiningOp<TensorCastOp>();

    if (!tensorCastOperand)
      return failure();

    auto sourceType =
        tensorCastOperand.getOperand().getType().cast<TensorType>();
    auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
    auto resultType = tensorCast.getType().cast<TensorType>();

    // We can remove the intermediate cast if joining all three produces the
    // same result as just joining the source and result shapes.
    auto firstJoin =
        joinShapes(joinShapes(sourceType, intermediateType), resultType);

    // The join might not exist if the cast sequence would fail at runtime.
    if (!firstJoin)
      return failure();

    // The newJoin always exists if the above join exists, it might just contain
    // less information. If so, we cannot drop the intermediate cast, as doing
    // so would remove runtime checks.
    auto newJoin = joinShapes(sourceType, resultType);
    if (firstJoin != newJoin)
      return failure();

    // Cast straight from the original source to the final result type.
    rewriter.replaceOpWithNewOp<TensorCastOp>(tensorCast, resultType,
                                              tensorCastOperand.getOperand());
    return success();
  }
};

} // namespace
|
|
|
|
|
|
|
|
/// Register TensorCastOp canonicalizations: collapse cast-of-cast chains.
void TensorCastOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<ChainedTensorCast>(context);
}
|
|
|
|
|
2020-09-30 19:19:04 +02:00
|
|
|
//===----------------------------------------------------------------------===//
|
2020-10-14 11:26:22 -07:00
|
|
|
// TensorLoadOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
OpFoldResult TensorLoadOp::fold(ArrayRef<Attribute>) {
|
|
|
|
if (auto tensorToMemref = memref().getDefiningOp<TensorToMemrefOp>())
|
|
|
|
return tensorToMemref.tensor();
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// TensorToMemrefOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
OpFoldResult TensorToMemrefOp::fold(ArrayRef<Attribute>) {
|
|
|
|
if (auto tensorLoad = tensor().getDefiningOp<TensorLoadOp>())
|
|
|
|
if (tensorLoad.memref().getType() == getType())
|
|
|
|
return tensorLoad.memref();
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// TransposeOp
//===----------------------------------------------------------------------===//

/// Build a strided memref type by applying `permutationMap` to `memRefType`.
static MemRefType inferTransposeResultType(MemRefType memRefType,
                                           AffineMap permutationMap) {
  auto rank = memRefType.getRank();
  auto originalSizes = memRefType.getShape();
  // Compute permuted sizes.
  SmallVector<int64_t, 4> sizes(rank, 0);
  for (auto en : llvm::enumerate(permutationMap.getResults()))
    sizes[en.index()] =
        originalSizes[en.value().cast<AffineDimExpr>().getPosition()];

  // Compute permuted strides.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto res = getStridesAndOffset(memRefType, strides, offset);
  // Callers only pass strided memrefs; a failure here is a programming error.
  assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
  (void)res;
  auto map =
      makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
  map = permutationMap ? map.compose(permutationMap) : map;
  return MemRefType::Builder(memRefType).setShape(sizes).setAffineMaps(map);
}
|
|
|
|
|
|
|
|
/// Build a transpose op whose result type is derived by permuting the shape
/// and strided layout of `in` with `permutation`.
void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
                        AffineMapAttr permutation,
                        ArrayRef<NamedAttribute> attrs) {
  AffineMap permutationMap = permutation.getValue();
  assert(permutationMap);

  // Derive the result type from the input type and the permutation.
  MemRefType resultType =
      inferTransposeResultType(in.getType().cast<MemRefType>(), permutationMap);

  build(b, result, resultType, in, attrs);
  result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
}
|
|
|
|
|
|
|
|
// transpose $in $permutation attr-dict : type($in) `to` type(results)
static void print(OpAsmPrinter &p, TransposeOp op) {
  p << "transpose " << op.in() << " " << op.permutation();
  // The permutation is printed inline above, so elide it from the attr-dict.
  p.printOptionalAttrDict(op.getAttrs(),
                          {TransposeOp::getPermutationAttrName()});
  p << " : " << op.in().getType() << " to " << op.getType();
}
|
|
|
|
|
|
|
|
/// Parse: transpose $in $permutation attr-dict : type($in) `to` type(results)
static ParseResult parseTransposeOp(OpAsmParser &parser,
                                    OperationState &result) {
  OpAsmParser::OperandType in;
  AffineMap permutation;
  MemRefType srcType, dstType;
  if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(srcType) ||
      parser.resolveOperand(in, srcType, result.operands) ||
      parser.parseKeywordType("to", dstType) ||
      parser.addTypeToList(dstType, result.types))
    return failure();

  // The permutation map is stored as an attribute, mirroring the printer.
  result.addAttribute(TransposeOp::getPermutationAttrName(),
                      AffineMapAttr::get(permutation));
  return success();
}
|
|
|
|
|
|
|
|
/// Verifier for TransposeOp: the map must be a permutation of the input's
/// rank, and the result type must equal the inferred transposed type.
static LogicalResult verify(TransposeOp op) {
  if (!op.permutation().isPermutation())
    return op.emitOpError("expected a permutation map");
  if (op.permutation().getNumDims() != op.getShapedType().getRank())
    return op.emitOpError(
        "expected a permutation map of same rank as the input");

  auto srcType = op.in().getType().cast<MemRefType>();
  auto dstType = op.getType().cast<MemRefType>();
  auto transposedType = inferTransposeResultType(srcType, op.permutation());
  if (dstType != transposedType)
    return op.emitOpError("output type ")
           << dstType << " does not match transposed input type " << srcType
           << ", " << transposedType;
  return success();
}
|
|
|
|
|
|
|
|
/// Fold a memref_cast feeding the transpose operand (via `foldMemRefCast`).
OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
  if (failed(foldMemRefCast(*this)))
    return {};
  return getResult();
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
// TruncateIOp
//===----------------------------------------------------------------------===//

/// Verifier for TruncateIOp: operand and result must be (element-wise)
/// non-index integers, and the operand must be strictly wider.
static LogicalResult verify(TruncateIOp op) {
  // Works for both scalars and shaped types: compare element types.
  auto srcType = getElementTypeOrSelf(op.getOperand().getType());
  auto dstType = getElementTypeOrSelf(op.getType());

  if (srcType.isa<IndexType>())
    return op.emitError() << srcType << " is not a valid operand type";
  if (dstType.isa<IndexType>())
    return op.emitError() << dstType << " is not a valid result type";

  if (srcType.cast<IntegerType>().getWidth() <=
      dstType.cast<IntegerType>().getWidth())
    return op.emitError("operand type ")
           << srcType << " must be wider than result type " << dstType;

  return success();
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
// UnsignedDivIOp
//===----------------------------------------------------------------------===//

/// Fold divi_unsigned: constant / constant computes directly (unless it would
/// divide by zero), and x / 1 folds to x.
OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "binary operation takes two operands");

  // Don't fold if it would require a division by zero.
  bool div0 = false;
  auto result = constFoldBinaryOp<IntegerAttr>(operands, [&](APInt a, APInt b) {
    if (div0 || !b) {
      // Latch the flag; the returned value is discarded below when div0 is set.
      div0 = true;
      return a;
    }
    return a.udiv(b);
  });

  // Fold out division by one. Assumes all tensors of all ones are splats.
  if (auto rhs = operands[1].dyn_cast_or_null<IntegerAttr>()) {
    if (rhs.getValue() == 1)
      return lhs();
  } else if (auto rhs = operands[1].dyn_cast_or_null<SplatElementsAttr>()) {
    if (rhs.getSplatValue<IntegerAttr>().getValue() == 1)
      return lhs();
  }

  return div0 ? Attribute() : result;
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// UnsignedRemIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
OpFoldResult UnsignedRemIOp::fold(ArrayRef<Attribute> operands) {
|
|
|
|
assert(operands.size() == 2 && "remi_unsigned takes two operands");
|
|
|
|
|
|
|
|
auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
|
|
|
|
if (!rhs)
|
|
|
|
return {};
|
|
|
|
auto rhsValue = rhs.getValue();
|
|
|
|
|
|
|
|
// x % 1 = 0
|
|
|
|
if (rhsValue.isOneValue())
|
|
|
|
return IntegerAttr::get(rhs.getType(), APInt(rhsValue.getBitWidth(), 0));
|
|
|
|
|
|
|
|
// Don't fold if it requires division by zero.
|
|
|
|
if (rhsValue.isNullValue())
|
|
|
|
return {};
|
|
|
|
|
|
|
|
auto lhs = operands.front().dyn_cast_or_null<IntegerAttr>();
|
|
|
|
if (!lhs)
|
|
|
|
return {};
|
|
|
|
return IntegerAttr::get(lhs.getType(), lhs.getValue().urem(rhsValue));
|
2019-11-13 12:09:40 -08:00
|
|
|
}
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
//===----------------------------------------------------------------------===//
// ViewOp
//===----------------------------------------------------------------------===//

/// Parse: view $source `[` $byte_shift `]` `[` $sizes `]` attr-dict
///        `:` type($source) `to` type(results)
static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType srcInfo;
  SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
  SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
  auto indexType = parser.getBuilder().getIndexType();
  Type srcType, dstType;
  llvm::SMLoc offsetLoc;
  if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
      parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
    return failure();

  // Exactly one byte-shift operand is allowed between the first brackets.
  if (offsetInfo.size() != 1)
    return parser.emitError(offsetLoc) << "expects 1 offset operand";

  return failure(
      parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(srcType) ||
      parser.resolveOperand(srcInfo, srcType, result.operands) ||
      parser.resolveOperands(offsetInfo, indexType, result.operands) ||
      parser.resolveOperands(sizesInfo, indexType, result.operands) ||
      parser.parseKeywordType("to", dstType) ||
      parser.addTypeToList(dstType, result.types));
}
|
|
|
|
|
|
|
|
/// Print: view $source `[` $byte_shift `]` `[` $sizes `]` attr-dict
///        `:` type($source) `to` type(results)
static void print(OpAsmPrinter &p, ViewOp op) {
  p << op.getOperationName() << ' ' << op.getOperand(0) << '[';
  p.printOperand(op.byte_shift());
  p << "][" << op.sizes() << ']';
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.getOperand(0).getType() << " to " << op.getType();
}
|
|
|
|
|
|
|
|
/// Verifier for ViewOp: both memrefs must use the identity layout and the
/// same memory space, and the op must supply one size operand per dynamic
/// result dimension.
static LogicalResult verify(ViewOp op) {
  auto baseType = op.getOperand(0).getType().cast<MemRefType>();
  auto viewType = op.getType();

  // The base memref should have identity layout map (or none).
  if (baseType.getAffineMaps().size() > 1 ||
      (baseType.getAffineMaps().size() == 1 &&
       !baseType.getAffineMaps()[0].isIdentity()))
    return op.emitError("unsupported map for base memref type ") << baseType;

  // The result memref should have identity layout map (or none).
  if (viewType.getAffineMaps().size() > 1 ||
      (viewType.getAffineMaps().size() == 1 &&
       !viewType.getAffineMaps()[0].isIdentity()))
    return op.emitError("unsupported map for result memref type ") << viewType;

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != viewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and view memref type " << viewType;

  // Verify that we have the correct number of sizes for the result type.
  unsigned numDynamicDims = viewType.getNumDynamicDims();
  if (op.sizes().size() != numDynamicDims)
    return op.emitError("incorrect number of size operands for type ")
           << viewType;

  return success();
}
|
|
|
|
|
2020-04-22 11:16:34 -04:00
|
|
|
/// Returns the buffer this view is carved out of, via the ODS-generated
/// 'source' operand accessor.
Value ViewOp::getViewSource() {
  return source();
}
|
|
|
|
|
2019-11-14 12:22:28 -08:00
|
|
|
namespace {
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
/// Folds dynamic size operands of a ViewOp that are produced by constants
/// into the result memref type, then rebuilds the view with the more static
/// type and inserts a memref_cast back to the original result type so that
/// all existing uses keep type-checking.
struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
  using OpRewritePattern<ViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ViewOp viewOp,
                                PatternRewriter &rewriter) const override {
    // Return if none of the operands are constants.
    if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
          return matchPattern(operand, m_ConstantIndex());
        }))
      return failure();

    // Get result memref type.
    auto memrefType = viewOp.getType();

    // Get offset from old memref view type 'memRefType'.
    int64_t oldOffset;
    SmallVector<int64_t, 4> oldStrides;
    if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
      return failure();
    assert(oldOffset == 0 && "Expected 0 offset");
    // 'oldOffset' is only read by the assert above; silence the
    // set-but-unused warning in NDEBUG builds.
    (void)oldOffset;

    // Operands of the replacement view: only the dynamic sizes that could
    // not be folded are carried over.
    SmallVector<Value, 4> newOperands;

    // Offset cannot be folded into result type.

    // Fold any dynamic dim operands which are produced by a constant.
    SmallVector<int64_t, 4> newShapeConstants;
    newShapeConstants.reserve(memrefType.getRank());

    // 'dynamicDimPos' walks viewOp.sizes() in lock-step with the dynamic
    // dimensions of the result shape.
    unsigned dynamicDimPos = 0;
    unsigned rank = memrefType.getRank();
    for (unsigned dim = 0, e = rank; dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already static dimension, keep it.
      if (!ShapedType::isDynamic(dimSize)) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
      auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
      if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
        // Dynamic shape dimension will be folded.
        newShapeConstants.push_back(constantIndexOp.getValue());
      } else {
        // Dynamic shape dimension not folded; copy operand from old memref.
        newShapeConstants.push_back(dimSize);
        newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
      }
      dynamicDimPos++;
    }

    // Create new memref type with constant folded dims.
    MemRefType newMemRefType =
        MemRefType::Builder(memrefType).setShape(newShapeConstants);
    // Nothing new, don't fold.
    if (newMemRefType == memrefType)
      return failure();

    // Create new ViewOp.
    auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
                                             viewOp.getOperand(0),
                                             viewOp.byte_shift(), newOperands);
    // Insert a cast so we have the same type as the old memref type.
    rewriter.replaceOpWithNewOp<MemRefCastOp>(viewOp, newViewOp,
                                              viewOp.getType());
    return success();
  }
};
|
2019-11-14 12:22:28 -08:00
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
/// Rewrites view(memref_cast(alloc, ...), ...) so the view consumes the
/// allocation directly, bypassing the intermediate memref_cast.
struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
  using OpRewritePattern<ViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ViewOp viewOp,
                                PatternRewriter &rewriter) const override {
    // The view's source must be produced by a memref_cast.
    auto castOp = viewOp.getOperand(0).getDefiningOp<MemRefCastOp>();
    if (!castOp)
      return failure();
    // ... and the cast input must itself come from an alloc.
    Value castSource = castOp.getOperand();
    if (!castSource.getDefiningOp<AllocOp>())
      return failure();
    // Rebuild the view directly on top of the allocation; result type,
    // byte shift, and sizes are unchanged.
    rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), castSource,
                                        viewOp.byte_shift(), viewOp.sizes());
    return success();
  }
};
|
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
void ViewOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
|
|
|
|
MLIRContext *context) {
|
|
|
|
results.insert<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
|
|
|
|
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// XOrOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Folds an xor: xor(x, 0) -> x, xor(x, x) -> 0, and otherwise constant
/// integer operands are combined elementwise.
OpFoldResult XOrOp::fold(ArrayRef<Attribute> operands) {
  // xor(x, 0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();

  // xor(x, x) -> 0
  if (lhs() == rhs())
    return Builder(getContext()).getZeroAttr(getType());

  // Fall back to elementwise constant folding when both operands are
  // constant integer attributes.
  auto computeXor = [](APInt a, APInt b) { return a ^ b; };
  return constFoldBinaryOp<IntegerAttr>(operands, computeXor);
}
|
|
|
|
|
2019-09-21 16:14:07 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// ZeroExtendIOp
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Verifies a ZeroExtendIOp: neither side may be 'index', and the result
/// integer must be strictly wider than the operand integer. Shaped operands
/// are compared by element type.
static LogicalResult verify(ZeroExtendIOp op) {
  Type inEltType = getElementTypeOrSelf(op.getOperand().getType());
  Type outEltType = getElementTypeOrSelf(op.getType());

  // 'index' has no fixed bitwidth, so extension to/from it is meaningless.
  if (inEltType.isa<IndexType>())
    return op.emitError() << inEltType << " is not a valid operand type";
  if (outEltType.isa<IndexType>())
    return op.emitError() << outEltType << " is not a valid result type";

  // Zero extension must strictly widen. NOTE(review): the casts presume the
  // element types are integers — ODS operand constraints are assumed to have
  // rejected anything else before this runs.
  if (inEltType.cast<IntegerType>().getWidth() >=
      outEltType.cast<IntegerType>().getWidth())
    return op.emitError("result type ")
           << outEltType << " must be wider than operand type " << inEltType;

  return success();
}
|
|
|
|
|
2019-03-20 17:25:34 -07:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// TableGen'd op method definitions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#define GET_OP_CLASSES
|
2020-02-21 11:54:49 -08:00
|
|
|
#include "mlir/Dialect/StandardOps/IR/Ops.cpp.inc"
|