//===- Ops.cpp - Standard MLIR Operations ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/StandardOps/IR/Ops.h"

#include "mlir/Dialect/CommonFolders.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/MathExtras.h"
#include "mlir/Support/STLExtras.h"
#include "mlir/Transforms/InliningUtils.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

// Pull in all enum type definitions and utility function declarations.
#include "mlir/Dialect/StandardOps/IR/OpsEnums.cpp.inc"

using namespace mlir;

//===----------------------------------------------------------------------===//
// StandardOpsDialect Interfaces
//===----------------------------------------------------------------------===//
namespace {
/// This class defines the interface for handling inlining with standard
/// operations.
struct StdInlinerInterface : public DialectInlinerInterface {
  using DialectInlinerInterface::DialectInlinerInterface;

  //===--------------------------------------------------------------------===//
  // Analysis Hooks
  //===--------------------------------------------------------------------===//

  /// All operations within standard ops can be inlined.
  bool isLegalToInline(Operation *, Region *,
                       BlockAndValueMapping &) const final {
    return true;
  }

  //===--------------------------------------------------------------------===//
  // Transformation Hooks
  //===--------------------------------------------------------------------===//

  /// Handle the given inlined terminator by replacing it with a new operation
  /// as necessary.
  void handleTerminator(Operation *op, Block *newDest) const final {
    // Only "std.return" needs to be handled here.
    auto returnOp = dyn_cast<ReturnOp>(op);
    if (!returnOp)
      return;

    // Replace the return with a branch to the dest.
    OpBuilder builder(op);
    builder.create<BranchOp>(op->getLoc(), newDest, returnOp.getOperands());
    op->erase();
  }

  /// Handle the given inlined terminator by replacing the values that
  /// represent the call results with the terminator's operands.
  void handleTerminator(Operation *op,
                        ArrayRef<Value> valuesToRepl) const final {
    // Only "std.return" needs to be handled here.
    auto returnOp = cast<ReturnOp>(op);

    // Replace the values directly with the return operands.
    assert(returnOp.getNumOperands() == valuesToRepl.size());
    for (const auto &it : llvm::enumerate(returnOp.getOperands()))
      valuesToRepl[it.index()].replaceAllUsesWith(it.value());
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// StandardOpsDialect
//===----------------------------------------------------------------------===//

/// A custom unary operation printer that omits the "std." prefix from the
/// operation names.
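///
/// For instance, a unary operation such as "std.negf" prints in the custom
/// form:
///
///   %0 = negf %arg0 : f32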
static void printStandardUnaryOp(Operation *op, OpAsmPrinter &p) {
  assert(op->getNumOperands() == 1 && "unary op should have one operand");
  assert(op->getNumResults() == 1 && "unary op should have one result");

  int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
  p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
    << op->getOperand(0);
  p.printOptionalAttrDict(op->getAttrs());
  p << " : " << op->getOperand(0).getType();
}

/// A custom binary operation printer that omits the "std." prefix from the
/// operation names.
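///
/// For instance, "std.addf" with matching operand and result types prints in
/// the custom form:
///
///   %2 = addf %0, %1 : f32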
static void printStandardBinaryOp(Operation *op, OpAsmPrinter &p) {
  assert(op->getNumOperands() == 2 && "binary op should have two operands");
  assert(op->getNumResults() == 1 && "binary op should have one result");

  // If not all the operand and result types are the same, just use the
  // generic assembly form to avoid omitting information in printing.
  auto resultType = op->getResult(0).getType();
  if (op->getOperand(0).getType() != resultType ||
      op->getOperand(1).getType() != resultType) {
    p.printGenericOp(op);
    return;
  }

  int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
  p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
    << op->getOperand(0) << ", " << op->getOperand(1);
  p.printOptionalAttrDict(op->getAttrs());

  // Now we can output only one type for all operands and the result.
  p << " : " << op->getResult(0).getType();
}

/// A custom cast operation printer that omits the "std." prefix from the
/// operation names.
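///
/// For instance, "std.memref_cast" prints in the custom form:
///
///   %1 = memref_cast %0 : memref<4xf32> to memref<?xf32>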
static void printStandardCastOp(Operation *op, OpAsmPrinter &p) {
  int stdDotLen = StandardOpsDialect::getDialectNamespace().size() + 1;
  p << op->getName().getStringRef().drop_front(stdDotLen) << ' '
    << op->getOperand(0) << " : " << op->getOperand(0).getType() << " to "
    << op->getResult(0).getType();
}

/// A custom cast operation verifier.
template <typename T>
static LogicalResult verifyCastOp(T op) {
  auto opType = op.getOperand().getType();
  auto resType = op.getType();
  if (!T::areCastCompatible(opType, resType))
    return op.emitError("operand type ") << opType << " and result type "
                                         << resType << " are cast incompatible";

  return success();
}

StandardOpsDialect::StandardOpsDialect(MLIRContext *context)
    : Dialect(getDialectNamespace(), context) {
  addOperations<DmaStartOp, DmaWaitOp,
#define GET_OP_LIST
#include "mlir/Dialect/StandardOps/IR/Ops.cpp.inc"
                >();
  addInterfaces<StdInlinerInterface>();
}

/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *StandardOpsDialect::materializeConstant(OpBuilder &builder,
                                                   Attribute value, Type type,
                                                   Location loc) {
  return builder.create<ConstantOp>(loc, type, value);
}

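/// Prints the list of dimension operands in parentheses followed by the list
/// of symbol operands in square brackets (omitted when empty), e.g.
/// "(%d0, %d1)[%s0]".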
void mlir::printDimAndSymbolList(Operation::operand_iterator begin,
                                 Operation::operand_iterator end,
                                 unsigned numDims, OpAsmPrinter &p) {
  Operation::operand_range operands(begin, end);
  p << '(' << operands.take_front(numDims) << ')';
  if (operands.size() != numDims)
    p << '[' << operands.drop_front(numDims) << ']';
}

// Parses dimension and symbol list, and sets 'numDims' to the number of
// dimension operands parsed.
// Returns failure if parsing fails.
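//
// For example, "(%i, %j)[%n]" parses two dimension operands and one symbol
// operand, all of index type.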
ParseResult mlir::parseDimAndSymbolList(OpAsmParser &parser,
                                        SmallVectorImpl<Value> &operands,
                                        unsigned &numDims) {
  SmallVector<OpAsmParser::OperandType, 8> opInfos;
  if (parser.parseOperandList(opInfos, OpAsmParser::Delimiter::Paren))
    return failure();
  // Store number of dimensions for validation by caller.
  numDims = opInfos.size();

  // Parse the optional symbol operands.
  auto indexTy = parser.getBuilder().getIndexType();
  if (parser.parseOperandList(opInfos,
                              OpAsmParser::Delimiter::OptionalSquare) ||
      parser.resolveOperands(opInfos, indexTy, operands))
    return failure();
  return success();
}

/// Matches a ConstantIndexOp.
/// TODO: This should probably just be a general matcher that uses m_Constant
/// and checks the operation for an index type.
static detail::op_matcher<ConstantIndexOp> m_ConstantIndex() {
  return detail::op_matcher<ConstantIndexOp>();
}

//===----------------------------------------------------------------------===//
// Common canonicalization pattern support logic
//===----------------------------------------------------------------------===//

/// This is a common utility used for patterns of the form
/// "someop(memrefcast) -> someop". It folds the source of any memref_cast
/// into the root operation directly.
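///
/// For example, given:
///
///   %0 = memref_cast %src : memref<4xf32> to memref<?xf32>
///   dealloc %0 : memref<?xf32>
///
/// the dealloc is folded to operate on %src directly.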
static LogicalResult foldMemRefCast(Operation *op) {
  bool folded = false;
  for (OpOperand &operand : op->getOpOperands()) {
    auto cast = dyn_cast_or_null<MemRefCastOp>(operand.get().getDefiningOp());
    if (cast && !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
      operand.set(cast.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

//===----------------------------------------------------------------------===//
// AddFOp
//===----------------------------------------------------------------------===//

OpFoldResult AddFOp::fold(ArrayRef<Attribute> operands) {
  return constFoldBinaryOp<FloatAttr>(
      operands, [](APFloat a, APFloat b) { return a + b; });
}

//===----------------------------------------------------------------------===//
// AddIOp
//===----------------------------------------------------------------------===//

OpFoldResult AddIOp::fold(ArrayRef<Attribute> operands) {
  /// addi(x, 0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();

  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a + b; });
}

//===----------------------------------------------------------------------===//
// AllocOp / AllocaOp
//===----------------------------------------------------------------------===//

template <typename AllocLikeOp>
static void printAllocLikeOp(OpAsmPrinter &p, AllocLikeOp op, StringRef name) {
  static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
                "applies to only alloc or alloca");
  p << name;

  // Print dynamic dimension operands.
  MemRefType type = op.getType();
  printDimAndSymbolList(op.operand_begin(), op.operand_end(),
                        type.getNumDynamicDims(), p);
  p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"map"});
  p << " : " << type;
}

static void print(OpAsmPrinter &p, AllocOp op) {
  printAllocLikeOp(p, op, "alloc");
}

static void print(OpAsmPrinter &p, AllocaOp op) {
  printAllocLikeOp(p, op, "alloca");
}

static ParseResult parseAllocLikeOp(OpAsmParser &parser,
                                    OperationState &result) {
  MemRefType type;

  // Parse the dimension operands and optional symbol operands, followed by a
  // memref type.
  unsigned numDimOperands;
  if (parseDimAndSymbolList(parser, result.operands, numDimOperands) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(type))
    return failure();

  // Check numDynamicDims against number of question marks in memref type.
  // Note: this check remains here (instead of in verify()), because the
  // partition between dim operands and symbol operands is lost after parsing.
  // Verification still checks that the total number of operands matches
  // the number of symbols in the affine map, plus the number of dynamic
  // dimensions in the memref.
  if (numDimOperands != type.getNumDynamicDims())
    return parser.emitError(parser.getNameLoc())
           << "dimension operand count does not equal memref dynamic dimension "
              "count";
  result.types.push_back(type);
  return success();
}

template <typename AllocLikeOp>
static LogicalResult verify(AllocLikeOp op) {
  static_assert(std::is_same<AllocLikeOp, AllocOp>::value ||
                    std::is_same<AllocLikeOp, AllocaOp>::value,
                "applies to only alloc or alloca");
  auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
  if (!memRefType)
    return op.emitOpError("result must be a memref");

  unsigned numSymbols = 0;
  if (!memRefType.getAffineMaps().empty()) {
    // Store number of symbols used in affine map (used in subsequent check).
    AffineMap affineMap = memRefType.getAffineMaps()[0];
    numSymbols = affineMap.getNumSymbols();
  }

  // Check that the total number of operands matches the number of symbols in
  // the affine map, plus the number of dynamic dimensions specified in the
  // memref type.
  unsigned numDynamicDims = memRefType.getNumDynamicDims();
  if (op.getNumOperands() != numDynamicDims + numSymbols)
    return op.emitOpError(
        "operand count does not equal dimension plus symbol operand count");

  // Verify that all operands are of type Index.
  for (auto operandType : op.getOperandTypes())
    if (!operandType.isIndex())
      return op.emitOpError("requires operands to be of type Index");
  return success();
}

namespace {
/// Fold constant dimensions into an alloc like operation.
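///
/// For example, assuming %c4 is produced by a constant of index type:
///
///   %0 = alloc(%c4, %n) : memref<?x?xf32>
///
/// is rewritten to:
///
///   %1 = alloc(%n) : memref<4x?xf32>
///   %0 = memref_cast %1 : memref<4x?xf32> to memref<?x?xf32>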
template <typename AllocLikeOp>
struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
  using OpRewritePattern<AllocLikeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocLikeOp alloc,
                                PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants. If so, we can
    // substitute and drop them.
    if (llvm::none_of(alloc.getOperands(), [](Value operand) {
          return matchPattern(operand, m_ConstantIndex());
        }))
      return failure();

    auto memrefType = alloc.getType();

    // Ok, we have one or more constant operands. Collect the non-constant ones
    // and keep track of the resultant memref type to build.
    SmallVector<int64_t, 4> newShapeConstants;
    newShapeConstants.reserve(memrefType.getRank());
    SmallVector<Value, 4> newOperands;

    unsigned dynamicDimPos = 0;
    for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
      if (dimSize != -1) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
      auto *defOp = alloc.getOperand(dynamicDimPos).getDefiningOp();
      if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
        // Dynamic shape dimension will be folded.
        newShapeConstants.push_back(constantIndexOp.getValue());
      } else {
        // Dynamic shape dimension not folded; copy operand from old memref.
        newShapeConstants.push_back(-1);
        newOperands.push_back(alloc.getOperand(dynamicDimPos));
      }
      dynamicDimPos++;
    }

    // Create new memref type (which will have fewer dynamic dimensions).
    MemRefType newMemRefType =
        MemRefType::Builder(memrefType).setShape(newShapeConstants);
    assert(static_cast<int64_t>(newOperands.size()) ==
           newMemRefType.getNumDynamicDims());

    // Create and insert the alloc op for the new memref.
    auto newAlloc = rewriter.create<AllocLikeOp>(alloc.getLoc(), newMemRefType,
                                                 newOperands, IntegerAttr());
    // Insert a cast so we have the same type as the old alloc.
    auto resultCast = rewriter.create<MemRefCastOp>(alloc.getLoc(), newAlloc,
                                                    alloc.getType());

    rewriter.replaceOp(alloc, {resultCast});
    return success();
  }
};

/// Fold alloc operations with no uses. Alloc has side effects on the heap,
/// but can still be deleted if it has zero uses.
struct SimplifyDeadAlloc : public OpRewritePattern<AllocOp> {
  using OpRewritePattern<AllocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocOp alloc,
                                PatternRewriter &rewriter) const override {
    if (alloc.use_empty()) {
      rewriter.eraseOp(alloc);
      return success();
    }
    return failure();
  }
};
} // end anonymous namespace.

void AllocOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                          MLIRContext *context) {
  results.insert<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc>(context);
}

void AllocaOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                           MLIRContext *context) {
  results.insert<SimplifyAllocConst<AllocaOp>>(context);
}

//===----------------------------------------------------------------------===//
// AndOp
//===----------------------------------------------------------------------===//

OpFoldResult AndOp::fold(ArrayRef<Attribute> operands) {
  /// and(x, 0) -> 0
  if (matchPattern(rhs(), m_Zero()))
    return rhs();
  /// and(x, x) -> x
  if (lhs() == rhs())
    return rhs();

  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a & b; });
}

//===----------------------------------------------------------------------===//
// AssumeAlignmentOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(AssumeAlignmentOp op) {
  unsigned alignment = op.alignment().getZExtValue();
  if (!llvm::isPowerOf2_32(alignment))
    return op.emitOpError("alignment must be power of 2");
  return success();
}

//===----------------------------------------------------------------------===//
// AtomicRMWOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(AtomicRMWOp op) {
  if (op.getMemRefType().getRank() != op.getNumOperands() - 2)
    return op.emitOpError(
        "expects the number of subscripts to be equal to memref rank");
  switch (op.kind()) {
  case AtomicRMWKind::addf:
  case AtomicRMWKind::maxf:
  case AtomicRMWKind::minf:
  case AtomicRMWKind::mulf:
    if (!op.value().getType().isa<FloatType>())
      return op.emitOpError()
             << "with kind '" << stringifyAtomicRMWKind(op.kind())
             << "' expects a floating-point type";
    break;
  case AtomicRMWKind::addi:
  case AtomicRMWKind::maxs:
  case AtomicRMWKind::maxu:
  case AtomicRMWKind::mins:
  case AtomicRMWKind::minu:
  case AtomicRMWKind::muli:
    if (!op.value().getType().isa<IntegerType>())
      return op.emitOpError()
             << "with kind '" << stringifyAtomicRMWKind(op.kind())
             << "' expects an integer type";
    break;
  default:
    break;
  }
  return success();
}

//===----------------------------------------------------------------------===//
// BranchOp
//===----------------------------------------------------------------------===//

namespace {
/// Simplify a branch to a block that has a single predecessor. This effectively
/// merges the two blocks.
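///
/// For example:
///
///   ^bb0:
///     br ^bb1(%x : i32)
///   ^bb1(%y : i32):  // pred: ^bb0
///     ... use %y ...
///
/// folds into a single block in which %y is replaced by %x.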
struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern<BranchOp> {
  using OpRewritePattern<BranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(BranchOp op,
                                PatternRewriter &rewriter) const override {
    // Check that the successor block has a single predecessor.
    Block *succ = op.getDest();
    Block *opParent = op.getOperation()->getBlock();
    if (succ == opParent || !has_single_element(succ->getPredecessors()))
      return failure();

    // Merge the successor into the current block and erase the branch.
    rewriter.mergeBlocks(succ, opParent, op.getOperands());
    rewriter.eraseOp(op);
    return success();
  }
};
} // end anonymous namespace.

Block *BranchOp::getDest() { return getSuccessor(); }

void BranchOp::setDest(Block *block) { return setSuccessor(block); }

void BranchOp::eraseOperand(unsigned index) {
  getOperation()->eraseOperand(index);
}

void BranchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                           MLIRContext *context) {
  results.insert<SimplifyBrToBlockWithSinglePred>(context);
}

Optional<OperandRange> BranchOp::getSuccessorOperands(unsigned index) {
  assert(index == 0 && "invalid successor index");
  return getOperands();
}

bool BranchOp::canEraseSuccessorOperand() { return true; }

//===----------------------------------------------------------------------===//
// CallOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(CallOp op) {
  // Check that the callee attribute was specified.
  auto fnAttr = op.getAttrOfType<FlatSymbolRefAttr>("callee");
  if (!fnAttr)
    return op.emitOpError("requires a 'callee' symbol reference attribute");
  auto fn =
      op.getParentOfType<ModuleOp>().lookupSymbol<FuncOp>(fnAttr.getValue());
  if (!fn)
    return op.emitOpError() << "'" << fnAttr.getValue()
                            << "' does not reference a valid function";

  // Verify that the operand and result types match the callee.
  auto fnType = fn.getType();
  if (fnType.getNumInputs() != op.getNumOperands())
    return op.emitOpError("incorrect number of operands for callee");

  for (unsigned i = 0, e = fnType.getNumInputs(); i != e; ++i)
    if (op.getOperand(i).getType() != fnType.getInput(i))
      return op.emitOpError("operand type mismatch");

  if (fnType.getNumResults() != op.getNumResults())
    return op.emitOpError("incorrect number of results for callee");

  for (unsigned i = 0, e = fnType.getNumResults(); i != e; ++i)
    if (op.getResult(i).getType() != fnType.getResult(i))
      return op.emitOpError("result type mismatch");

  return success();
}

FunctionType CallOp::getCalleeType() {
  SmallVector<Type, 8> argTypes(getOperandTypes());
  return FunctionType::get(argTypes, getResultTypes(), getContext());
}

//===----------------------------------------------------------------------===//
// CallIndirectOp
//===----------------------------------------------------------------------===//
namespace {
/// Fold indirect calls that have a constant function as the callee operand.
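///
/// For example:
///
///   %f = constant @foo : (i32) -> i32
///   %r = call_indirect %f(%x) : (i32) -> i32
///
/// folds to a direct call:
///
///   %r = call @foo(%x) : (i32) -> i32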
struct SimplifyIndirectCallWithKnownCallee
    : public OpRewritePattern<CallIndirectOp> {
  using OpRewritePattern<CallIndirectOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CallIndirectOp indirectCall,
                                PatternRewriter &rewriter) const override {
    // Check that the callee is a constant callee.
    SymbolRefAttr calledFn;
    if (!matchPattern(indirectCall.getCallee(), m_Constant(&calledFn)))
      return failure();

    // Replace with a direct call.
    rewriter.replaceOpWithNewOp<CallOp>(indirectCall, calledFn,
                                        indirectCall.getResultTypes(),
                                        indirectCall.getArgOperands());
    return success();
  }
};
} // end anonymous namespace.

void CallIndirectOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<SimplifyIndirectCallWithKnownCallee>(context);
}

//===----------------------------------------------------------------------===//
// General helpers for comparison ops
//===----------------------------------------------------------------------===//

// Return the type of the same shape (scalar, vector or tensor) containing i1.
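// For example: i32 -> i1, vector<4xf32> -> vector<4xi1>, and
// tensor<?x8xi8> -> tensor<?x8xi1>.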
static Type getCheckedI1SameShape(Type type) {
  auto i1Type = IntegerType::get(1, type.getContext());
  if (type.isSignlessIntOrIndexOrFloat())
    return i1Type;
  if (auto tensorType = type.dyn_cast<RankedTensorType>())
    return RankedTensorType::get(tensorType.getShape(), i1Type);
  if (type.isa<UnrankedTensorType>())
    return UnrankedTensorType::get(i1Type);
  if (auto vectorType = type.dyn_cast<VectorType>())
    return VectorType::get(vectorType.getShape(), i1Type);
  return Type();
}

static Type getI1SameShape(Type type) {
  Type res = getCheckedI1SameShape(type);
  assert(res && "expected type with valid i1 shape");
  return res;
}

//===----------------------------------------------------------------------===//
// CmpIOp
//===----------------------------------------------------------------------===//

static void buildCmpIOp(Builder *build, OperationState &result,
                        CmpIPredicate predicate, Value lhs, Value rhs) {
  result.addOperands({lhs, rhs});
  result.types.push_back(getI1SameShape(lhs.getType()));
  result.addAttribute(
      CmpIOp::getPredicateAttrName(),
      build->getI64IntegerAttr(static_cast<int64_t>(predicate)));
}

// Compute `lhs` `pred` `rhs`, where `pred` is one of the known integer
// comparison predicates.
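//
// For example, with 8-bit operands 0xFF and 0x00: 'slt' treats 0xFF as -1 and
// returns true, while 'ult' treats it as 255 and returns false.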
static bool applyCmpPredicate(CmpIPredicate predicate, const APInt &lhs,
                              const APInt &rhs) {
  switch (predicate) {
  case CmpIPredicate::eq:
    return lhs.eq(rhs);
  case CmpIPredicate::ne:
    return lhs.ne(rhs);
  case CmpIPredicate::slt:
    return lhs.slt(rhs);
  case CmpIPredicate::sle:
    return lhs.sle(rhs);
  case CmpIPredicate::sgt:
    return lhs.sgt(rhs);
  case CmpIPredicate::sge:
    return lhs.sge(rhs);
  case CmpIPredicate::ult:
    return lhs.ult(rhs);
  case CmpIPredicate::ule:
    return lhs.ule(rhs);
  case CmpIPredicate::ugt:
    return lhs.ugt(rhs);
  case CmpIPredicate::uge:
    return lhs.uge(rhs);
  }
  llvm_unreachable("unknown comparison predicate");
}

// Constant folding hook for comparisons.
OpFoldResult CmpIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "cmpi takes two arguments");

  auto lhs = operands.front().dyn_cast_or_null<IntegerAttr>();
  auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
  if (!lhs || !rhs)
    return {};

  auto val = applyCmpPredicate(getPredicate(), lhs.getValue(), rhs.getValue());
  return IntegerAttr::get(IntegerType::get(1, getContext()), APInt(1, val));
}

//===----------------------------------------------------------------------===//
// CmpFOp
//===----------------------------------------------------------------------===//

static void buildCmpFOp(Builder *build, OperationState &result,
                        CmpFPredicate predicate, Value lhs, Value rhs) {
  result.addOperands({lhs, rhs});
  result.types.push_back(getI1SameShape(lhs.getType()));
  result.addAttribute(
      CmpFOp::getPredicateAttrName(),
      build->getI64IntegerAttr(static_cast<int64_t>(predicate)));
}

/// Compute `lhs` `pred` `rhs`, where `pred` is one of the known floating point
/// comparison predicates.
static bool applyCmpPredicate(CmpFPredicate predicate, const APFloat &lhs,
                              const APFloat &rhs) {
  auto cmpResult = lhs.compare(rhs);
  switch (predicate) {
  case CmpFPredicate::AlwaysFalse:
    return false;
  case CmpFPredicate::OEQ:
    return cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::OGT:
    return cmpResult == APFloat::cmpGreaterThan;
  case CmpFPredicate::OGE:
    return cmpResult == APFloat::cmpGreaterThan ||
           cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::OLT:
    return cmpResult == APFloat::cmpLessThan;
  case CmpFPredicate::OLE:
    return cmpResult == APFloat::cmpLessThan || cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::ONE:
    return cmpResult != APFloat::cmpUnordered && cmpResult != APFloat::cmpEqual;
  case CmpFPredicate::ORD:
    return cmpResult != APFloat::cmpUnordered;
  case CmpFPredicate::UEQ:
    return cmpResult == APFloat::cmpUnordered || cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::UGT:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpGreaterThan;
  case CmpFPredicate::UGE:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpGreaterThan ||
           cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::ULT:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpLessThan;
  case CmpFPredicate::ULE:
    return cmpResult == APFloat::cmpUnordered ||
           cmpResult == APFloat::cmpLessThan || cmpResult == APFloat::cmpEqual;
  case CmpFPredicate::UNE:
    return cmpResult != APFloat::cmpEqual;
  case CmpFPredicate::UNO:
    return cmpResult == APFloat::cmpUnordered;
  case CmpFPredicate::AlwaysTrue:
    return true;
  }
  llvm_unreachable("unknown comparison predicate");
}

// Constant folding hook for comparisons.
OpFoldResult CmpFOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "cmpf takes two arguments");

  auto lhs = operands.front().dyn_cast_or_null<FloatAttr>();
  auto rhs = operands.back().dyn_cast_or_null<FloatAttr>();

  // TODO(gcmn) We could actually do some intelligent things if only one of
  // the operands is known and it is inf or nan.
  if (!lhs || !rhs)
    return {};

  auto val = applyCmpPredicate(getPredicate(), lhs.getValue(), rhs.getValue());
  return IntegerAttr::get(IntegerType::get(1, getContext()), APInt(1, val));
}

//===----------------------------------------------------------------------===//
// CondBranchOp
//===----------------------------------------------------------------------===//

namespace {
/// cond_br true, ^bb1, ^bb2 -> br ^bb1
/// cond_br false, ^bb1, ^bb2 -> br ^bb2
///
struct SimplifyConstCondBranchPred : public OpRewritePattern<CondBranchOp> {
  using OpRewritePattern<CondBranchOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CondBranchOp condbr,
                                PatternRewriter &rewriter) const override {
    if (matchPattern(condbr.getCondition(), m_NonZero())) {
      // True branch taken.
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getTrueDest(),
                                            condbr.getTrueOperands());
      return success();
    } else if (matchPattern(condbr.getCondition(), m_Zero())) {
      // False branch taken.
      rewriter.replaceOpWithNewOp<BranchOp>(condbr, condbr.getFalseDest(),
                                            condbr.getFalseOperands());
      return success();
    }
    return failure();
  }
};
} // end anonymous namespace.

void CondBranchOp::getCanonicalizationPatterns(
    OwningRewritePatternList &results, MLIRContext *context) {
  results.insert<SimplifyConstCondBranchPred>(context);
}

Optional<OperandRange> CondBranchOp::getSuccessorOperands(unsigned index) {
  assert(index < getNumSuccessors() && "invalid successor index");
  return index == trueIndex ? getTrueOperands() : getFalseOperands();
}

bool CondBranchOp::canEraseSuccessorOperand() { return true; }

//===----------------------------------------------------------------------===//
// Constant*Op
//===----------------------------------------------------------------------===//

static void print(OpAsmPrinter &p, ConstantOp &op) {
  p << "constant ";
  p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"value"});

  if (op.getAttrs().size() > 1)
    p << ' ';
  p << op.getValue();

  // If the value is a symbol reference, print a trailing type.
  if (op.getValue().isa<SymbolRefAttr>())
    p << " : " << op.getType();
}

static ParseResult parseConstantOp(OpAsmParser &parser,
                                   OperationState &result) {
  Attribute valueAttr;
  if (parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseAttribute(valueAttr, "value", result.attributes))
    return failure();

  // If the attribute is a symbol reference, then we expect a trailing type.
  Type type;
  if (!valueAttr.isa<SymbolRefAttr>())
    type = valueAttr.getType();
  else if (parser.parseColonType(type))
    return failure();

  // Add the attribute type to the list.
  return parser.addTypeToList(type, result.types);
}

/// The constant op requires an attribute, and furthermore requires that it
/// matches the return type.
static LogicalResult verify(ConstantOp &op) {
  auto value = op.getValue();
  if (!value)
    return op.emitOpError("requires a 'value' attribute");

  auto type = op.getType();
  if (!value.getType().isa<NoneType>() && type != value.getType())
    return op.emitOpError() << "requires attribute's type (" << value.getType()
                            << ") to match op's return type (" << type << ")";

  if (type.isa<IndexType>() || value.isa<BoolAttr>())
    return success();

  if (auto intAttr = value.dyn_cast<IntegerAttr>()) {
    // If the type has a known bitwidth we verify that the value can be
    // represented with the given bitwidth.
    auto bitwidth = type.cast<IntegerType>().getWidth();
    auto intVal = intAttr.getValue();
    if (!intVal.isSignedIntN(bitwidth) && !intVal.isIntN(bitwidth))
      return op.emitOpError("requires 'value' to be an integer within the "
                            "range of the integer result type");
    return success();
  }

  if (type.isa<FloatType>()) {
    if (!value.isa<FloatAttr>())
      return op.emitOpError("requires 'value' to be a floating point constant");
    return success();
  }

  if (type.isa<ShapedType>()) {
    if (!value.isa<ElementsAttr>())
      return op.emitOpError("requires 'value' to be a shaped constant");
    return success();
  }

  if (type.isa<FunctionType>()) {
    auto fnAttr = value.dyn_cast<FlatSymbolRefAttr>();
    if (!fnAttr)
      return op.emitOpError("requires 'value' to be a function reference");

    // Try to find the referenced function.
    auto fn =
        op.getParentOfType<ModuleOp>().lookupSymbol<FuncOp>(fnAttr.getValue());
    if (!fn)
      return op.emitOpError() << "reference to undefined function '"
                              << fnAttr.getValue() << "'";

    // Check that the referenced function has the correct type.
    if (fn.getType() != type)
      return op.emitOpError("reference to function with mismatched type");

    return success();
  }

  if (type.isa<NoneType>() && value.isa<UnitAttr>())
    return success();

  return op.emitOpError("unsupported 'value' attribute: ") << value;
}

OpFoldResult ConstantOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.empty() && "constant has no operands");
  return getValue();
}

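/// Pick friendly names for constant results in the printed IR: "%true" and
/// "%false" for i1 constants, names like "%c42_i32" (or "%c42" for index
/// constants) for other integers, "%f" for function references, and "%cst"
/// otherwise.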
void ConstantOp::getAsmResultNames(
    function_ref<void(Value, StringRef)> setNameFn) {
  Type type = getType();
  if (auto intCst = getValue().dyn_cast<IntegerAttr>()) {
    IntegerType intTy = type.dyn_cast<IntegerType>();

    // Sugar i1 constants with 'true' and 'false'.
    if (intTy && intTy.getWidth() == 1)
      return setNameFn(getResult(), (intCst.getInt() ? "true" : "false"));

    // Otherwise, build a complex name with the value and type.
    SmallString<32> specialNameBuffer;
    llvm::raw_svector_ostream specialName(specialNameBuffer);
    specialName << 'c' << intCst.getInt();
    if (intTy)
      specialName << '_' << type;
    setNameFn(getResult(), specialName.str());

  } else if (type.isa<FunctionType>()) {
    setNameFn(getResult(), "f");
  } else {
    setNameFn(getResult(), "cst");
  }
}

/// Returns true if a constant operation can be built with the given value and
/// result type.
bool ConstantOp::isBuildableWith(Attribute value, Type type) {
  // SymbolRefAttr can only be used with a function type.
  if (value.isa<SymbolRefAttr>())
    return type.isa<FunctionType>();
  // Otherwise, the attribute must have the same type as 'type'.
  if (value.getType() != type)
    return false;
  // Finally, check that the attribute kind is handled.
  return value.isa<BoolAttr>() || value.isa<IntegerAttr>() ||
         value.isa<FloatAttr>() || value.isa<ElementsAttr>() ||
         value.isa<UnitAttr>();
}

void ConstantFloatOp::build(Builder *builder, OperationState &result,
                            const APFloat &value, FloatType type) {
  ConstantOp::build(builder, result, type, builder->getFloatAttr(type, value));
}

bool ConstantFloatOp::classof(Operation *op) {
  return ConstantOp::classof(op) && op->getResult(0).getType().isa<FloatType>();
}

/// ConstantIntOp only matches values whose result type is an IntegerType.
bool ConstantIntOp::classof(Operation *op) {
  return ConstantOp::classof(op) &&
         op->getResult(0).getType().isSignlessInteger();
}

void ConstantIntOp::build(Builder *builder, OperationState &result,
                          int64_t value, unsigned width) {
  Type type = builder->getIntegerType(width);
  ConstantOp::build(builder, result, type,
                    builder->getIntegerAttr(type, value));
}

/// Build a constant int op producing an integer with the specified type,
/// which must be an integer type.
void ConstantIntOp::build(Builder *builder, OperationState &result,
                          int64_t value, Type type) {
  assert(type.isSignlessInteger() &&
         "ConstantIntOp can only have signless integer type");
  ConstantOp::build(builder, result, type,
                    builder->getIntegerAttr(type, value));
}

/// ConstantIndexOp only matches values whose result type is Index.
bool ConstantIndexOp::classof(Operation *op) {
  return ConstantOp::classof(op) && op->getResult(0).getType().isIndex();
}

void ConstantIndexOp::build(Builder *builder, OperationState &result,
                            int64_t value) {
  Type type = builder->getIndexType();
  ConstantOp::build(builder, result, type,
                    builder->getIntegerAttr(type, value));
}

//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//
namespace {
/// Fold Dealloc operations that are deallocating an AllocOp that is only used
/// by other Dealloc operations.
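///
/// For example, given:
///
///   %a = alloc() : memref<4xf32>
///   dealloc %a : memref<4xf32>
///
/// where %a has no other uses, the dealloc is erased; SimplifyDeadAlloc can
/// then remove the now-unused alloc.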
|
2019-05-25 17:22:27 -07:00
|
|
|
struct SimplifyDeadDealloc : public OpRewritePattern<DeallocOp> {
|
|
|
|
using OpRewritePattern<DeallocOp>::OpRewritePattern;
|
2019-01-16 12:39:03 -08:00
|
|
|
|
2020-03-17 20:07:55 -07:00
|
|
|
LogicalResult matchAndRewrite(DeallocOp dealloc,
|
|
|
|
PatternRewriter &rewriter) const override {
|
2019-03-28 08:24:38 -07:00
|
|
|
// Check that the memref operand's defining operation is an AllocOp.
|
2019-12-23 14:45:01 -08:00
|
|
|
Value memref = dealloc.memref();
|
2020-01-11 08:54:04 -08:00
|
|
|
if (!isa_and_nonnull<AllocOp>(memref.getDefiningOp()))
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2019-01-16 12:39:03 -08:00
|
|
|
|
|
|
|
// Check that all of the uses of the AllocOp are other DeallocOps.
|
2020-01-11 08:54:04 -08:00
|
|
|
for (auto *user : memref.getUsers())
|
2019-05-18 11:09:07 -07:00
|
|
|
if (!isa<DeallocOp>(user))
|
2020-03-17 20:07:55 -07:00
|
|
|
return failure();
|
2019-01-16 12:39:03 -08:00
|
|
|
|
|
|
|
// Erase the dealloc operation.
|
2019-10-16 09:50:28 -07:00
|
|
|
rewriter.eraseOp(dealloc);
|
2020-03-17 20:07:55 -07:00
|
|
|
return success();
|
2019-01-16 12:39:03 -08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
} // end anonymous namespace.
|
2018-08-15 15:39:26 -07:00
|
|
|
|
2019-05-08 22:38:01 -07:00
|
|
|
static LogicalResult verify(DeallocOp op) {
|
2020-01-11 08:54:04 -08:00
|
|
|
if (!op.memref().getType().isa<MemRefType>())
|
2019-05-08 22:38:01 -07:00
|
|
|
return op.emitOpError("operand must be a memref");
|
2019-04-02 13:09:34 -07:00
|
|
|
return success();
|
2018-08-15 15:39:26 -07:00
|
|
|
}
|
|
|
|
|
2018-11-28 15:09:39 -08:00
|
|
|
void DeallocOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
|
2018-10-25 16:44:04 -07:00
|
|
|
MLIRContext *context) {
|
2019-08-05 18:37:56 -07:00
|
|
|
results.insert<SimplifyDeadDealloc>(context);
|
2018-10-25 16:44:04 -07:00
|
|
|
}

LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dealloc(memrefcast) -> dealloc
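  /// e.g. (illustrative IR):
  ///   %1 = memref_cast %0 : memref<4xf32> to memref<?xf32>
  ///   dealloc %1 : memref<?xf32>
  /// is rewritten in place to deallocate the original memref:
  ///   dealloc %0 : memref<4xf32>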
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

static void print(OpAsmPrinter &p, DimOp op) {
  p << "dim " << op.getOperand() << ", " << op.getIndex();
  p.printOptionalAttrDict(op.getAttrs(), /*elidedAttrs=*/{"index"});
  p << " : " << op.getOperand().getType();
}
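
// Parse DimOp.
// Ex (a representative form; names are illustrative):
//   %1 = dim %0, 1 : tensor<?x?xf32>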
static ParseResult parseDimOp(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType operandInfo;
  IntegerAttr indexAttr;
  Type type;
  Type indexType = parser.getBuilder().getIndexType();

  return failure(
      parser.parseOperand(operandInfo) || parser.parseComma() ||
      parser.parseAttribute(indexAttr, indexType, "index", result.attributes) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(type) ||
      parser.resolveOperand(operandInfo, type, result.operands) ||
      parser.addTypeToList(indexType, result.types));
}

static LogicalResult verify(DimOp op) {
  // Check that we have an integer 'index' attribute.
  auto indexAttr = op.getAttrOfType<IntegerAttr>("index");
  if (!indexAttr)
    return op.emitOpError("requires an integer attribute named 'index'");
  int64_t index = indexAttr.getValue().getSExtValue();

  auto type = op.getOperand().getType();
  if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
    if (index >= tensorType.getRank())
      return op.emitOpError("index is out of range");
  } else if (auto memrefType = type.dyn_cast<MemRefType>()) {
    if (index >= memrefType.getRank())
      return op.emitOpError("index is out of range");
  } else if (type.isa<UnrankedTensorType>()) {
    // ok, assumed to be in-range.
  } else {
    return op.emitOpError("requires an operand with tensor or memref type");
  }

  return success();
}
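
// For illustration, how dim folds on partially static shapes (names are
// hypothetical): with %A : memref<4x?xf32>,
//   %d0 = dim %A, 0 : memref<4x?xf32>   // folds to constant 4 : index
//   %d1 = dim %A, 1 : memref<4x?xf32>   // no constant fold; may instead fold
//                                       // to the alloc/view/subview size
//                                       // operand that defines %A.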
OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // Constant fold dim when the size along the index referred to is a constant.
  auto opType = memrefOrTensor().getType();
  int64_t dimSize = ShapedType::kDynamicSize;
  if (auto tensorType = opType.dyn_cast<RankedTensorType>())
    dimSize = tensorType.getShape()[getIndex()];
  else if (auto memrefType = opType.dyn_cast<MemRefType>())
    dimSize = memrefType.getShape()[getIndex()];

  if (!ShapedType::isDynamic(dimSize))
    return IntegerAttr::get(IndexType::get(getContext()), dimSize);

  // Fold dim to the size argument for an AllocOp/ViewOp/SubViewOp.
  auto memrefType = opType.dyn_cast<MemRefType>();
  if (!memrefType)
    return {};

  // The size at getIndex() is now a dynamic size of a memref.
  auto memref = memrefOrTensor().getDefiningOp();
  if (auto alloc = dyn_cast_or_null<AllocOp>(memref))
    return *(alloc.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(getIndex()));

  if (auto view = dyn_cast_or_null<ViewOp>(memref))
    return *(view.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(getIndex()));

  // The subview op here is expected to have fully dynamic sizes now.
  if (auto subview = dyn_cast_or_null<SubViewOp>(memref)) {
    auto sizes = subview.sizes();
    if (!sizes.empty())
      return *(sizes.begin() + getIndex());
  }

  /// dim(memrefcast) -> dim
  if (succeeded(foldMemRefCast(*this)))
    return getResult();

  return {};
}

// ---------------------------------------------------------------------------
// DmaStartOp
// ---------------------------------------------------------------------------

void DmaStartOp::build(Builder *builder, OperationState &result,
                       Value srcMemRef, ValueRange srcIndices, Value destMemRef,
                       ValueRange destIndices, Value numElements,
                       Value tagMemRef, ValueRange tagIndices, Value stride,
                       Value elementsPerStride) {
  result.addOperands(srcMemRef);
  result.addOperands(srcIndices);
  result.addOperands(destMemRef);
  result.addOperands(destIndices);
  result.addOperands({numElements, tagMemRef});
  result.addOperands(tagIndices);
  if (stride)
    result.addOperands({stride, elementsPerStride});
}

void DmaStartOp::print(OpAsmPrinter &p) {
  p << "dma_start " << getSrcMemRef() << '[' << getSrcIndices() << "], "
    << getDstMemRef() << '[' << getDstIndices() << "], " << getNumElements()
    << ", " << getTagMemRef() << '[' << getTagIndices() << ']';
  if (isStrided())
    p << ", " << getStride() << ", " << getNumElementsPerStride();

  p.printOptionalAttrDict(getAttrs());
  p << " : " << getSrcMemRef().getType() << ", " << getDstMemRef().getType()
    << ", " << getTagMemRef().getType();
}

// Parse DmaStartOp.
// Ex:
//   %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size,
//                       %tag[%index], %stride, %num_elt_per_stride :
//                     : memref<3076 x f32, 0>,
//                       memref<1024 x f32, 2>,
//                       memref<1 x i32>
//
ParseResult DmaStartOp::parse(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType srcMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
  OpAsmParser::OperandType dstMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
  OpAsmParser::OperandType numElementsInfo;
  OpAsmParser::OperandType tagMemrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
  SmallVector<OpAsmParser::OperandType, 2> strideInfo;

  SmallVector<Type, 3> types;
  auto indexType = parser.getBuilder().getIndexType();

  // Parse and resolve the following list of operands:
  // *) source memref followed by its indices (in square brackets).
  // *) destination memref followed by its indices (in square brackets).
  // *) dma size (the number of elements being transferred).
  if (parser.parseOperand(srcMemRefInfo) ||
      parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
      parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(numElementsInfo) ||
      parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
      parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
    return failure();

  // Parse optional stride and elements per stride.
  if (parser.parseTrailingOperandList(strideInfo))
    return failure();

  bool isStrided = strideInfo.size() == 2;
  if (!strideInfo.empty() && !isStrided) {
    return parser.emitError(parser.getNameLoc(),
                            "expected two stride related operands");
  }

  if (parser.parseColonTypeList(types))
    return failure();
  if (types.size() != 3)
    return parser.emitError(parser.getNameLoc(), "expected three types");

  if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
      parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
      parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
      parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
      // size should be an index.
      parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
      parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
      // tag indices should be index.
      parser.resolveOperands(tagIndexInfos, indexType, result.operands))
    return failure();

  auto memrefType0 = types[0].dyn_cast<MemRefType>();
  if (!memrefType0)
    return parser.emitError(parser.getNameLoc(),
                            "expected source to be of memref type");

  auto memrefType1 = types[1].dyn_cast<MemRefType>();
  if (!memrefType1)
    return parser.emitError(parser.getNameLoc(),
                            "expected destination to be of memref type");

  auto memrefType2 = types[2].dyn_cast<MemRefType>();
  if (!memrefType2)
    return parser.emitError(parser.getNameLoc(),
                            "expected tag to be of memref type");

  if (isStrided) {
    if (parser.resolveOperands(strideInfo, indexType, result.operands))
      return failure();
  }

  // Check that source/destination index list size matches associated rank.
  if (static_cast<int64_t>(srcIndexInfos.size()) != memrefType0.getRank() ||
      static_cast<int64_t>(dstIndexInfos.size()) != memrefType1.getRank())
    return parser.emitError(parser.getNameLoc(),
                            "memref rank not equal to indices count");
  if (static_cast<int64_t>(tagIndexInfos.size()) != memrefType2.getRank())
    return parser.emitError(parser.getNameLoc(),
                            "tag memref rank not equal to indices count");

  return success();
}

LogicalResult DmaStartOp::verify() {
  // DMAs are only expected between different memory spaces.
  if (getSrcMemorySpace() == getDstMemorySpace())
    return emitOpError("DMA should be between different memory spaces");
  if (getNumOperands() != getTagMemRefRank() + getSrcMemRefRank() +
                              getDstMemRefRank() + 3 + 1 &&
      getNumOperands() != getTagMemRefRank() + getSrcMemRefRank() +
                              getDstMemRefRank() + 3 + 1 + 2) {
    return emitOpError("incorrect number of operands");
  }
  return success();
}

LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  /// dma_start(memrefcast) -> dma_start
  return foldMemRefCast(*this);
}

// ---------------------------------------------------------------------------
// DmaWaitOp
// ---------------------------------------------------------------------------

void DmaWaitOp::build(Builder *builder, OperationState &result, Value tagMemRef,
                      ValueRange tagIndices, Value numElements) {
  result.addOperands(tagMemRef);
  result.addOperands(tagIndices);
  result.addOperands(numElements);
}

void DmaWaitOp::print(OpAsmPrinter &p) {
  p << "dma_wait " << getTagMemRef() << '[' << getTagIndices() << "], "
    << getNumElements();
  p.printOptionalAttrDict(getAttrs());
  p << " : " << getTagMemRef().getType();
}

// Parse DmaWaitOp.
// Ex:
//   dma_wait %tag[%index], %num_elements : memref<1 x i32, (d0) -> (d0), 4>
//
ParseResult DmaWaitOp::parse(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType tagMemrefInfo;
  SmallVector<OpAsmParser::OperandType, 2> tagIndexInfos;
  Type type;
  auto indexType = parser.getBuilder().getIndexType();
  OpAsmParser::OperandType numElementsInfo;

  // Parse tag memref, its indices, and dma size.
  if (parser.parseOperand(tagMemrefInfo) ||
      parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(numElementsInfo) ||
      parser.parseColonType(type) ||
      parser.resolveOperand(tagMemrefInfo, type, result.operands) ||
      parser.resolveOperands(tagIndexInfos, indexType, result.operands) ||
      parser.resolveOperand(numElementsInfo, indexType, result.operands))
    return failure();

  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return parser.emitError(parser.getNameLoc(),
                            "expected tag to be of memref type");

  if (static_cast<int64_t>(tagIndexInfos.size()) != memrefType.getRank())
    return parser.emitError(parser.getNameLoc(),
                            "tag memref rank not equal to indices count");

  return success();
}

LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dma_wait(memrefcast) -> dma_wait
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// ExtractElementOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(ExtractElementOp op) {
  // Verify the # indices match if we have a ranked type.
  auto aggregateType = op.getAggregate().getType().cast<ShapedType>();
  if (aggregateType.hasRank() &&
      aggregateType.getRank() != op.getNumOperands() - 1)
    return op.emitOpError("incorrect number of indices for extract_element");

  return success();
}

OpFoldResult ExtractElementOp::fold(ArrayRef<Attribute> operands) {
  assert(!operands.empty() && "extract_element takes at least one operand");

  // The aggregate operand must be a known constant.
  Attribute aggregate = operands.front();
  if (!aggregate)
    return {};

  // If this is a splat elements attribute, simply return the value. All of the
  // elements of a splat attribute are the same.
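  // e.g. (illustrative) extracting from dense<1.0> : tensor<4xf32> yields the
  // FloatAttr 1.0 regardless of the index operands.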
  if (auto splatAggregate = aggregate.dyn_cast<SplatElementsAttr>())
    return splatAggregate.getSplatValue();

  // Otherwise, collect the constant indices into the aggregate.
  SmallVector<uint64_t, 8> indices;
  for (Attribute index : llvm::drop_begin(operands, 1)) {
    if (!index || !index.isa<IntegerAttr>())
      return {};
    indices.push_back(index.cast<IntegerAttr>().getInt());
  }

  // If this is an elements attribute, query the value at the given indices.
  auto elementsAttr = aggregate.dyn_cast<ElementsAttr>();
  if (elementsAttr && elementsAttr.isValidIndex(indices))
    return elementsAttr.getValue(indices);
  return {};
}

//===----------------------------------------------------------------------===//
// FPExtOp
//===----------------------------------------------------------------------===//

bool FPExtOp::areCastCompatible(Type a, Type b) {
  if (auto fa = a.dyn_cast<FloatType>())
    if (auto fb = b.dyn_cast<FloatType>())
      return fa.getWidth() < fb.getWidth();
  if (auto va = a.dyn_cast<VectorType>())
    if (auto vb = b.dyn_cast<VectorType>())
      return va.getShape().equals(vb.getShape()) &&
             areCastCompatible(va.getElementType(), vb.getElementType());
  return false;
}

//===----------------------------------------------------------------------===//
// FPTruncOp
//===----------------------------------------------------------------------===//

bool FPTruncOp::areCastCompatible(Type a, Type b) {
  if (auto fa = a.dyn_cast<FloatType>())
    if (auto fb = b.dyn_cast<FloatType>())
      return fa.getWidth() > fb.getWidth();
  if (auto va = a.dyn_cast<VectorType>())
    if (auto vb = b.dyn_cast<VectorType>())
      return va.getShape().equals(vb.getShape()) &&
             areCastCompatible(va.getElementType(), vb.getElementType());
  return false;
}

//===----------------------------------------------------------------------===//
// IndexCastOp
//===----------------------------------------------------------------------===//

// Index cast is applicable from index to integer and backwards.
bool IndexCastOp::areCastCompatible(Type a, Type b) {
  return (a.isIndex() && b.isSignlessInteger()) ||
         (a.isSignlessInteger() && b.isIndex());
}

OpFoldResult IndexCastOp::fold(ArrayRef<Attribute> cstOperands) {
  // Fold IndexCast(IndexCast(x)) -> x
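  // e.g. (illustrative IR):
  //   %1 = index_cast %0 : i32 to index
  //   %2 = index_cast %1 : index to i32   // folds to %0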
  auto cast = dyn_cast_or_null<IndexCastOp>(getOperand().getDefiningOp());
  if (cast && cast.getOperand().getType() == getType())
    return cast.getOperand();

  // Fold IndexCast(constant) -> constant
  // A little hack because we go through int. Otherwise, the size
  // of the constant might need to change.
  if (auto value = cstOperands[0].dyn_cast_or_null<IntegerAttr>())
    return IntegerAttr::get(getType(), value.getInt());

  return {};
}

//===----------------------------------------------------------------------===//
// LoadOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(LoadOp op) {
  if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
    return op.emitOpError("incorrect number of indices for load");
  return success();
}

OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
  /// load(memrefcast) -> load
  if (succeeded(foldMemRefCast(*this)))
    return getResult();
  return OpFoldResult();
}

//===----------------------------------------------------------------------===//
// MemRefCastOp
//===----------------------------------------------------------------------===//

bool MemRefCastOp::areCastCompatible(Type a, Type b) {
  auto aT = a.dyn_cast<MemRefType>();
  auto bT = b.dyn_cast<MemRefType>();

  auto uaT = a.dyn_cast<UnrankedMemRefType>();
  auto ubT = b.dyn_cast<UnrankedMemRefType>();

  if (aT && bT) {
    if (aT.getElementType() != bT.getElementType())
      return false;
    if (aT.getAffineMaps() != bT.getAffineMaps()) {
      int64_t aOffset, bOffset;
      SmallVector<int64_t, 4> aStrides, bStrides;
      if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
          failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
          aStrides.size() != bStrides.size())
        return false;

      // Strides along a dimension/offset are compatible if the value in the
      // source memref is static and the value in the target memref is the
      // same. They are also compatible if either one is dynamic (see
      // description of MemRefCastOp for details).
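      // e.g. stride 4 -> stride 4, 4 -> dynamic, and dynamic -> 4 are all
      // compatible, while 4 -> 8 is not (illustrative values).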
      auto checkCompatible = [](int64_t a, int64_t b) {
        return (a == MemRefType::getDynamicStrideOrOffset() ||
                b == MemRefType::getDynamicStrideOrOffset() || a == b);
      };
      if (!checkCompatible(aOffset, bOffset))
        return false;
      for (auto aStride : enumerate(aStrides))
        if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
          return false;
    }
    if (aT.getMemorySpace() != bT.getMemorySpace())
      return false;

    // They must have the same rank, and any specified dimensions must match.
    if (aT.getRank() != bT.getRank())
      return false;

    for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
      int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
      if (aDim != -1 && bDim != -1 && aDim != bDim)
        return false;
    }
    return true;
  } else {
    if (!aT && !uaT)
      return false;
    if (!bT && !ubT)
      return false;
    // Unranked to unranked casting is unsupported.
    if (uaT && ubT)
      return false;

    auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
    auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
    if (aEltType != bEltType)
      return false;

    auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
    auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
    if (aMemSpace != bMemSpace)
      return false;

    return true;
  }

  return false;
}

OpFoldResult MemRefCastOp::fold(ArrayRef<Attribute> operands) {
  return impl::foldCastOp(*this);
}

//===----------------------------------------------------------------------===//
// MulFOp
//===----------------------------------------------------------------------===//

OpFoldResult MulFOp::fold(ArrayRef<Attribute> operands) {
  return constFoldBinaryOp<FloatAttr>(
      operands, [](APFloat a, APFloat b) { return a * b; });
}

//===----------------------------------------------------------------------===//
// MulIOp
//===----------------------------------------------------------------------===//

OpFoldResult MulIOp::fold(ArrayRef<Attribute> operands) {
  /// muli(x, 0) -> 0
  if (matchPattern(rhs(), m_Zero()))
    return rhs();
  /// muli(x, 1) -> x
  if (matchPattern(rhs(), m_One()))
    return lhs();

  // TODO: Handle the overflow case.
  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a * b; });
}

//===----------------------------------------------------------------------===//
// OrOp
//===----------------------------------------------------------------------===//

OpFoldResult OrOp::fold(ArrayRef<Attribute> operands) {
  /// or(x, 0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();
  /// or(x, x) -> x
  if (lhs() == rhs())
    return rhs();

  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a | b; });
}

//===----------------------------------------------------------------------===//
// PrefetchOp
//===----------------------------------------------------------------------===//

static void print(OpAsmPrinter &p, PrefetchOp op) {
  p << PrefetchOp::getOperationName() << " " << op.memref() << '[';
  p.printOperands(op.indices());
  p << ']' << ", " << (op.isWrite() ? "write" : "read");
  p << ", locality<" << op.localityHint();
  p << ">, " << (op.isDataCache() ? "data" : "instr");
  p.printOptionalAttrDict(
      op.getAttrs(),
      /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
  p << " : " << op.getMemRefType();
}

static ParseResult parsePrefetchOp(OpAsmParser &parser,
                                   OperationState &result) {
  OpAsmParser::OperandType memrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> indexInfo;
  IntegerAttr localityHint;
  MemRefType type;
  StringRef readOrWrite, cacheType;

  auto indexTy = parser.getBuilder().getIndexType();
  auto i32Type = parser.getBuilder().getIntegerType(32);
  if (parser.parseOperand(memrefInfo) ||
      parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
      parser.parseComma() || parser.parseKeyword("locality") ||
      parser.parseLess() ||
      parser.parseAttribute(localityHint, i32Type, "localityHint",
                            result.attributes) ||
      parser.parseGreater() || parser.parseComma() ||
      parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
      parser.resolveOperand(memrefInfo, type, result.operands) ||
      parser.resolveOperands(indexInfo, indexTy, result.operands))
    return failure();

  if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
    return parser.emitError(parser.getNameLoc(),
                            "rw specifier has to be 'read' or 'write'");
  result.addAttribute(
      PrefetchOp::getIsWriteAttrName(),
      parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));

  if (!cacheType.equals("data") && !cacheType.equals("instr"))
    return parser.emitError(parser.getNameLoc(),
                            "cache type has to be 'data' or 'instr'");

  result.addAttribute(
      PrefetchOp::getIsDataCacheAttrName(),
      parser.getBuilder().getBoolAttr(cacheType.equals("data")));

  return success();
}

static LogicalResult verify(PrefetchOp op) {
  if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
    return op.emitOpError("incorrect number of indices");

  return success();
}

LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  // prefetch(memrefcast) -> prefetch
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// RankOp
//===----------------------------------------------------------------------===//

OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
  // Constant fold rank when the rank of the tensor is known.
  auto type = getOperand().getType();
  if (auto tensorType = type.dyn_cast<RankedTensorType>())
    return IntegerAttr::get(IndexType::get(getContext()), tensorType.getRank());
  return IntegerAttr();
}

//===----------------------------------------------------------------------===//
// ReturnOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(ReturnOp op) {
  auto function = cast<FuncOp>(op.getParentOp());

  // The operand number and types must match the function signature.
  const auto &results = function.getType().getResults();
  if (op.getNumOperands() != results.size())
    return op.emitOpError("has ")
           << op.getNumOperands()
           << " operands, but enclosing function returns " << results.size();

  for (unsigned i = 0, e = results.size(); i != e; ++i)
    if (op.getOperand(i).getType() != results[i])
      return op.emitError()
             << "type of return operand " << i << " ("
             << op.getOperand(i).getType()
             << ") doesn't match function result type (" << results[i] << ")";

  return success();
}

//===----------------------------------------------------------------------===//
// SelectOp
//===----------------------------------------------------------------------===//

OpFoldResult SelectOp::fold(ArrayRef<Attribute> operands) {
  auto condition = getCondition();

  // select true, %0, %1 => %0
  if (matchPattern(condition, m_One()))
    return getTrueValue();

  // select false, %0, %1 => %1
  if (matchPattern(condition, m_Zero()))
    return getFalseValue();
  return nullptr;
}

//===----------------------------------------------------------------------===//
// SignExtendIOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(SignExtendIOp op) {
  // Get the scalar type (which is either directly the type of the operand
  // or the vector's/tensor's element type).
  auto srcType = getElementTypeOrSelf(op.getOperand().getType());
  auto dstType = getElementTypeOrSelf(op.getType());

  // For now, index is forbidden for the source and the destination type.
  if (srcType.isa<IndexType>())
    return op.emitError() << srcType << " is not a valid operand type";
  if (dstType.isa<IndexType>())
    return op.emitError() << dstType << " is not a valid result type";

  if (srcType.cast<IntegerType>().getWidth() >=
      dstType.cast<IntegerType>().getWidth())
    return op.emitError("result type ")
           << dstType << " must be wider than operand type " << srcType;

  return success();
}

//===----------------------------------------------------------------------===//
// SignedDivIOp
//===----------------------------------------------------------------------===//

OpFoldResult SignedDivIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "binary operation takes two operands");

  // Don't fold if it would overflow or if it requires a division by zero.
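  // e.g. INT_MIN / -1 overflows and x / 0 is undefined, so in both cases the
  // lambda below flags overflowOrDiv0 and a null Attribute is returned,
  // leaving the op un-folded.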
  bool overflowOrDiv0 = false;
  auto result = constFoldBinaryOp<IntegerAttr>(operands, [&](APInt a, APInt b) {
    if (overflowOrDiv0 || !b) {
      overflowOrDiv0 = true;
      return a;
    }
    return a.sdiv_ov(b, overflowOrDiv0);
  });
  return overflowOrDiv0 ? Attribute() : result;
}

//===----------------------------------------------------------------------===//
// SignedRemIOp
//===----------------------------------------------------------------------===//

OpFoldResult SignedRemIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "remi_signed takes two operands");

  auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
  if (!rhs)
    return {};
  auto rhsValue = rhs.getValue();

  // x % 1 = 0
  if (rhsValue.isOneValue())
    return IntegerAttr::get(rhs.getType(), APInt(rhsValue.getBitWidth(), 0));

  // Don't fold if it requires division by zero.
  if (rhsValue.isNullValue())
    return {};

  auto lhs = operands.front().dyn_cast_or_null<IntegerAttr>();
  if (!lhs)
    return {};
  return IntegerAttr::get(lhs.getType(), lhs.getValue().srem(rhsValue));
}

//===----------------------------------------------------------------------===//
// SIToFPOp
//===----------------------------------------------------------------------===//

// sitofp is applicable from integer types to float types.
bool SIToFPOp::areCastCompatible(Type a, Type b) {
  return a.isSignlessInteger() && b.isa<FloatType>();
}

//===----------------------------------------------------------------------===//
// SplatOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(SplatOp op) {
  // TODO: we could replace this by a trait.
  if (op.getOperand().getType() !=
      op.getType().cast<ShapedType>().getElementType())
    return op.emitError("operand should be of elemental type of result type");

  return success();
}

// Constant folding hook for SplatOp.
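// e.g. (illustrative IR):
//   %cst = constant 1.0 : f32
//   %v = splat %cst : vector<4xf32>   // folds to dense<1.0> : vector<4xf32>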
OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 1 && "splat takes one operand");

  auto constOperand = operands.front();
  if (!constOperand ||
      (!constOperand.isa<IntegerAttr>() && !constOperand.isa<FloatAttr>()))
    return {};

  auto shapedType = getType().cast<ShapedType>();
  assert(shapedType.getElementType() == constOperand.getType() &&
         "incorrect input attribute type for folding");

  // SplatElementsAttr::get treats single value for second arg as being a splat.
  return SplatElementsAttr::get(shapedType, {constOperand});
}

//===----------------------------------------------------------------------===//
// StoreOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(StoreOp op) {
  if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
    return op.emitOpError("store index operand count not equal to memref rank");

  return success();
}

LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
                            SmallVectorImpl<OpFoldResult> &results) {
  /// store(memrefcast) -> store
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// SubFOp
//===----------------------------------------------------------------------===//

OpFoldResult SubFOp::fold(ArrayRef<Attribute> operands) {
  return constFoldBinaryOp<FloatAttr>(
      operands, [](APFloat a, APFloat b) { return a - b; });
}

//===----------------------------------------------------------------------===//
// SubIOp
//===----------------------------------------------------------------------===//

OpFoldResult SubIOp::fold(ArrayRef<Attribute> operands) {
  // subi(x, x) -> 0
  if (getOperand(0) == getOperand(1))
    return Builder(getContext()).getZeroAttr(getType());

  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a - b; });
}

//===----------------------------------------------------------------------===//
// SubViewOp
//===----------------------------------------------------------------------===//

// Returns a MemRefType with dynamic sizes and offset and the same strides as
// the `memRefType` passed as argument (for now the strides are also degraded
// to fully dynamic; see the TODO in the body).
// TODO(andydavis,ntv) Evolve to a more powerful inference that can also keep
// sizes and offset static.
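// For illustration (a sketch; the exact printed form of the layout map may
// differ): memref<8x16xf32> would infer to something like
//   memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + s2)>>
// with all sizes, strides, and the offset kept dynamic.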
static Type inferSubViewResultType(MemRefType memRefType) {
  auto rank = memRefType.getRank();
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto res = getStridesAndOffset(memRefType, strides, offset);
  assert(succeeded(res) && "SubViewOp expected strided memref type");
  (void)res;

  // Assume sizes and offset are fully dynamic for now until canonicalization
  // occurs on the ranges. Typed strides don't change though.
  offset = MemRefType::getDynamicStrideOrOffset();
  // Overwrite strides because verifier will not pass.
  // TODO(b/144419106): don't force degrade the strides to fully dynamic.
  for (auto &stride : strides)
    stride = MemRefType::getDynamicStrideOrOffset();
  auto stridedLayout =
      makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
  SmallVector<int64_t, 4> sizes(rank, ShapedType::kDynamicSize);
  return MemRefType::Builder(memRefType)
      .setShape(sizes)
      .setAffineMaps(stridedLayout);
}

void mlir::SubViewOp::build(Builder *b, OperationState &result, Value source,
                            ValueRange offsets, ValueRange sizes,
                            ValueRange strides, Type resultType,
                            ArrayRef<NamedAttribute> attrs) {
  if (!resultType)
    resultType = inferSubViewResultType(source.getType().cast<MemRefType>());
  build(b, result, resultType, source, offsets, sizes, strides);
  result.addAttributes(attrs);
}

void mlir::SubViewOp::build(Builder *b, OperationState &result, Type resultType,
                            Value source) {
  build(b, result, source, /*offsets=*/{}, /*sizes=*/{}, /*strides=*/{},
        resultType);
}

static LogicalResult verify(SubViewOp op) {
  auto baseType = op.getBaseMemRefType().cast<MemRefType>();
  auto subViewType = op.getType();

  // The rank of the base and result subview must match.
  if (baseType.getRank() != subViewType.getRank()) {
    return op.emitError(
        "expected rank of result type to match rank of base type");
  }

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != subViewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and subview memref type " << subViewType;

  // Verify that the base memref type has a strided layout map.
  int64_t baseOffset;
  SmallVector<int64_t, 4> baseStrides;
  if (failed(getStridesAndOffset(baseType, baseStrides, baseOffset)))
    return op.emitError("base type ") << baseType << " is not strided";

  // Verify that the result memref type has a strided layout map.
  int64_t subViewOffset;
  SmallVector<int64_t, 4> subViewStrides;
  if (failed(getStridesAndOffset(subViewType, subViewStrides, subViewOffset)))
    return op.emitError("result type ") << subViewType << " is not strided";

  // Num offsets should either be zero or rank of memref.
  if (op.getNumOffsets() != 0 && op.getNumOffsets() != subViewType.getRank()) {
    return op.emitError("expected number of dynamic offsets specified to match "
                        "the rank of the result type ")
           << subViewType;
  }

  // Num sizes should either be zero or rank of memref.
  if (op.getNumSizes() != 0 && op.getNumSizes() != subViewType.getRank()) {
    return op.emitError("expected number of dynamic sizes specified to match "
                        "the rank of the result type ")
           << subViewType;
  }

  // Num strides should either be zero or rank of memref.
  if (op.getNumStrides() != 0 && op.getNumStrides() != subViewType.getRank()) {
    return op.emitError("expected number of dynamic strides specified to match "
                        "the rank of the result type ")
           << subViewType;
  }

  // Verify that if the shape of the subview type is static, then sizes are not
  // dynamic values, and vice versa.
  if ((subViewType.hasStaticShape() && op.getNumSizes() != 0) ||
      (op.getNumSizes() == 0 && !subViewType.hasStaticShape())) {
    return op.emitError("invalid to specify dynamic sizes when subview result "
                        "type is statically shaped and vice versa");
  }

  // Verify that if dynamic sizes are specified, then the result memref type
  // has fully dynamic dimensions.
  if (op.getNumSizes() > 0) {
    if (llvm::any_of(subViewType.getShape(), [](int64_t dim) {
          return dim != ShapedType::kDynamicSize;
        })) {
      // TODO: This is based on the assumption that the number of size
      // arguments is either 0, or the rank of the result type. It is possible
      // to have more fine-grained verification where only particular
      // dimensions are dynamic. That probably needs further changes to the
      // shape op specification.
      return op.emitError("expected shape of result type to be fully dynamic "
                          "when sizes are specified");
    }
  }

  // Verify that if dynamic offsets are specified or the base memref has a
  // dynamic offset or dynamic strides, then the subview offset is dynamic.
  if ((op.getNumOffsets() > 0 ||
       baseOffset == MemRefType::getDynamicStrideOrOffset() ||
       llvm::is_contained(baseStrides,
                          MemRefType::getDynamicStrideOrOffset())) &&
      subViewOffset != MemRefType::getDynamicStrideOrOffset()) {
    return op.emitError(
        "expected result memref layout map to have dynamic offset");
  }

  // For now, verify that if dynamic strides are specified, then all strides of
  // the result memref type are dynamic.
  if (op.getNumStrides() > 0) {
    if (llvm::any_of(subViewStrides, [](int64_t stride) {
          return stride != MemRefType::getDynamicStrideOrOffset();
        })) {
      return op.emitError("expected result type to have dynamic strides");
    }
  }

  // If the base memref has a dynamic stride along some dimension, then the
  // corresponding stride of the subview must also be dynamic.
  assert(baseStrides.size() == subViewStrides.size());
  for (auto stride : enumerate(baseStrides)) {
    if (stride.value() == MemRefType::getDynamicStrideOrOffset() &&
        subViewStrides[stride.index()] !=
            MemRefType::getDynamicStrideOrOffset()) {
      return op.emitError(
          "expected result type to have dynamic stride along a dimension if "
          "the base memref type has dynamic stride along that dimension");
    }
  }
  return success();
}

raw_ostream &mlir::operator<<(raw_ostream &os, SubViewOp::Range &range) {
  return os << "range " << range.offset << ":" << range.size << ":"
            << range.stride;
}

SmallVector<SubViewOp::Range, 8> SubViewOp::getRanges() {
  SmallVector<Range, 8> res;
  unsigned rank = getType().getRank();
  res.reserve(rank);
  for (unsigned i = 0; i < rank; ++i)
    res.emplace_back(Range{*(offsets().begin() + i), *(sizes().begin() + i),
                           *(strides().begin() + i)});
  return res;
}

LogicalResult
SubViewOp::getStaticStrides(SmallVectorImpl<int64_t> &staticStrides) {
  // If the strides are dynamic return failure.
  if (getNumStrides())
    return failure();

  // When static, the stride operands can be retrieved by taking the strides of
  // the result of the subview op, and dividing them by the strides of the base
  // memref.
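  // e.g. with base strides [16, 1] and result strides [32, 2] the recovered
  // static strides are [2, 2] (illustrative values).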
  int64_t resultOffset, baseOffset;
  SmallVector<int64_t, 2> resultStrides, baseStrides;
  if (failed(
          getStridesAndOffset(getBaseMemRefType(), baseStrides, baseOffset)) ||
      llvm::is_contained(baseStrides, MemRefType::getDynamicStrideOrOffset()) ||
      failed(getStridesAndOffset(getType(), resultStrides, resultOffset)))
    return failure();

  assert(static_cast<int64_t>(resultStrides.size()) == getType().getRank() &&
         baseStrides.size() == resultStrides.size() &&
         "base and result memrefs must have the same rank");
  assert(!llvm::is_contained(resultStrides,
                             MemRefType::getDynamicStrideOrOffset()) &&
         "strides of subview op must be static, when there are no dynamic "
         "strides specified");
  staticStrides.resize(getType().getRank());
  for (auto resultStride : enumerate(resultStrides)) {
    auto baseStride = baseStrides[resultStride.index()];
    // The result stride is expected to be a multiple of the base stride. Abort
    // if that is not the case.
    if (resultStride.value() < baseStride ||
        resultStride.value() % baseStride != 0)
      return failure();
    staticStrides[resultStride.index()] = resultStride.value() / baseStride;
  }
  return success();
}

namespace {

/// Pattern to rewrite a subview op with constant size arguments.
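/// For illustration, a sketch of the rewrite (names and types are
/// hypothetical):
///   %1 = subview %0[%i, %j][%c4, %c8][%s0, %s1]
///       : memref<?x?xf32, ...> to memref<?x?xf32, ...>
/// becomes a subview with no size operands and result type
/// memref<4x8xf32, ...>, followed by a memref_cast back to the original
/// result type so existing uses remain valid.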
class SubViewOpShapeFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    MemRefType subViewType = subViewOp.getType();
    // Follow an all-or-nothing approach for shapes for now. If all the size
    // operands are constants, fold them into the type of the result memref.
    if (subViewType.hasStaticShape() ||
        llvm::any_of(subViewOp.sizes(), [](Value operand) {
          return !matchPattern(operand, m_ConstantIndex());
        })) {
      return failure();
    }
    SmallVector<int64_t, 4> staticShape(subViewOp.getNumSizes());
    for (auto size : llvm::enumerate(subViewOp.sizes())) {
      auto defOp = size.value().getDefiningOp();
      assert(defOp);
      staticShape[size.index()] = cast<ConstantIndexOp>(defOp).getValue();
    }
    MemRefType newMemRefType =
        MemRefType::Builder(subViewType).setShape(staticShape);
    auto newSubViewOp = rewriter.create<SubViewOp>(
        subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
        ArrayRef<Value>(), subViewOp.strides(), newMemRefType);
    // Insert a memref_cast for compatibility of the uses of the op.
    rewriter.replaceOpWithNewOp<MemRefCastOp>(subViewOp, newSubViewOp,
                                              subViewOp.getType());
    return success();
  }
};
|
2019-11-07 08:04:33 -08:00
|
|
|
|
2020-03-04 09:44:36 -08:00
|
|
|
// Pattern to rewrite a subview op with constant stride arguments.
class SubViewOpStrideFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    if (subViewOp.getNumStrides() == 0) {
      return failure();
    }
    // Follow an all-or-nothing approach for strides for now: if all the
    // stride operands are constants, fold them into the strides of the
    // result memref.
    int64_t baseOffset, resultOffset;
    SmallVector<int64_t, 4> baseStrides, resultStrides;
    MemRefType subViewType = subViewOp.getType();
    if (failed(getStridesAndOffset(subViewOp.getBaseMemRefType(), baseStrides,
                                   baseOffset)) ||
        failed(getStridesAndOffset(subViewType, resultStrides, resultOffset)) ||
        llvm::is_contained(baseStrides,
                           MemRefType::getDynamicStrideOrOffset()) ||
        llvm::any_of(subViewOp.strides(), [](Value stride) {
          return !matchPattern(stride, m_ConstantIndex());
        })) {
      return failure();
    }

    SmallVector<int64_t, 4> staticStrides(subViewOp.getNumStrides());
    for (auto stride : llvm::enumerate(subViewOp.strides())) {
      auto defOp = stride.value().getDefiningOp();
      assert(defOp);
      assert(baseStrides[stride.index()] > 0);
      staticStrides[stride.index()] =
          cast<ConstantIndexOp>(defOp).getValue() * baseStrides[stride.index()];
    }
    AffineMap layoutMap = makeStridedLinearLayoutMap(
        staticStrides, resultOffset, rewriter.getContext());
    MemRefType newMemRefType =
        MemRefType::Builder(subViewType).setAffineMaps(layoutMap);
    auto newSubViewOp = rewriter.create<SubViewOp>(
        subViewOp.getLoc(), subViewOp.source(), subViewOp.offsets(),
        subViewOp.sizes(), ArrayRef<Value>(), newMemRefType);
    // Insert a memref_cast for compatibility of the uses of the op.
    rewriter.replaceOpWithNewOp<MemRefCastOp>(subViewOp, newSubViewOp,
                                              subViewOp.getType());
    return success();
  }
};
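
// For illustration, a sketch of the rewrite performed by the pattern above,
// assuming %c2 is defined by a constant index op (syntax and the #map layouts
// are approximate):
//
//   %0 = subview %src[%i][%n][%c2] : memref<?xf32> to memref<?xf32, #map0>
//
// becomes, with the stride folded into the result layout map,
//
//   %1 = subview %src[%i][%n][] : memref<?xf32> to memref<?xf32, #map1>
//   %2 = memref_cast %1 : memref<?xf32, #map1> to memref<?xf32, #map0>
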
// Pattern to rewrite a subview op with constant offset arguments.
class SubViewOpOffsetFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    if (subViewOp.getNumOffsets() == 0) {
      return failure();
    }
    // Follow an all-or-nothing approach for offsets for now: if all the
    // offset operands are constants, fold them into the offset of the result
    // memref.
    int64_t baseOffset, resultOffset;
    SmallVector<int64_t, 4> baseStrides, resultStrides;
    MemRefType subViewType = subViewOp.getType();
    if (failed(getStridesAndOffset(subViewOp.getBaseMemRefType(), baseStrides,
                                   baseOffset)) ||
        failed(getStridesAndOffset(subViewType, resultStrides, resultOffset)) ||
        llvm::is_contained(baseStrides,
                           MemRefType::getDynamicStrideOrOffset()) ||
        baseOffset == MemRefType::getDynamicStrideOrOffset() ||
        llvm::any_of(subViewOp.offsets(), [](Value offset) {
          return !matchPattern(offset, m_ConstantIndex());
        })) {
      return failure();
    }

    auto staticOffset = baseOffset;
    for (auto offset : llvm::enumerate(subViewOp.offsets())) {
      auto defOp = offset.value().getDefiningOp();
      assert(defOp);
      assert(baseStrides[offset.index()] > 0);
      staticOffset +=
          cast<ConstantIndexOp>(defOp).getValue() * baseStrides[offset.index()];
    }

    AffineMap layoutMap = makeStridedLinearLayoutMap(
        resultStrides, staticOffset, rewriter.getContext());
    MemRefType newMemRefType =
        MemRefType::Builder(subViewType).setAffineMaps(layoutMap);
    auto newSubViewOp = rewriter.create<SubViewOp>(
        subViewOp.getLoc(), subViewOp.source(), ArrayRef<Value>(),
        subViewOp.sizes(), subViewOp.strides(), newMemRefType);
    // Insert a memref_cast for compatibility of the uses of the op.
    rewriter.replaceOpWithNewOp<MemRefCastOp>(subViewOp, newSubViewOp,
                                              subViewOp.getType());
    return success();
  }
};
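
// For illustration, a sketch of the rewrite performed by the pattern above
// (approximate syntax; #map0/#map1 are hypothetical layouts):
//
//   %0 = subview %src[%c8][%n][%s] : memref<?xf32> to memref<?xf32, #map0>
//
// becomes, with the constant offset folded into the result layout map,
//
//   %1 = subview %src[][%n][%s] : memref<?xf32> to memref<?xf32, #map1>
//   %2 = memref_cast %1 : memref<?xf32, #map1> to memref<?xf32, #map0>
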
} // end anonymous namespace

void SubViewOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                            MLIRContext *context) {
  results.insert<SubViewOpShapeFolder, SubViewOpStrideFolder,
                 SubViewOpOffsetFolder>(context);
}

//===----------------------------------------------------------------------===//
// TensorCastOp
//===----------------------------------------------------------------------===//

bool TensorCastOp::areCastCompatible(Type a, Type b) {
  auto aT = a.dyn_cast<TensorType>();
  auto bT = b.dyn_cast<TensorType>();
  if (!aT || !bT)
    return false;

  if (aT.getElementType() != bT.getElementType())
    return false;

  return succeeded(verifyCompatibleShape(aT, bT));
}
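
// For illustration, the intended behavior of the check above (not an
// exhaustive list):
//
//   areCastCompatible(tensor<?xf32>, tensor<4xf32>) -> true
//   areCastCompatible(tensor<4xf32>, tensor<4xi32>) -> false  (element types)
//   areCastCompatible(tensor<4xf32>, tensor<8xf32>) -> false  (static shapes)
//   areCastCompatible(memref<4xf32>, tensor<4xf32>) -> false  (not a tensor)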

OpFoldResult TensorCastOp::fold(ArrayRef<Attribute> operands) {
  return impl::foldCastOp(*this);
}

//===----------------------------------------------------------------------===//
// Helpers for Tensor[Load|Store]Op
//===----------------------------------------------------------------------===//

static Type getTensorTypeFromMemRefType(Type type) {
  if (auto memref = type.dyn_cast<MemRefType>())
    return RankedTensorType::get(memref.getShape(), memref.getElementType());
  return NoneType::get(type.getContext());
}

//===----------------------------------------------------------------------===//
// TruncateIOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(TruncateIOp op) {
  auto srcType = getElementTypeOrSelf(op.getOperand().getType());
  auto dstType = getElementTypeOrSelf(op.getType());

  if (srcType.isa<IndexType>())
    return op.emitError() << srcType << " is not a valid operand type";
  if (dstType.isa<IndexType>())
    return op.emitError() << dstType << " is not a valid result type";

  if (srcType.cast<IntegerType>().getWidth() <=
      dstType.cast<IntegerType>().getWidth())
    return op.emitError("operand type ")
           << srcType << " must be wider than result type " << dstType;

  return success();
}
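
// For illustration, the constraints enforced above (approximate examples):
//
//   %0 = trunci %a : i32 to i16     // ok: 32 is wider than 16
//   %1 = trunci %b : i16 to i16     // error: must strictly narrow
//   %2 = trunci %c : index to i32   // error: index is not a valid operand type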

//===----------------------------------------------------------------------===//
// UnsignedDivIOp
//===----------------------------------------------------------------------===//

OpFoldResult UnsignedDivIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "binary operation takes two operands");

  // Don't fold if it would require a division by zero.
  bool div0 = false;
  auto result = constFoldBinaryOp<IntegerAttr>(operands, [&](APInt a, APInt b) {
    if (div0 || !b) {
      div0 = true;
      return a;
    }
    return a.udiv(b);
  });
  return div0 ? Attribute() : result;
}
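
// For illustration, the folding behavior above on constant operands (values
// written as unsigned decimals):
//
//   divi_unsigned 7, 2 -> 3   (unsigned division truncates)
//   divi_unsigned 6, 3 -> 2
//   divi_unsigned x, 0 -> not folded (would require a division by zero)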

//===----------------------------------------------------------------------===//
// UnsignedRemIOp
//===----------------------------------------------------------------------===//

OpFoldResult UnsignedRemIOp::fold(ArrayRef<Attribute> operands) {
  assert(operands.size() == 2 && "remi_unsigned takes two operands");

  auto rhs = operands.back().dyn_cast_or_null<IntegerAttr>();
  if (!rhs)
    return {};
  auto rhsValue = rhs.getValue();

  // x % 1 = 0
  if (rhsValue.isOneValue())
    return IntegerAttr::get(rhs.getType(), APInt(rhsValue.getBitWidth(), 0));

  // Don't fold if it requires division by zero.
  if (rhsValue.isNullValue())
    return {};

  auto lhs = operands.front().dyn_cast_or_null<IntegerAttr>();
  if (!lhs)
    return {};
  return IntegerAttr::get(lhs.getType(), lhs.getValue().urem(rhsValue));
}
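
// For illustration, the folding behavior above:
//
//   remi_unsigned 7, 2 -> 1
//   remi_unsigned x, 1 -> 0   (folds even when x is not a constant)
//   remi_unsigned x, 0 -> not folded (division by zero)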

//===----------------------------------------------------------------------===//
// ViewOp
//===----------------------------------------------------------------------===//

static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType srcInfo;
  SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
  SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
  auto indexType = parser.getBuilder().getIndexType();
  Type srcType, dstType;
  llvm::SMLoc offsetLoc;
  if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
      parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
    return failure();

  if (offsetInfo.size() > 1)
    return parser.emitError(offsetLoc) << "expects 0 or 1 offset operand";

  return failure(
      parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(srcType) ||
      parser.resolveOperand(srcInfo, srcType, result.operands) ||
      parser.resolveOperands(offsetInfo, indexType, result.operands) ||
      parser.resolveOperands(sizesInfo, indexType, result.operands) ||
      parser.parseKeywordType("to", dstType) ||
      parser.addTypeToList(dstType, result.types));
}

static void print(OpAsmPrinter &p, ViewOp op) {
  p << op.getOperationName() << ' ' << op.getOperand(0) << '[';
  auto dynamicOffset = op.getDynamicOffset();
  if (dynamicOffset != nullptr)
    p.printOperand(dynamicOffset);
  p << "][" << op.getDynamicSizes() << ']';
  p.printOptionalAttrDict(op.getAttrs());
  p << " : " << op.getOperand(0).getType() << " to " << op.getType();
}
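
// For illustration, the custom form handled by the parser/printer above
// (types and the #map layout are hypothetical):
//
//   %1 = view %0[%offset][%size0, %size1]
//       : memref<2048xi8> to memref<?x?xf32, #map>
//
// The first bracketed list holds the optional dynamic offset operand and the
// second holds the dynamic size operands.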

Value ViewOp::getDynamicOffset() {
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto result =
      succeeded(mlir::getStridesAndOffset(getType(), strides, offset));
  assert(result);
  // If the result type has a dynamic offset, it is the first index operand,
  // right after the source memref.
  if (result && offset == MemRefType::getDynamicStrideOrOffset())
    return getOperand(1);
  return nullptr;
}

static LogicalResult verifyDynamicStrides(MemRefType memrefType,
                                          ArrayRef<int64_t> strides) {
  unsigned rank = memrefType.getRank();
  assert(rank == strides.size());
  bool dynamicStrides = false;
  for (int i = rank - 2; i >= 0; --i) {
    // If size at dim 'i + 1' is dynamic, set the 'dynamicStrides' flag.
    if (memrefType.isDynamicDim(i + 1))
      dynamicStrides = true;
    // If stride at dim 'i' is not dynamic, return error.
    if (dynamicStrides && strides[i] != MemRefType::getDynamicStrideOrOffset())
      return failure();
  }
  return success();
}
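
// For example (illustrative): given a view type memref<4x?x8xf32, #map>, any
// stride that spans a dynamic trailing dimension must itself be dynamic, so
// strides (?, 8, 1) verify while (32, 8, 1) do not, because the stride of
// dim 0 spans the dynamic dim 1.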

static LogicalResult verify(ViewOp op) {
  auto baseType = op.getOperand(0).getType().cast<MemRefType>();
  auto viewType = op.getResult().getType().cast<MemRefType>();

  // The base memref should have an identity layout map (or none).
  if (baseType.getAffineMaps().size() > 1 ||
      (baseType.getAffineMaps().size() == 1 &&
       !baseType.getAffineMaps()[0].isIdentity()))
    return op.emitError("unsupported map for base memref type ") << baseType;

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != viewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and view memref type " << viewType;

  // Verify that the result memref type has a strided layout map.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(viewType, strides, offset)))
    return op.emitError("result type ") << viewType << " is not strided";

  // Verify that we have the correct number of operands for the result type.
  unsigned memrefOperandCount = 1;
  unsigned numDynamicDims = viewType.getNumDynamicDims();
  unsigned dynamicOffsetCount =
      offset == MemRefType::getDynamicStrideOrOffset() ? 1 : 0;
  if (op.getNumOperands() !=
      memrefOperandCount + numDynamicDims + dynamicOffsetCount)
    return op.emitError("incorrect number of operands for type ") << viewType;

  // Verify that dynamic stride symbols were added to the correct dimensions
  // based on the dynamic sizes.
  if (failed(verifyDynamicStrides(viewType, strides)))
    return op.emitError("incorrect dynamic strides in view memref type ")
           << viewType;
  return success();
}

namespace {

struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
  using OpRewritePattern<ViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ViewOp viewOp,
                                PatternRewriter &rewriter) const override {
    // Return if none of the operands are constants.
    if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
          return matchPattern(operand, m_ConstantIndex());
        }))
      return failure();

    // Get the result memref type.
    auto memrefType = viewOp.getType();
    if (memrefType.getAffineMaps().size() > 1)
      return failure();
    auto map = memrefType.getAffineMaps().empty()
                   ? AffineMap::getMultiDimIdentityMap(memrefType.getRank(),
                                                       rewriter.getContext())
                   : memrefType.getAffineMaps()[0];

    // Get the offset from the old memref view type 'memrefType'.
    int64_t oldOffset;
    SmallVector<int64_t, 4> oldStrides;
    if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
      return failure();

    SmallVector<Value, 4> newOperands;

    // Fold the dynamic offset operand if it is produced by a constant.
    auto dynamicOffset = viewOp.getDynamicOffset();
    int64_t newOffset = oldOffset;
    unsigned dynamicOffsetOperandCount = 0;
    if (dynamicOffset != nullptr) {
      auto *defOp = dynamicOffset.getDefiningOp();
      if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
        // The dynamic offset will be folded into the map.
        newOffset = constantIndexOp.getValue();
      } else {
        // Unable to fold the dynamic offset. Add it to the 'newOperands' list.
        newOperands.push_back(dynamicOffset);
        dynamicOffsetOperandCount = 1;
      }
    }

    // Fold any dynamic dim operands which are produced by a constant.
    SmallVector<int64_t, 4> newShapeConstants;
    newShapeConstants.reserve(memrefType.getRank());

    unsigned dynamicDimPos = viewOp.getDynamicSizesOperandStart();
    unsigned rank = memrefType.getRank();
    for (unsigned dim = 0, e = rank; dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
      if (!ShapedType::isDynamic(dimSize)) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
      auto *defOp = viewOp.getOperand(dynamicDimPos).getDefiningOp();
      if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
        // The dynamic shape dimension will be folded.
        newShapeConstants.push_back(constantIndexOp.getValue());
      } else {
        // The dynamic shape dimension is not folded; copy the operand from
        // the old memref.
        newShapeConstants.push_back(dimSize);
        newOperands.push_back(viewOp.getOperand(dynamicDimPos));
      }
      dynamicDimPos++;
    }

    // Compute new strides based on 'newShapeConstants'.
    SmallVector<int64_t, 4> newStrides(rank);
    newStrides[rank - 1] = 1;
    bool dynamicStrides = false;
    for (int i = rank - 2; i >= 0; --i) {
      if (ShapedType::isDynamic(newShapeConstants[i + 1]))
        dynamicStrides = true;
      if (dynamicStrides)
        newStrides[i] = MemRefType::getDynamicStrideOrOffset();
      else
        newStrides[i] = newShapeConstants[i + 1] * newStrides[i + 1];
    }

    // Regenerate the strided layout map with 'newStrides' and 'newOffset'.
    map = makeStridedLinearLayoutMap(newStrides, newOffset,
                                     rewriter.getContext());

    // Create a new memref type with constant-folded dims and/or
    // offset/strides.
    MemRefType newMemRefType = MemRefType::Builder(memrefType)
                                   .setShape(newShapeConstants)
                                   .setAffineMaps({map});
    (void)dynamicOffsetOperandCount; // unused in opt mode
    assert(static_cast<int64_t>(newOperands.size()) ==
           dynamicOffsetOperandCount + newMemRefType.getNumDynamicDims());

    // Create a new ViewOp.
    auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
                                             viewOp.getOperand(0), newOperands);
    // Insert a cast so we have the same type as the old memref type.
    rewriter.replaceOpWithNewOp<MemRefCastOp>(viewOp, newViewOp,
                                              viewOp.getType());
    return success();
  }
};

/// Rewrites a ViewOp whose memref operand is produced by a memref_cast of an
/// alloc, making the view operate directly on the allocated memref.
struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
  using OpRewritePattern<ViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ViewOp viewOp,
                                PatternRewriter &rewriter) const override {
    Value memrefOperand = viewOp.getOperand(0);
    MemRefCastOp memrefCastOp =
        dyn_cast_or_null<MemRefCastOp>(memrefOperand.getDefiningOp());
    if (!memrefCastOp)
      return failure();
    Value allocOperand = memrefCastOp.getOperand();
    AllocOp allocOp = dyn_cast_or_null<AllocOp>(allocOperand.getDefiningOp());
    if (!allocOp)
      return failure();
    rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
                                        viewOp.operands());
    return success();
  }
};
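
// For illustration, the fold performed by the pattern above (approximate
// syntax; the types are hypothetical):
//
//   %0 = alloc() : memref<2048xi8>
//   %1 = memref_cast %0 : memref<2048xi8> to memref<?xi8>
//   %2 = view %1[][%size] : memref<?xi8> to memref<?xf32>
//
// becomes, with the cast dropped,
//
//   %2 = view %0[][%size] : memref<2048xi8> to memref<?xf32>
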
} // end anonymous namespace

void ViewOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                         MLIRContext *context) {
  results.insert<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
}

//===----------------------------------------------------------------------===//
// XOrOp
//===----------------------------------------------------------------------===//

OpFoldResult XOrOp::fold(ArrayRef<Attribute> operands) {
  /// xor(x, 0) -> x
  if (matchPattern(rhs(), m_Zero()))
    return lhs();
  /// xor(x, x) -> 0
  if (lhs() == rhs())
    return Builder(getContext()).getZeroAttr(getType());

  return constFoldBinaryOp<IntegerAttr>(operands,
                                        [](APInt a, APInt b) { return a ^ b; });
}

//===----------------------------------------------------------------------===//
// ZeroExtendIOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(ZeroExtendIOp op) {
  auto srcType = getElementTypeOrSelf(op.getOperand().getType());
  auto dstType = getElementTypeOrSelf(op.getType());

  if (srcType.isa<IndexType>())
    return op.emitError() << srcType << " is not a valid operand type";
  if (dstType.isa<IndexType>())
    return op.emitError() << dstType << " is not a valid result type";

  if (srcType.cast<IntegerType>().getWidth() >=
      dstType.cast<IntegerType>().getWidth())
    return op.emitError("result type ")
           << dstType << " must be wider than operand type " << srcType;

  return success();
}
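
// For illustration, the constraints enforced above (approximate examples):
//
//   %0 = zexti %a : i1 to i8       // ok: result is strictly wider
//   %1 = zexti %b : i32 to i32     // error: result must be wider than operand
//   %2 = zexti %c : index to i64   // error: index is not a valid operand type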

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/StandardOps/IR/Ops.cpp.inc"