NFC: Rename FuncBuilder to OpBuilder and refactor to take a top level region instead of a function.

PiperOrigin-RevId: 251563898
commit f1b848e470 (parent f59f64e838)
Author: River Riddle, 2019-06-04 19:18:23 -07:00 (committed by Mehdi Amini)
70 changed files with 249 additions and 229 deletions
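The rename is mechanical almost everywhere; a minimal sketch of the before/after pattern this commit applies across the tree (assuming the MLIR API at this revision; `func` and `op` are illustrative):

```c++
// Before this commit, builders were anchored on a Function:
//   FuncBuilder builder(func);   // insert at the start of the function
// After it, they anchor on a Region (or, as before, on an op or block):
void migrate(mlir::Function *func, mlir::Operation *op) {
  mlir::OpBuilder builder(func->getBody()); // insert at the start of the body
  mlir::OpBuilder before(op);               // insert right before `op`
}
```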


@ -248,7 +248,7 @@ struct PythonFunctionContext {
PythonFunction enter() {
assert(function.function && "function is not set up");
auto *mlirFunc = static_cast<mlir::Function *>(function.function);
contextBuilder.emplace(mlirFunc);
contextBuilder.emplace(mlirFunc->getBody());
context =
new mlir::edsc::ScopedContext(*contextBuilder, mlirFunc->getLoc());
return function;
@ -262,7 +262,7 @@ struct PythonFunctionContext {
PythonFunction function;
mlir::edsc::ScopedContext *context;
llvm::Optional<FuncBuilder> contextBuilder;
llvm::Optional<OpBuilder> contextBuilder;
};
PythonFunctionContext PythonMLIRModule::makeFunctionContext(


@ -121,8 +121,7 @@ Type linalg::convertLinalgType(Type t) {
// Create an array attribute containing integer attributes with values provided
// in `position`.
static ArrayAttr makePositionAttr(FuncBuilder &builder,
ArrayRef<int> position) {
static ArrayAttr makePositionAttr(OpBuilder &builder, ArrayRef<int> position) {
SmallVector<Attribute, 4> attrs;
attrs.reserve(position.size());
for (auto p : position)


@ -39,7 +39,7 @@ TEST_FUNC(linalg_ops) {
mlir::Function *f =
makeFunction(module, "linalg_ops", {indexType, indexType, indexType}, {});
FuncBuilder builder(f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
@ -78,7 +78,7 @@ TEST_FUNC(linalg_ops_folded_slices) {
mlir::Function *f = makeFunction(module, "linalg_ops_folded_slices",
{indexType, indexType, indexType}, {});
FuncBuilder builder(f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off


@ -31,8 +31,8 @@ using llvm::ArrayRef;
using llvm::cast;
using llvm::isa;
using llvm::SmallVector;
using mlir::FuncBuilder;
using mlir::MemRefType;
using mlir::OpBuilder;
using mlir::Value;
using mlir::edsc::ScopedContext;
using mlir::edsc::ValueHandle;
@ -101,7 +101,7 @@ static mlir::Value *createFullyComposedIndexing(unsigned dim,
}
ViewOp linalg::emitAndReturnFullyComposedView(Value *v) {
FuncBuilder builder(v->getDefiningOp());
OpBuilder builder(v->getDefiningOp());
ScopedContext scope(builder, v->getDefiningOp()->getLoc());
assert(v->getType().isa<ViewType>() && "must be a ViewType");
auto *memRef = getViewSupportingMemRef(v);


@ -44,7 +44,7 @@ Function *makeFunctionWithAMatmulOp(Module &module, StringRef name) {
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
FuncBuilder builder(f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle


@ -41,7 +41,7 @@ Function *makeFunctionWithAMatmulOp(Module &module, StringRef name) {
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
mlir::FuncBuilder builder(f);
mlir::OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle


@ -44,7 +44,7 @@ Function *makeFunctionWithAMatmulOp(Module &module, StringRef name) {
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
mlir::FuncBuilder builder(f);
mlir::OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle


@ -41,8 +41,7 @@ using namespace mlir;
// Create an array attribute containing integer attributes with values provided
// in `position`.
static ArrayAttr makePositionAttr(FuncBuilder &builder,
ArrayRef<int> position) {
static ArrayAttr makePositionAttr(Builder &builder, ArrayRef<int> position) {
SmallVector<Attribute, 4> attrs;
attrs.reserve(position.size());
for (auto p : position)
@ -64,7 +63,7 @@ public:
// descriptor to emit IR iteratively computing the actual offset, followed by
// a getelementptr.
Value *obtainDataPtr(Operation *op, Value *viewDescriptor,
ArrayRef<Value *> indices, FuncBuilder &rewriter) const {
ArrayRef<Value *> indices, Builder &rewriter) const {
auto loadOp = cast<Op>(op);
auto elementType =
loadOp.getViewType().template cast<linalg::ViewType>().getElementType();


@ -64,7 +64,7 @@ void linalg::DotOp::emitScalarImplementation(
using edsc::intrinsics::select;
// Account for affine.terminator in loop.
FuncBuilder builder(body, std::prev(body->end(), 1));
OpBuilder builder(body, std::prev(body->end(), 1));
ScopedContext scope(builder, innermostLoop.getLoc());
FloatType fTy = getOperand(0)
->getType()
@ -107,7 +107,7 @@ void linalg::MatvecOp::writeAsFinerGrainTensorContraction() {
assert(
llvm::isa_and_nonnull<RangeOp>(indexingPosPair.first->getDefiningOp()));
// clang-format off
FuncBuilder builder(op);
OpBuilder builder(op);
ScopedContext scope(builder, op->getLoc());
IndexHandle i;
using linalg::common::LoopNestRangeBuilder;
@ -132,7 +132,7 @@ void linalg::MatvecOp::emitScalarImplementation(
using edsc::op::operator==;
using edsc::intrinsics::select;
// Account for affine.terminator in loop.
FuncBuilder builder(body, std::prev(body->end(), 1));
OpBuilder builder(body, std::prev(body->end(), 1));
ScopedContext scope(builder, innermostLoop.getLoc());
FloatType fTy = getOperand(0)
->getType()
@ -181,7 +181,7 @@ void linalg::MatmulOp::writeAsFinerGrainTensorContraction() {
llvm::isa_and_nonnull<RangeOp>(indexingPosPair.first->getDefiningOp()));
using linalg::common::LoopNestRangeBuilder;
// clang-format off
FuncBuilder builder(op);
OpBuilder builder(op);
ScopedContext scope(builder, op->getLoc());
IndexHandle j;
LoopNestRangeBuilder(&j, ValueHandle(indexingPosPair.first))(
@ -205,7 +205,7 @@ void linalg::MatmulOp::emitScalarImplementation(
using edsc::op::operator==;
using edsc::intrinsics::select;
// Account for affine.terminator in loop.
FuncBuilder builder(body, std::prev(body->end(), 1));
OpBuilder builder(body, std::prev(body->end(), 1));
ScopedContext scope(builder, innermostLoop.getLoc());
FloatType fTy = getOperand(0)
->getType()


@ -161,7 +161,7 @@ linalg::makeGenericLoopRanges(AffineMap operandRangesToLoopMaps,
template <class ContractionOp>
static SmallVector<mlir::AffineForOp, 4>
writeContractionAsLoops(ContractionOp contraction) {
FuncBuilder builder(contraction.getOperation());
OpBuilder builder(contraction.getOperation());
ScopedContext scope(builder, contraction.getLoc());
auto allRanges = getRanges(contraction);
auto loopRanges =
@ -274,7 +274,7 @@ Rewriter<linalg::LoadOp>::matchAndRewrite(linalg::LoadOp load,
SliceOp slice = dyn_cast<SliceOp>(load.getView()->getDefiningOp());
ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
: cast<ViewOp>(load.getView()->getDefiningOp());
FuncBuilder builder(load);
OpBuilder builder(load);
ScopedContext scope(builder, load.getLoc());
auto *memRef = view.getSupportingMemRef();
auto operands = emitAndReturnLoadStoreOperands(load, view);
@ -289,7 +289,7 @@ Rewriter<linalg::StoreOp>::matchAndRewrite(linalg::StoreOp store,
SliceOp slice = dyn_cast<SliceOp>(store.getView()->getDefiningOp());
ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
: cast<ViewOp>(store.getView()->getDefiningOp());
FuncBuilder builder(store);
OpBuilder builder(store);
ScopedContext scope(builder, store.getLoc());
auto *valueToStore = store.getValueToStore();
auto *memRef = view.getSupportingMemRef();


@ -41,7 +41,7 @@ Function *makeFunctionWithAMatmulOp(Module &module, StringRef name) {
module, name,
{dynamic2DMemRefType, dynamic2DMemRefType, dynamic2DMemRefType}, {});
FuncBuilder builder(f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
@ -97,7 +97,7 @@ TEST_FUNC(matmul_tiled_views) {
MLIRContext context;
Module module(&context);
mlir::Function *f = makeFunctionWithAMatmulOp(module, "matmul_tiled_views");
FuncBuilder b(f);
OpBuilder b(f->getBody());
lowerToTiledViews(f, {b.create<ConstantIndexOp>(f->getLoc(), 8),
b.create<ConstantIndexOp>(f->getLoc(), 9)});
composeSliceOps(f);
@ -127,7 +127,7 @@ TEST_FUNC(matmul_tiled_views_as_loops) {
Module module(&context);
mlir::Function *f =
makeFunctionWithAMatmulOp(module, "matmul_tiled_views_as_loops");
FuncBuilder b(f);
OpBuilder b(f->getBody());
lowerToTiledViews(f, {b.create<ConstantIndexOp>(f->getLoc(), 8),
b.create<ConstantIndexOp>(f->getLoc(), 9)});
composeSliceOps(f);


@ -148,7 +148,7 @@ writeContractionAsTiledViews(TensorContractionBase<ConcreteOp> &contraction,
contraction.getNumParallelDims() + contraction.getNumReductionDims());
auto *op = static_cast<ConcreteOp *>(&contraction);
mlir::FuncBuilder builder(op->getOperation());
mlir::OpBuilder builder(op->getOperation());
ScopedContext scope(builder, op->getLoc());
SmallVector<IndexHandle, 4> ivs(tileSizes.size());
auto pivs = IndexHandle::makeIndexHandlePointers(ivs);


@ -104,7 +104,7 @@ private:
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
std::unique_ptr<mlir::FuncBuilder> builder;
std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@ -174,7 +174,7 @@ private:
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))


@ -105,7 +105,7 @@ private:
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
std::unique_ptr<mlir::FuncBuilder> builder;
std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@ -175,7 +175,7 @@ private:
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))


@ -105,7 +105,7 @@ private:
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
std::unique_ptr<mlir::FuncBuilder> builder;
std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@ -175,7 +175,7 @@ private:
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))


@ -315,7 +315,7 @@ public:
// Found a specialized callee! Let's turn this into a normal call
// operation.
SmallVector<mlir::Value *, 8> operands(op->getOperands());
mlir::FuncBuilder builder(op);
mlir::OpBuilder builder(op);
auto newCall =
builder.create<mlir::CallOp>(op->getLoc(), mangledCallee, operands);
if (newCall.getNumResults()) {


@ -57,7 +57,7 @@ namespace {
/// time both sides of the cast (producer and consumer) will be lowered to a
/// dialect like LLVM and end up with the same LLVM representation, at which
/// point this becomes a no-op and is eliminated.
Value *typeCast(FuncBuilder &builder, Value *val, Type destTy) {
Value *typeCast(PatternRewriter &builder, Value *val, Type destTy) {
if (val->getType() == destTy)
return val;
return builder.create<toy::TypeCastOp>(val->getLoc(), val, destTy)
@ -67,7 +67,7 @@ Value *typeCast(FuncBuilder &builder, Value *val, Type destTy) {
/// Create a type cast to turn a toy.array into a memref. The Toy Array will be
/// lowered to a memref during buffer allocation, at which point the type cast
/// becomes useless.
Value *memRefTypeCast(FuncBuilder &builder, Value *val) {
Value *memRefTypeCast(PatternRewriter &builder, Value *val) {
if (val->getType().isa<MemRefType>())
return val;
auto toyArrayTy = val->getType().dyn_cast<toy::ToyArrayType>();


@ -57,7 +57,7 @@ namespace {
/// time both sides of the cast (producer and consumer) will be lowered to a
/// dialect like LLVM and end up with the same LLVM representation, at which
/// point this becomes a no-op and is eliminated.
Value *typeCast(FuncBuilder &builder, Value *val, Type destTy) {
Value *typeCast(PatternRewriter &builder, Value *val, Type destTy) {
if (val->getType() == destTy)
return val;
return builder.create<toy::TypeCastOp>(val->getLoc(), val, destTy)
@ -67,7 +67,7 @@ Value *typeCast(FuncBuilder &builder, Value *val, Type destTy) {
/// Create a type cast to turn a toy.array into a memref. The Toy Array will be
/// lowered to a memref during buffer allocation, at which point the type cast
/// becomes useless.
Value *memRefTypeCast(FuncBuilder &builder, Value *val) {
Value *memRefTypeCast(PatternRewriter &builder, Value *val) {
if (val->getType().isa<MemRefType>())
return val;
auto toyArrayTy = val->getType().dyn_cast<toy::ToyArrayType>();
@ -183,7 +183,7 @@ public:
private:
// Turn a string into a toy.alloc (malloc/free abstraction) and a sequence
// of stores into the buffer, and return a MemRef into the buffer.
Value *getConstantCharBuffer(FuncBuilder &builder, Location loc,
Value *getConstantCharBuffer(PatternRewriter &builder, Location loc,
StringRef data) const {
auto retTy =
builder.getMemRefType(data.size() + 1, builder.getIntegerType(8));
@ -405,7 +405,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
/// operating in a brand new function: we don't have the return to hook the
/// dealloc operations.
Value *allocTensor(toy::AllocOp alloc) {
FuncBuilder builder(alloc);
OpBuilder builder(alloc);
auto retTy = alloc.getResult()->getType();
auto memRefTy = retTy.dyn_cast<MemRefType>();
@ -420,7 +420,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
// Insert a `dealloc` operation right before the `return` operations, unless
// it is returned itself in which case the caller is responsible for it.
builder.getFunction()->walk([&](Operation *op) {
builder.getRegion()->walk([&](Operation *op) {
auto returnOp = dyn_cast<ReturnOp>(op);
if (!returnOp)
return;


@ -105,7 +105,7 @@ private:
/// convenience for emitting individual operations.
/// The builder is stateful, in particular it keeps an "insertion point":
/// this is where the next operations will be introduced.
std::unique_ptr<mlir::FuncBuilder> builder;
std::unique_ptr<mlir::OpBuilder> builder;
/// The symbol table maps a variable name to a value in the current scope.
/// Entering a function creates a new scope, and the function arguments are
@ -175,7 +175,7 @@ private:
// Create a builder for the function, it will be used throughout the codegen
// to create operations in this function.
builder = llvm::make_unique<mlir::FuncBuilder>(function.get());
builder = llvm::make_unique<mlir::OpBuilder>(function->getBody());
// Emit the body of the function.
if (!mlirGen(*funcAST.getBody()))


@ -319,7 +319,7 @@ public:
// Found a specialized callee! Let's turn this into a normal call
// operation.
SmallVector<mlir::Value *, 8> operands(op->getOperands());
mlir::FuncBuilder builder(f);
mlir::OpBuilder builder(f->getBody());
builder.setInsertionPoint(op);
auto newCall =
builder.create<mlir::CallOp>(op->getLoc(), mangledCallee, operands);


@ -233,7 +233,7 @@ public:
// needs to define as many value as the original operation, but their types
// may be different.
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override;
OpBuilder &rewriter) const override;
}
```
@ -296,7 +296,7 @@ operates.
```c++
SmallVector<Value *, 4> ViewOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto viewOp = op->cast<linalg::ViewOp>();
@ -437,7 +437,7 @@ struct ViewDescriptor {
}
// The builder into which we emit code.
FuncBuilder &builder;
OpBuilder &builder;
// The actual descriptor.
Value *d;
@ -450,7 +450,7 @@ rules described above:
```c++
SmallVector<Value *, 4> SliceOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto sliceOp = op->cast<linalg::SliceOp>();
@ -528,7 +528,7 @@ for the view descriptor:
```c++
Value *obtainDataPtr(Location loc, int rank, Value *viewDescriptorVal,
ArrayRef<Value *> indices, FuncBuilder &rewriter) {
ArrayRef<Value *> indices, OpBuilder &rewriter) {
// Create the context object (RAII) in which we can use declarative builders.
// Bring all the builders into the namespace.
using namespace intrinsics;
@ -560,7 +560,7 @@ conversions for load and store operations.
// Load Operation Conversion.
SmallVector<Value *, 4> LoadOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto loadOp = op->cast<linalg::LoadOp>();
@ -582,7 +582,7 @@ SmallVector<Value *, 4> LoadOpConversion::rewrite(
// Store Operation Conversion
SmallVector<Value *, 4> StoreOpConversion::rewrite(
Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
OpBuilder &rewriter) const override {
// Obtain the typed operation (we know we matched only one type).
auto loadOp = op->cast<linalg::StoreOp>();


@ -123,7 +123,7 @@ generation through a simple depth-first search traversal of the Toy AST. Here is
how we create a `toy.transpose` operation:
```
mlir::Operation *createTransposeOp(FuncBuilder *builder,
mlir::Operation *createTransposeOp(OpBuilder *builder,
mlir::Value *input_array) {
// We bundle our custom type in a `toy` dialect.
auto toyDialect = mlir::Identifier::get("toy", builder->getContext());


@ -202,11 +202,11 @@ class GenericCallOp
bool verify();
/// Interface to the builder to allow:
/// mlir::FuncBuilder::create<GenericCallOp>(...)
/// mlir::OpBuilder::create<GenericCallOp>(...)
/// This method populates the `state` that MLIR uses to create operations.
/// The `toy.generic_call` operation accepts a callee name and a list of
/// arguments for the call.
static void build(mlir::FuncBuilder *builder, mlir::OperationState *state,
static void build(mlir::OpBuilder *builder, mlir::OperationState *state,
llvm::StringRef callee,
llvm::ArrayRef<mlir::Value *> arguments);
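For context, a hypothetical call site for the builder interface documented above; the callee name and operand list here are illustrative, not part of the commit:

```c++
// OpBuilder::create<GenericCallOp>(...) forwards its trailing arguments to
// the build() method declared above (callee and arguments are made up here).
auto call = builder->create<GenericCallOp>(location, "transpose", arguments);
```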


@ -80,7 +80,7 @@ public:
/// The results created by the new IR with the builder are returned, and their
/// number must match the number of result of `op`.
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
OpBuilder &rewriter) const override {
...
// Return the newly allocated buffer, it will be used as an operand when


@ -32,7 +32,7 @@ namespace mlir {
class AffineBound;
class AffineValueMap;
class FlatAffineConstraints;
class FuncBuilder;
class OpBuilder;
/// A utility function to check if a value is defined at the top level of a
/// function. A value defined at the top level is always a valid symbol.
@ -143,7 +143,7 @@ public:
/// Return a Builder set up to insert operations immediately before the
/// terminator.
FuncBuilder getBodyBuilder();
OpBuilder getBodyBuilder();
/// Get the body of the AffineForOp.
Block *getBody() { return &getRegion().front(); }
@ -361,8 +361,7 @@ void canonicalizeMapAndOperands(AffineMap *map,
/// Returns a composed AffineApplyOp by composing `map` and `operands` with
/// other AffineApplyOps supplying those operands. The operands of the resulting
/// AffineApplyOp do not change the length of AffineApplyOp chains.
AffineApplyOp makeComposedAffineApply(FuncBuilder *b, Location loc,
AffineMap map,
AffineApplyOp makeComposedAffineApply(OpBuilder *b, Location loc, AffineMap map,
llvm::ArrayRef<Value *> operands);
/// Given an affine map `map` and its input `operands`, this method composes


@ -27,9 +27,9 @@ namespace mlir {
class AffineApplyOp;
class AffineForOp;
class AffineMap;
class FuncBuilder;
class Location;
class MemRefType;
class OpBuilder;
class Operation;
class Value;
class VectorType;


@ -50,17 +50,17 @@ class ValueHandle;
/// setting and restoring of insertion points.
class ScopedContext {
public:
ScopedContext(FuncBuilder &builder, Location location);
ScopedContext(OpBuilder &builder, Location location);
/// Sets the insertion point of the builder to 'newInsertPt' for the duration
/// of the scope. The existing insertion point of the builder is restored on
/// destruction.
ScopedContext(FuncBuilder &builder, FuncBuilder::InsertPoint newInsertPt,
ScopedContext(OpBuilder &builder, OpBuilder::InsertPoint newInsertPt,
Location location);
~ScopedContext();
static MLIRContext *getContext();
static FuncBuilder *getBuilder();
static OpBuilder *getBuilder();
static Location getLocation();
private:
@ -74,10 +74,10 @@ private:
static ScopedContext *&getCurrentScopedContext();
/// Top level FuncBuilder.
FuncBuilder &builder;
/// Top level OpBuilder.
OpBuilder &builder;
/// The previous insertion point of the builder.
llvm::Optional<FuncBuilder::InsertPoint> prevBuilderInsertPoint;
llvm::Optional<OpBuilder::InsertPoint> prevBuilderInsertPoint;
/// Current location.
Location location;
/// Parent context we return into.
@ -116,20 +116,20 @@ protected:
/// Enter an mlir::Block and set up a ScopedContext to insert operations at
/// the end of it. Since we cannot use c++ language-level scoping to implement
/// scoping itself, we use enter/exit pairs of operations.
/// As a consequence we must allocate a new FuncBuilder + ScopedContext and
/// As a consequence we must allocate a new OpBuilder + ScopedContext and
/// let them escape.
/// Step back "prev" times from the end of the block to set up the insertion
/// point, which is useful for non-empty blocks.
void enter(mlir::Block *block, int prev = 0) {
bodyScope = new ScopedContext(
*ScopedContext::getBuilder(),
FuncBuilder::InsertPoint(block, std::prev(block->end(), prev)),
OpBuilder::InsertPoint(block, std::prev(block->end(), prev)),
ScopedContext::getLocation());
bodyScope->nestedBuilder = this;
}
/// Exit the current mlir::Block by explicitly deleting the dynamically
/// allocated FuncBuilder and ScopedContext.
/// allocated OpBuilder and ScopedContext.
void exit() {
// Reclaim now to exit the scope.
bodyScope->nestedBuilder = nullptr;


@ -344,6 +344,14 @@ public:
explicit Region(Operation *container);
~Region();
/// Return the context this region is inserted in. The region must have a
/// valid parent container.
MLIRContext *getContext();
/// Return a location for this region. This is the location attached to the
/// parent container. The region must have a valid parent container.
Location getLoc();
using RegionType = llvm::iplist<Block>;
RegionType &getBlocks() { return blocks; }
@ -409,6 +417,13 @@ public:
/// the operation with an offending use.
bool isIsolatedAbove(llvm::Optional<Location> noteLoc = llvm::None);
/// Walk the operations in this block in postorder, calling the callback for
/// each operation.
void walk(const std::function<void(Operation *)> &callback) {
for (auto &block : *this)
block.walk(callback);
}
private:
RegionType blocks;
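A hypothetical use of the new Region::walk, which visits every operation in every block of the region:

```c++
// Count the operations nested in `region` (illustrative).
unsigned numOps = 0;
region.walk([&](Operation *op) { ++numOps; });
```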


@ -181,40 +181,37 @@ protected:
MLIRContext *context;
};
/// This class helps build a Function. Operations that are created are
/// automatically inserted at an insertion point. The builder is copyable.
class FuncBuilder : public Builder {
/// This class helps build Operations. Operations that are created are
/// automatically inserted at an insertion point. The builder is copyable.
class OpBuilder : public Builder {
public:
/// Create a function builder and set the insertion point to the start of
/// the function.
explicit FuncBuilder(Function *func)
: Builder(func->getContext()), function(func) {
if (!func->empty())
setInsertionPoint(&func->front(), func->front().begin());
/// Create a builder and set the insertion point to the start of the region.
explicit OpBuilder(Region *region)
: Builder(region->getContext()), region(region) {
if (!region->empty())
setInsertionPoint(&region->front(), region->front().begin());
else
clearInsertionPoint();
}
explicit OpBuilder(Region &region) : OpBuilder(&region) {}
explicit FuncBuilder(Function &func) : FuncBuilder(&func) {}
virtual ~FuncBuilder();
virtual ~OpBuilder();
/// Create a function builder and set insertion point to the given
/// operation, which will cause subsequent insertions to go right before it.
FuncBuilder(Operation *op) : FuncBuilder(op->getFunction()) {
/// Create a builder and set insertion point to the given operation, which
/// will cause subsequent insertions to go right before it.
OpBuilder(Operation *op) : OpBuilder(op->getContainingRegion()) {
setInsertionPoint(op);
}
FuncBuilder(Block *block) : FuncBuilder(block->getFunction()) {
setInsertionPoint(block, block->end());
}
OpBuilder(Block *block) : OpBuilder(block, block->end()) {}
FuncBuilder(Block *block, Block::iterator insertPoint)
: FuncBuilder(block->getFunction()) {
OpBuilder(Block *block, Block::iterator insertPoint)
: OpBuilder(block->getParent()) {
setInsertionPoint(block, insertPoint);
}
/// Return the function this builder is referring to.
Function *getFunction() const { return function; }
/// Return the region this builder is referring to.
Region *getRegion() const { return region; }
/// This class represents a saved insertion point.
class InsertPoint {
@ -291,7 +288,7 @@ public:
/// Add new block and set the insertion point to the end of it. If an
/// 'insertBefore' block is passed, the block will be placed before the
/// specified block. If not, the block will be appended to the end of the
/// current function.
/// current region.
Block *createBlock(Block *insertBefore = nullptr);
/// Returns the current block of the builder.
@ -342,7 +339,7 @@ public:
}
private:
Function *function;
Region *region;
Block *block = nullptr;
Block::iterator insertPoint;
};
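Taken together, the constructors above give OpBuilder four anchoring modes; a sketch with illustrative names:

```c++
void anchors(Region *region, Operation *op, Block *block) {
  OpBuilder atRegionStart(region);     // start of the region's first block
  OpBuilder beforeOp(op);              // immediately before an existing op
  OpBuilder atBlockEnd(block);         // end of a block
  OpBuilder at(block, block->begin()); // explicit (block, iterator) position
}
```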


@ -266,7 +266,7 @@ template <typename SourceOp> struct OpRewritePattern : public RewritePattern {
/// to apply patterns and observe their effects (e.g. to keep worklists or
/// other data structures up to date).
///
class PatternRewriter : public FuncBuilder {
class PatternRewriter : public OpBuilder {
public:
/// Create operation of specific op type at the current insertion point
/// without verifying to see if it is valid.
@ -342,7 +342,7 @@ public:
ArrayRef<Value *> valuesToRemoveIfDead = {});
protected:
PatternRewriter(Function *fn) : FuncBuilder(fn) {}
PatternRewriter(Region &region) : OpBuilder(region) {}
virtual ~PatternRewriter();
// These are the callback methods that subclasses can choose to implement if


@ -113,9 +113,9 @@ public:
/// Return a Builder set up to insert operations immediately before the
/// terminator.
FuncBuilder getBodyBuilder() {
OpBuilder getBodyBuilder() {
Block *body = getBody();
return FuncBuilder(body, std::prev(body->end()));
return OpBuilder(body, std::prev(body->end()));
}
/// Get the body of the ForOp.
@ -408,7 +408,7 @@ public:
unsigned getNumInputsAndOutputs() {
return impl->getNumInputsAndOutputs(getOperation());
}
Operation *create(FuncBuilder &builder, Location loc,
Operation *create(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands) {
return impl->create(builder, loc, operands);
}
@ -425,7 +425,7 @@ private:
virtual unsigned getNumReductionLoops(Operation *op) = 0;
virtual unsigned getNumWindowLoops(Operation *op) = 0;
virtual unsigned getNumLoops(Operation *op) = 0;
virtual Operation *create(FuncBuilder &builder, Location loc,
virtual Operation *create(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands) = 0;
};
@ -458,7 +458,7 @@ private:
unsigned getNumLoops(Operation *op) override {
return cast<ConcreteOp>(op).getNumLoops();
}
Operation *create(FuncBuilder &builder, Location loc,
Operation *create(OpBuilder &builder, Location loc,
ArrayRef<Value *> operands) override {
return builder.create<ConcreteOp>(loc, operands);
}


@ -88,7 +88,7 @@ SmallVector<Value *, 8> getViewSizes(LinalgOp &linalgOp);
/// Returns the values obtained by applying `map` to the list of values.
/// Performs simplifications and foldings where possible.
SmallVector<Value *, 4> applyMapToValues(FuncBuilder *b, Location loc,
SmallVector<Value *, 4> applyMapToValues(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> values,
FunctionConstants &state);


@ -31,7 +31,6 @@ namespace mlir {
// Forward declarations.
class Block;
class FuncBuilder;
class MLIRContext;
class Operation;
class Type;


@ -31,7 +31,7 @@ namespace mlir {
class AffineMap;
class AffineForOp;
class Function;
class FuncBuilder;
class OpBuilder;
class Value;
/// Unrolls this for operation completely if the trip count is known to be
@ -80,7 +80,7 @@ void promoteSingleIterationLoops(Function *f);
void getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
FuncBuilder *builder);
OpBuilder *builder);
/// Skew the operations in the body of a 'affine.for' operation with the
/// specified operation-wise shifts. The shifts are with respect to the


@ -34,11 +34,9 @@ namespace mlir {
class AffineApplyOp;
class AffineForOp;
class FuncBuilder;
class Location;
class Module;
class Function;
class OpBuilder;
/// Replaces all "deferencing" uses of oldMemRef with newMemRef while optionally
/// remapping the old memref's indices using the supplied affine map,
@ -83,7 +81,7 @@ bool replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
/// these will also be collected into a single (multi-result) affine apply op.
/// The final results of the composed AffineApplyOp are returned in output
/// parameter 'results'. Returns the affine apply op created.
Operation *createComposedAffineApplyOp(FuncBuilder *builder, Location loc,
Operation *createComposedAffineApplyOp(OpBuilder *builder, Location loc,
ArrayRef<Value *> operands,
ArrayRef<Operation *> affineApplyOps,
SmallVectorImpl<Value *> *results);


@ -544,7 +544,7 @@ void mlir::fullyComposeAffineMapAndOperands(
}
}
AffineApplyOp mlir::makeComposedAffineApply(FuncBuilder *b, Location loc,
AffineApplyOp mlir::makeComposedAffineApply(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> operands) {
AffineMap normalizedMap = map;
@ -1069,9 +1069,9 @@ void AffineForOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
results.push_back(llvm::make_unique<AffineForLoopBoundFolder>(context));
}
FuncBuilder AffineForOp::getBodyBuilder() {
OpBuilder AffineForOp::getBodyBuilder() {
Block *body = getBody();
return FuncBuilder(body, std::prev(body->end()));
return OpBuilder(body, std::prev(body->end()));
}
AffineBound AffineForOp::getLowerBound() {


@ -54,7 +54,7 @@ void mlir::buildTripCountMapAndOperands(
int64_t loopSpan;
int64_t step = forOp.getStep();
FuncBuilder b(forOp.getOperation());
OpBuilder b(forOp.getOperation());
if (forOp.hasConstantBounds()) {
int64_t lb = forOp.getConstantLowerBound();


@ -44,7 +44,7 @@ FunctionPassBase *mlir::createParallelismDetectionTestPass() {
// parallel.
void TestParallelismDetection::runOnFunction() {
Function &f = getFunction();
FuncBuilder b(f);
OpBuilder b(f.getBody());
f.walk<AffineForOp>([&](AffineForOp forOp) {
if (isLoopParallel(forOp))
forOp.emitRemark("parallel loop");


@ -749,7 +749,7 @@ mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
// Clone src loop nest and insert it at the beginning of the operation block
// of the loop at 'dstLoopDepth' in 'dstLoopIVs'.
auto dstAffineForOp = dstLoopIVs[dstLoopDepth - 1];
FuncBuilder b(dstAffineForOp.getBody(), dstAffineForOp.getBody()->begin());
OpBuilder b(dstAffineForOp.getBody(), dstAffineForOp.getBody()->begin());
auto sliceLoopNest =
cast<AffineForOp>(b.clone(*srcLoopIVs[0].getOperation()));


@ -24,8 +24,7 @@
using namespace mlir;
using namespace mlir::edsc;
mlir::edsc::ScopedContext::ScopedContext(FuncBuilder &builder,
Location location)
mlir::edsc::ScopedContext::ScopedContext(OpBuilder &builder, Location location)
: builder(builder), location(location),
enclosingScopedContext(ScopedContext::getCurrentScopedContext()),
nestedBuilder(nullptr) {
@ -35,8 +34,8 @@ mlir::edsc::ScopedContext::ScopedContext(FuncBuilder &builder,
/// Sets the insertion point of the builder to 'newInsertPt' for the duration
/// of the scope. The existing insertion point of the builder is restored on
/// destruction.
mlir::edsc::ScopedContext::ScopedContext(FuncBuilder &builder,
FuncBuilder::InsertPoint newInsertPt,
mlir::edsc::ScopedContext::ScopedContext(OpBuilder &builder,
OpBuilder::InsertPoint newInsertPt,
Location location)
: builder(builder), prevBuilderInsertPoint(builder.saveInsertionPoint()),
location(location),
@ -59,7 +58,7 @@ ScopedContext *&mlir::edsc::ScopedContext::getCurrentScopedContext() {
return context;
}
FuncBuilder *mlir::edsc::ScopedContext::getBuilder() {
OpBuilder *mlir::edsc::ScopedContext::getBuilder() {
assert(ScopedContext::getCurrentScopedContext() &&
"Unexpected Null ScopedContext");
return &ScopedContext::getCurrentScopedContext()->builder;


@ -30,7 +30,7 @@ using namespace mlir;
namespace {
template <typename OpTy>
void createForAllDimensions(FuncBuilder &builder, Location loc,
void createForAllDimensions(OpBuilder &builder, Location loc,
SmallVectorImpl<Value *> &values) {
for (StringRef dim : {"x", "y", "z"}) {
Value *v = builder.create<OpTy>(loc, builder.getIndexType(),
@ -42,12 +42,12 @@ void createForAllDimensions(FuncBuilder &builder, Location loc,
// Add operations generating block/thread ids and grid/block dimensions at the
// beginning of `kernelFunc` and replace uses of the respective function args.
void injectGpuIndexOperations(Location loc, Function &kernelFunc) {
FuncBuilder funcBuilder(kernelFunc);
OpBuilder OpBuilder(kernelFunc.getBody());
SmallVector<Value *, 12> indexOps;
createForAllDimensions<gpu::BlockId>(funcBuilder, loc, indexOps);
createForAllDimensions<gpu::ThreadId>(funcBuilder, loc, indexOps);
createForAllDimensions<gpu::GridDim>(funcBuilder, loc, indexOps);
createForAllDimensions<gpu::BlockDim>(funcBuilder, loc, indexOps);
createForAllDimensions<gpu::BlockId>(OpBuilder, loc, indexOps);
createForAllDimensions<gpu::ThreadId>(OpBuilder, loc, indexOps);
createForAllDimensions<gpu::GridDim>(OpBuilder, loc, indexOps);
createForAllDimensions<gpu::BlockDim>(OpBuilder, loc, indexOps);
// Replace the leading 12 function args with the respective thread/block index
// operations. Iterate backwards since args are erased and indices change.
for (int i = 11; i >= 0; --i) {
@ -78,10 +78,10 @@ Function *outlineKernelFunc(Module &module, gpu::LaunchOp &launchOp) {
// Replace `gpu.launch` operations with a `gpu.launch_func` operation launching
// `kernelFunc`.
void convertToLaunchFuncOp(gpu::LaunchOp &launchOp, Function &kernelFunc) {
FuncBuilder funcBuilder(launchOp);
OpBuilder OpBuilder(launchOp);
SmallVector<Value *, 4> kernelOperandValues(
launchOp.getKernelOperandValues());
funcBuilder.create<gpu::LaunchFuncOp>(
OpBuilder.create<gpu::LaunchFuncOp>(
launchOp.getLoc(), &kernelFunc, launchOp.getGridSizeOperandValues(),
launchOp.getBlockSizeOperandValues(), kernelOperandValues);
launchOp.erase();


@ -282,6 +282,24 @@ Region::~Region() {
bb.dropAllReferences();
}
/// Return the context this region is inserted in. The region must have a valid
/// parent container.
MLIRContext *Region::getContext() {
assert(!container.isNull() && "region is not attached to a container");
if (auto *inst = getContainingOp())
return inst->getContext();
return getContainingFunction()->getContext();
}
/// Return a location for this region. This is the location attached to the
/// parent container. The region must have a valid parent container.
Location Region::getLoc() {
assert(!container.isNull() && "region is not attached to a container");
if (auto *inst = getContainingOp())
return inst->getLoc();
return getContainingFunction()->getLoc();
}
Region *Region::getContainingRegion() {
if (auto *inst = getContainingOp())
return inst->getContainingRegion();


@ -332,31 +332,31 @@ AffineMap Builder::getShiftedAffineMap(AffineMap map, int64_t shift) {
}
//===----------------------------------------------------------------------===//
// Operations.
// OpBuilder.
//===----------------------------------------------------------------------===//
FuncBuilder::~FuncBuilder() {}
OpBuilder::~OpBuilder() {}
/// Add new block and set the insertion point to the end of it. If an
/// 'insertBefore' block is passed, the block will be placed before the
/// specified block. If not, the block will be appended to the end of the
/// current function.
Block *FuncBuilder::createBlock(Block *insertBefore) {
Block *OpBuilder::createBlock(Block *insertBefore) {
Block *b = new Block();
// If we are supposed to insert before a specific block, do so, otherwise add
// the block to the end of the function.
if (insertBefore)
function->getBlocks().insert(Function::iterator(insertBefore), b);
region->getBlocks().insert(Function::iterator(insertBefore), b);
else
function->push_back(b);
region->push_back(b);
setInsertionPointToEnd(b);
return b;
}
/// Create an operation given the fields represented as an OperationState.
Operation *FuncBuilder::createOperation(const OperationState &state) {
Operation *OpBuilder::createOperation(const OperationState &state) {
assert(block && "createOperation() called without setting builder's block");
auto *op = Operation::create(state);
block->getOperations().insert(insertPoint, op);


@ -214,9 +214,7 @@ void Function::addEntryBlock() {
}
void Function::walk(const std::function<void(Operation *)> &callback) {
// Walk each of the blocks within the function.
for (auto &block : getBlocks())
block.walk(callback);
getBody().walk(callback);
}
//===----------------------------------------------------------------------===//


@ -312,8 +312,7 @@ void Operation::replaceUsesOfWith(Value *from, Value *to) {
void Operation::walk(const std::function<void(Operation *)> &callback) {
// Visit any internal operations.
for (auto &region : getRegions())
for (auto &block : region)
block.walk(callback);
region.walk(callback);
// Visit the current operation.
callback(this);


@ -889,7 +889,7 @@ static void ensureDistinctSuccessors(Block &bb) {
position != end; ++position) {
auto *dummyBlock = new Block();
bb.getParent()->push_back(dummyBlock);
auto builder = FuncBuilder(dummyBlock);
auto builder = OpBuilder(dummyBlock);
SmallVector<Value *, 8> operands(
terminator->getSuccessorOperands(*position));
builder.create<BranchOp>(terminator->getLoc(), successor.first, operands);


@ -773,7 +773,7 @@ void mlir::linalg::emitScalarImplementation(
using edsc::intrinsics::select;
// account for affine.terminator in loop.
FuncBuilder b(body, std::prev(body->end(), 1));
OpBuilder b(body, std::prev(body->end(), 1));
ScopedContext scope(b, innermostLoop.getLoc());
auto *op = linalgOp.getOperation();
if (isa<DotOp>(op)) {


@ -621,7 +621,7 @@ static void lowerLinalgForToCFG(Function &f) {
auto *op = forOp.getOperation();
auto loc = op->getLoc();
using namespace edsc::op;
FuncBuilder builder(op);
OpBuilder builder(op);
ScopedContext scope(builder, loc);
ValueHandle lb(forOp.getLowerBound()), ub(forOp.getUpperBound()),
step(forOp.getStep());


@ -35,7 +35,7 @@ using namespace mlir::linalg;
// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
static SmallVector<Value *, 4> emitLoopRanges(FuncBuilder *b, Location loc,
static SmallVector<Value *, 4> emitLoopRanges(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> allViewSizes,
FunctionConstants &state) {
@ -51,7 +51,7 @@ static SmallVector<Value *, 4> emitLoopRanges(FuncBuilder *b, Location loc,
}
static void emitLinalgOpAsLoops(LinalgOp &linalgOp, FunctionConstants &state) {
FuncBuilder b(linalgOp.getOperation());
OpBuilder b(linalgOp.getOperation());
ScopedContext scope(b, linalgOp.getOperation()->getLoc());
auto loopRanges = emitLoopRanges(
scope.getBuilder(), scope.getLocation(),


@ -58,7 +58,7 @@ static bool isZero(Value *v) {
// The returned ranges correspond to the loop ranges, in the proper order, that
// are tiled and for which new loops will be created.
static SmallVector<Value *, 4>
makeTiledLoopRanges(FuncBuilder *b, Location loc, AffineMap map,
makeTiledLoopRanges(OpBuilder *b, Location loc, AffineMap map,
ArrayRef<Value *> allViewSizes,
ArrayRef<Value *> allTileSizes, FunctionConstants &state) {
assert(allTileSizes.size() == map.getNumResults());
@ -127,7 +127,7 @@ static Value *foldRange(Value *view, unsigned dim) {
return nullptr;
}
static SmallVector<Value *, 4> makeTiledViews(FuncBuilder *b, Location loc,
static SmallVector<Value *, 4> makeTiledViews(OpBuilder *b, Location loc,
LinalgOp &linalgOp,
ArrayRef<Value *> ivs,
ArrayRef<Value *> tileSizes,
@ -210,7 +210,7 @@ static LogicalResult tileLinalgOp(LinalgOp &op, ArrayRef<Value *> tileSizes,
tileSizes.size() &&
"expected matching number of tile sizes and loops");
FuncBuilder builder(op.getOperation());
OpBuilder builder(op.getOperation());
ScopedContext scope(builder, op.getLoc());
auto loopRanges = makeTiledLoopRanges(
scope.getBuilder(), scope.getLocation(),


@ -109,7 +109,7 @@ static Value *tryFold(AffineMap map, ArrayRef<Value *> operands,
return nullptr;
}
static Value *emitOrFoldComposedAffineApply(FuncBuilder *b, Location loc,
static Value *emitOrFoldComposedAffineApply(OpBuilder *b, Location loc,
AffineMap map,
ArrayRef<Value *> operandsRef,
FunctionConstants &state) {
@ -121,7 +121,7 @@ static Value *emitOrFoldComposedAffineApply(FuncBuilder *b, Location loc,
}
SmallVector<Value *, 4>
mlir::linalg::applyMapToValues(FuncBuilder *b, Location loc, AffineMap map,
mlir::linalg::applyMapToValues(OpBuilder *b, Location loc, AffineMap map,
ArrayRef<Value *> values,
FunctionConstants &state) {
SmallVector<Value *, 4> res;
@ -141,7 +141,7 @@ Value *FunctionConstants::getOrCreateIndex(int64_t v) {
auto it = map.find(v);
if (it != map.end())
return it->second;
FuncBuilder builder(f);
OpBuilder builder(f.getBody());
edsc::ScopedContext s(builder, f.getLoc());
return map.insert(std::make_pair(v, edsc::intrinsics::constant_index(v)))
.first->getSecond();


@ -2302,11 +2302,11 @@ public:
/// more specific builder type.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow-field"
FuncBuilder builder;
OpBuilder builder;
#pragma clang diagnostic pop
FunctionParser(ParserState &state, Function *function)
: Parser(state), builder(function), function(function) {}
: Parser(state), builder(function->getBody()), function(function) {}
~FunctionParser();


@ -77,7 +77,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext,
for (auto *arg : func.getArguments()) {
if (!config.isHandledType(arg->getType()))
continue;
FuncBuilder b(func);
OpBuilder b(func.getBody());
APFloat minValue(-1.0f);
APFloat maxValue(1.0f);
ElementsAttr layerStats = DenseFPElementsAttr::get(
@ -102,7 +102,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext,
if (!config.isHandledType(originalResult->getType()))
return;
FuncBuilder b(op->getBlock(), ++op->getIterator());
OpBuilder b(op->getBlock(), ++op->getIterator());
APFloat minValue(-1.0f);
APFloat maxValue(1.0f);


@ -184,7 +184,7 @@ void InferQuantizedTypesPass::transformOperandType(CAGOperandAnchor *anchor,
Type newType) {
Value *inputValue = anchor->getValue();
Operation *op = anchor->getOp();
FuncBuilder b(op->getBlock(), Block::iterator(op));
OpBuilder b(op->getBlock(), Block::iterator(op));
SmallVector<Value *, 1> removeValuesIfDead;
@ -240,7 +240,7 @@ void InferQuantizedTypesPass::transformResultType(CAGResultAnchor *anchor,
Type newType) {
Value *origResultValue = anchor->getValue();
Operation *op = origResultValue->getDefiningOp();
FuncBuilder b(op->getBlock(), ++Block::iterator(op));
OpBuilder b(op->getBlock(), ++Block::iterator(op));
Value *replacedResultValue = nullptr;
Value *newResultValue = nullptr;


@ -108,8 +108,8 @@ struct DialectConversionRewriter final : public PatternRewriter {
SmallVector<Value *, 2> newValues;
};
DialectConversionRewriter(Function *fn)
: PatternRewriter(fn), argConverter(fn->getContext()) {}
DialectConversionRewriter(Region &region)
: PatternRewriter(region), argConverter(region.getContext()) {}
~DialectConversionRewriter() = default;
/// Cleanup and destroy any generated rewrite operations. This method is
@ -151,7 +151,7 @@ struct DialectConversionRewriter final : public PatternRewriter {
/// PatternRewriter hook for creating a new operation.
Operation *createOperation(const OperationState &state) override {
auto *result = FuncBuilder::createOperation(state);
auto *result = OpBuilder::createOperation(state);
createdOps.push_back(result);
return result;
}
@ -572,7 +572,7 @@ LogicalResult FunctionConverter::convertFunction(Function *f) {
return success();
// Rewrite the function body.
DialectConversionRewriter rewriter(f);
DialectConversionRewriter rewriter(f->getBody());
if (failed(convertRegion(rewriter, f->getBody(), f->getLoc()))) {
// Reset any of the generated rewrites.
rewriter.discardRewrites();


@ -240,14 +240,14 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
return true;
// DMAs for read regions are going to be inserted just before the for loop.
FuncBuilder prologue(block, begin);
OpBuilder prologue(block, begin);
// DMAs for write regions are going to be inserted just after the for loop.
FuncBuilder epilogue(block, end);
FuncBuilder *b = region.isWrite() ? &epilogue : &prologue;
OpBuilder epilogue(block, end);
OpBuilder *b = region.isWrite() ? &epilogue : &prologue;
// Builder to create constants at the top level.
auto *func = block->getFunction();
FuncBuilder top(func);
OpBuilder top(func->getBody());
auto loc = region.loc;
auto *memref = region.memref;
@ -759,7 +759,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
void DmaGeneration::runOnFunction() {
Function &f = getFunction();
FuncBuilder topBuilder(f);
OpBuilder topBuilder(f.getBody());
zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);
// Override default if a command line option is provided.


@ -1006,9 +1006,9 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
auto *forInst = forOp.getOperation();
// Create builder to insert alloc op just before 'forOp'.
FuncBuilder b(forInst);
OpBuilder b(forInst);
// Builder to create constants at the top level.
FuncBuilder top(forInst->getFunction());
OpBuilder top(forInst->getFunction()->getBody());
// Create new memref type based on slice bounds.
auto *oldMemRef = cast<StoreOp>(srcStoreOpInst).getMemRef();
auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();


@ -203,7 +203,7 @@ void LoopInvariantCodeMotion::runOnAffineForOp(AffineForOp forOp) {
SmallPtrSet<Operation *, 8> definedOps;
// This is the place where hoisted instructions would reside.
FuncBuilder b(forOp.getOperation());
OpBuilder b(forOp.getOperation());
SmallPtrSet<Operation *, 8> opsToHoist;
SmallVector<Operation *, 8> opsToMove;


@ -112,7 +112,7 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
assert(!origLoops.empty());
assert(origLoops.size() == tileSizes.size());
FuncBuilder b(origLoops[0].getOperation());
OpBuilder b(origLoops[0].getOperation());
unsigned width = origLoops.size();
// Bounds for tile space loops.
@ -207,7 +207,7 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
// Add intra-tile (or point) loops.
for (unsigned i = 0; i < width; i++) {
FuncBuilder b(topLoop);
OpBuilder b(topLoop);
// Loop bounds will be set later.
auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
pointLoop.getBody()->getOperations().splice(
@ -221,7 +221,7 @@ LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
// Add tile space loops;
for (unsigned i = width; i < 2 * width; i++) {
FuncBuilder b(topLoop);
OpBuilder b(topLoop);
// Loop bounds will be set later.
auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
tileSpaceLoop.getBody()->getOperations().splice(


@ -185,8 +185,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
// unrollJamFactor.
if (getLargestDivisorOfTripCount(forOp) % unrollJamFactor != 0) {
// Insert the cleanup loop right after 'forOp'.
FuncBuilder builder(forInst->getBlock(),
std::next(Block::iterator(forInst)));
OpBuilder builder(forInst->getBlock(), std::next(Block::iterator(forInst)));
auto cleanupAffineForOp = cast<AffineForOp>(builder.clone(*forInst));
// Adjust the lower bound of the cleanup loop; its upper bound is the same
// as the original loop's upper bound.
@ -212,7 +211,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
for (auto &subBlock : subBlocks) {
// Builder to insert unroll-jammed bodies. Insert right at the end of
// sub-block.
FuncBuilder builder(subBlock.first->getBlock(), std::next(subBlock.second));
OpBuilder builder(subBlock.first->getBlock(), std::next(subBlock.second));
// Unroll and jam (appends unrollJamFactor-1 additional copies).
for (unsigned i = 1; i < unrollJamFactor; i++) {


@ -41,7 +41,7 @@ class AffineApplyExpander
public:
// This internal class expects arguments to be non-null; checks must be
// performed at the call site.
AffineApplyExpander(FuncBuilder *builder, ArrayRef<Value *> dimValues,
AffineApplyExpander(OpBuilder *builder, ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues, Location loc)
: builder(*builder), dimValues(dimValues), symbolValues(symbolValues),
loc(loc) {}
@ -206,7 +206,7 @@ public:
}
private:
FuncBuilder &builder;
OpBuilder &builder;
ArrayRef<Value *> dimValues;
ArrayRef<Value *> symbolValues;
@ -216,7 +216,7 @@ private:
// Create a sequence of operations that implement the `expr` applied to the
// given dimension and symbol values.
static mlir::Value *expandAffineExpr(FuncBuilder *builder, Location loc,
static mlir::Value *expandAffineExpr(OpBuilder *builder, Location loc,
AffineExpr expr,
ArrayRef<Value *> dimValues,
ArrayRef<Value *> symbolValues) {
@ -226,7 +226,7 @@ static mlir::Value *expandAffineExpr(FuncBuilder *builder, Location loc,
// Create a sequence of operations that implement the `affineMap` applied to
// the given `operands` (as if it were an AffineApplyOp).
Optional<SmallVector<Value *, 8>> static expandAffineMap(
FuncBuilder *builder, Location loc, AffineMap affineMap,
OpBuilder *builder, Location loc, AffineMap affineMap,
ArrayRef<Value *> operands) {
auto numDims = affineMap.getNumDims();
auto expanded = functional::map(
@ -260,7 +260,7 @@ struct LowerAffinePass : public FunctionPass<LowerAffinePass> {
// recognize as a reduction by the subsequent passes.
static Value *buildMinMaxReductionSeq(Location loc, CmpIPredicate predicate,
ArrayRef<Value *> values,
FuncBuilder &builder) {
OpBuilder &builder) {
assert(!llvm::empty(values) && "empty min/max chain");
auto valueIt = values.begin();
@ -348,7 +348,7 @@ static LogicalResult lowerAffineFor(AffineForOp forOp) {
// Append the induction variable stepping logic and branch back to the exit
// condition block. Construct an affine expression f : (x -> x+step) and
// apply this expression to the induction variable.
FuncBuilder builder(bodyBlock);
OpBuilder builder(bodyBlock);
auto affStep = builder.getAffineConstantExpr(forOp.getStep());
auto affDim = builder.getAffineDimExpr(0);
auto stepped = expandAffineExpr(&builder, loc, affDim + affStep, iv, {});
@ -482,7 +482,7 @@ static LogicalResult lowerAffineIf(AffineIfOp ifOp) {
std::prev(oldThen->end()));
}
FuncBuilder builder(thenBlock);
OpBuilder builder(thenBlock);
builder.create<BranchOp>(loc, continueBlock);
// Handle the 'else' block the same way, but we skip it if we have no else
@ -569,7 +569,7 @@ static LogicalResult lowerAffineIf(AffineIfOp ifOp) {
// Convert an "affine.apply" operation into a sequence of arithmetic
// operations using the StandardOps dialect. Return true on error.
static LogicalResult lowerAffineApply(AffineApplyOp op) {
FuncBuilder builder(op.getOperation());
OpBuilder builder(op.getOperation());
auto maybeExpandedMap =
expandAffineMap(&builder, op.getLoc(), op.getAffineMap(),
llvm::to_vector<8>(op.getOperands()));


@ -238,7 +238,7 @@ static SmallVector<unsigned, 8> delinearize(unsigned linearIndex,
return res;
}
static Operation *instantiate(FuncBuilder *b, Operation *opInst,
static Operation *instantiate(OpBuilder *b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap);
@ -257,7 +257,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
if (it == substitutionsMap->end()) {
auto *opInst = v->getDefiningOp();
if (isa<ConstantOp>(opInst)) {
FuncBuilder b(opInst);
OpBuilder b(opInst);
auto *op = instantiate(&b, opInst, hwVectorType, substitutionsMap);
auto res = substitutionsMap->insert(std::make_pair(v, op->getResult(0)));
assert(res.second && "Insertion failed");
@ -331,7 +331,7 @@ static Value *substitute(Value *v, VectorType hwVectorType,
/// TODO(ntv): these implementation details should be captured in a
/// vectorization trait at the op level directly.
static SmallVector<mlir::Value *, 8>
reindexAffineIndices(FuncBuilder *b, VectorType hwVectorType,
reindexAffineIndices(OpBuilder *b, VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
ArrayRef<Value *> memrefIndices) {
auto vectorShape = hwVectorType.getShape();
@ -404,7 +404,7 @@ materializeAttributes(Operation *opInst, VectorType hwVectorType) {
/// substitutionsMap.
///
/// If the underlying substitution fails, this fails too and returns nullptr.
static Operation *instantiate(FuncBuilder *b, Operation *opInst,
static Operation *instantiate(OpBuilder *b, Operation *opInst,
VectorType hwVectorType,
DenseMap<Value *, Value *> *substitutionsMap) {
assert(!isa<VectorTransferReadOp>(opInst) &&
@ -481,7 +481,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy transfer,
/// `hwVectorType` in the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
static Operation *instantiate(FuncBuilder *b, VectorTransferReadOp read,
static Operation *instantiate(OpBuilder *b, VectorTransferReadOp read,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@ -505,7 +505,7 @@ static Operation *instantiate(FuncBuilder *b, VectorTransferReadOp read,
/// `hwVectorType` in the covering of the super-vector type. For a more
/// detailed description of the problem, see the description of
/// reindexAffineIndices.
static Operation *instantiate(FuncBuilder *b, VectorTransferWriteOp write,
static Operation *instantiate(OpBuilder *b, VectorTransferWriteOp write,
VectorType hwVectorType,
ArrayRef<unsigned> hwVectorInstance,
DenseMap<Value *, Value *> *substitutionsMap) {
@ -547,7 +547,7 @@ static bool instantiateMaterialization(Operation *op,
LLVM_DEBUG(dbgs() << "\ninstantiate: " << *op);
// Create a builder here for unroll-and-jam effects.
FuncBuilder b(op);
OpBuilder b(op);
// AffineApplyOp are ignored: instantiating the proper vector op will take
// care of AffineApplyOps by composing them properly.
if (isa<AffineApplyOp>(op)) {


@ -73,7 +73,7 @@ static unsigned getTagMemRefPos(Operation &dmaInst) {
/// modulo 2. Returns false if such a replacement cannot be performed.
static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
auto *forBody = forOp.getBody();
FuncBuilder bInner(forBody, forBody->begin());
OpBuilder bInner(forBody, forBody->begin());
bInner.setInsertionPoint(forBody, forBody->begin());
// Doubles the shape with a leading dimension extent of 2.
@ -94,7 +94,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
// The double buffer is allocated right before 'forInst'.
auto *forInst = forOp.getOperation();
FuncBuilder bOuter(forInst);
OpBuilder bOuter(forInst);
// Put together alloc operands for any dynamic dimensions of the memref.
SmallVector<Value *, 4> allocOperands;
unsigned dynamicDimCount = 0;
@ -360,7 +360,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
// Tagging operations with shifts for debugging purposes.
LLVM_DEBUG({
FuncBuilder b(&op);
OpBuilder b(&op);
op.setAttr("shift", b.getI64IntegerAttr(shifts[s - 1]));
});
}
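
The debug tagging above also shows that OpBuilder retains Builder's attribute factories, so a builder constructed at any operation can mint attributes for it. A minimal sketch under that assumption (tagShift is a hypothetical helper):

#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper: tag `op` with an i64 "shift" attribute, as the
// LLVM_DEBUG block above does for software-pipelined operations.
static void tagShift(Operation *op, int64_t shift) {
  OpBuilder b(op);
  op->setAttr("shift", b.getI64IntegerAttr(shift));
}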

View File

@ -110,7 +110,7 @@ LogicalResult OperationFolder::tryToFold(Operation *op,
assert(foldResults.size() == op->getNumResults());
// Create the result constants and replace the results.
FuncBuilder builder(op);
OpBuilder builder(op);
for (unsigned i = 0, e = op->getNumResults(); i != e; ++i) {
assert(!foldResults[i].isNull() && "expected valid OpFoldResult");
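
Because folding runs before the op is erased, the replacement constants come from a builder positioned at the op itself. A condensed, hypothetical illustration of that replacement loop (simplified to index-typed results; not the OperationFolder implementation):

#include "llvm/ADT/ArrayRef.h"
#include "mlir/IR/Builders.h"
#include "mlir/StandardOps/Ops.h"

using namespace mlir;

// Hypothetical illustration: replace each result of `op` with a fresh
// constant created just before it; the caller can then erase `op`.
static void replaceResultsWithConstants(Operation *op,
                                        llvm::ArrayRef<int64_t> values) {
  OpBuilder builder(op);
  for (unsigned i = 0, e = op->getNumResults(); i != e; ++i) {
    auto constOp = builder.create<ConstantIndexOp>(op->getLoc(), values[i]);
    op->getResult(i)->replaceAllUsesWith(constOp);
  }
}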

View File

@ -46,7 +46,7 @@ class GreedyPatternRewriteDriver : public PatternRewriter {
public:
explicit GreedyPatternRewriteDriver(Function &fn,
OwningRewritePatternList &&patterns)
: PatternRewriter(&fn), matcher(std::move(patterns)) {
: PatternRewriter(fn.getBody()), matcher(std::move(patterns)) {
worklist.reserve(64);
}
@ -88,7 +88,7 @@ protected:
// Implement the hook for creating operations, and make sure that newly
// created ops are added to the worklist for processing.
Operation *createOperation(const OperationState &state) override {
auto *result = FuncBuilder::createOperation(state);
auto *result = OpBuilder::createOperation(state);
addToWorklist(result);
return result;
}
@ -142,14 +142,16 @@ private:
/// Perform the rewrites.
bool GreedyPatternRewriteDriver::simplifyFunction(int maxIterations) {
Function *fn = getFunction();
OperationFolder helper(fn);
Region *region = getRegion();
// TODO(riverriddle) OperationFolder should take a region to insert into.
OperationFolder helper(region->getContainingFunction());
bool changed = false;
int i = 0;
do {
// Add all operations to the worklist.
fn->walk([&](Operation *op) { addToWorklist(op); });
region->walk([&](Operation *op) { addToWorklist(op); });
// These are scratch vectors used in the folding loop below.
SmallVector<Value *, 8> originalOperands, resultValues;
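
Rooting the driver at a region rather than a function is what the worklist seeding above relies on: Region::walk visits every nested operation just as Function::walk did. A minimal sketch assuming this revision's API (countBodyOps is a hypothetical helper):

#include "mlir/IR/Function.h"

using namespace mlir;

// Hypothetical helper: count the operations nested in a function's body
// region, mirroring the region->walk(...) seeding above.
static unsigned countBodyOps(Function &fn) {
  unsigned count = 0;
  fn.getBody().walk([&](Operation *op) { ++count; });
  return count;
}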

View File

@ -46,7 +46,7 @@ using namespace mlir;
void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
AffineMap *map,
SmallVectorImpl<Value *> *operands,
FuncBuilder *b) {
OpBuilder *b) {
auto lbMap = forOp.getLowerBoundMap();
// Single result lower bound map only.
@ -125,15 +125,14 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
Operation *op = forOp.getOperation();
if (!iv->use_empty()) {
if (forOp.hasConstantLowerBound()) {
auto *mlFunc = op->getFunction();
FuncBuilder topBuilder(mlFunc);
OpBuilder topBuilder(op->getFunction()->getBody());
auto constOp = topBuilder.create<ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
iv->replaceAllUsesWith(constOp);
} else {
AffineBound lb = forOp.getLowerBound();
SmallVector<Value *, 4> lbOperands(lb.operand_begin(), lb.operand_end());
FuncBuilder builder(op->getBlock(), Block::iterator(op));
OpBuilder builder(op->getBlock(), Block::iterator(op));
if (lb.getMap() == builder.getDimIdentityMap()) {
// No need of generating an affine.apply.
iv->replaceAllUsesWith(lbOperands[0]);
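
Constructing the builder from the function's body region positions it at the start of the entry block, which is where the promoted lower-bound constant must live. A minimal sketch under that assumption (createTopLevelConstant is a hypothetical helper):

#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"
#include "mlir/StandardOps/Ops.h"

using namespace mlir;

// Hypothetical helper: materialize a constant index at the top of the
// function containing `op`, as the promotion above does for the bound.
static Value *createTopLevelConstant(Operation *op, int64_t value) {
  // OpBuilder(Region &) is assumed to insert at the start of the region's
  // first block, matching the old FuncBuilder(Function *) behavior.
  OpBuilder topBuilder(op->getFunction()->getBody());
  return topBuilder.create<ConstantIndexOp>(op->getLoc(), value);
}
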
@ -173,7 +172,7 @@ static AffineForOp
generateLoop(AffineMap lbMap, AffineMap ubMap,
const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>>
&instGroupQueue,
unsigned offset, AffineForOp srcForInst, FuncBuilder *b) {
unsigned offset, AffineForOp srcForInst, OpBuilder *b) {
SmallVector<Value *, 4> lbOperands(srcForInst.getLowerBoundOperands());
SmallVector<Value *, 4> ubOperands(srcForInst.getUpperBoundOperands());
@ -188,7 +187,7 @@ generateLoop(AffineMap lbMap, AffineMap ubMap,
BlockAndValueMapping operandMap;
FuncBuilder bodyBuilder = loopChunk.getBodyBuilder();
OpBuilder bodyBuilder = loopChunk.getBodyBuilder();
for (auto it = instGroupQueue.begin() + offset, e = instGroupQueue.end();
it != e; ++it) {
uint64_t shift = it->first;
@ -291,7 +290,7 @@ LogicalResult mlir::instBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts,
auto origLbMap = forOp.getLowerBoundMap();
uint64_t lbShift = 0;
FuncBuilder b(forOp.getOperation());
OpBuilder b(forOp.getOperation());
for (uint64_t d = 0, e = sortedInstGroups.size(); d < e; ++d) {
// If nothing is shifted by d, continue.
if (sortedInstGroups[d].empty())
@ -424,7 +423,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
// Generate the cleanup loop if trip count isn't a multiple of unrollFactor.
Operation *op = forOp.getOperation();
if (getLargestDivisorOfTripCount(forOp) % unrollFactor != 0) {
FuncBuilder builder(op->getBlock(), ++Block::iterator(op));
OpBuilder builder(op->getBlock(), ++Block::iterator(op));
auto cleanupForInst = cast<AffineForOp>(builder.clone(*op));
AffineMap cleanupMap;
SmallVector<Value *, 4> cleanupOperands;
@ -448,7 +447,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
// Builder to insert unrolled bodies just before the terminator of the body of
// 'forOp'.
FuncBuilder builder = forOp.getBodyBuilder();
OpBuilder builder = forOp.getBodyBuilder();
// Keep a pointer to the last non-terminator operation in the original block
// so that we know what to clone (since we are doing this in-place).
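
getBodyBuilder returns an OpBuilder positioned just before the loop body's terminator, so the unrolled copies land at the end of the body. A minimal sketch assuming this revision's API (cloneAtBodyEnd is a hypothetical helper):

#include "mlir/AffineOps/AffineOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper: clone `op` at the end of `forOp`'s body, before the
// terminator, which is the insertion point the unroller uses above.
static Operation *cloneAtBodyEnd(AffineForOp forOp, Operation *op) {
  OpBuilder builder = forOp.getBodyBuilder();
  return builder.clone(*op);
}
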
@ -647,7 +646,7 @@ void mlir::sinkLoop(AffineForOp forOp, unsigned loopDepth) {
// ...
// }
// ```
static void augmentMapAndBounds(FuncBuilder *b, Value *iv, AffineMap *map,
static void augmentMapAndBounds(OpBuilder *b, Value *iv, AffineMap *map,
SmallVector<Value *, 4> *operands,
int64_t offset = 0) {
auto bounds = llvm::to_vector<4>(map->getResults());
@ -665,7 +664,7 @@ static void cloneLoopBodyInto(AffineForOp forOp, Value *oldIv,
AffineForOp newForOp) {
BlockAndValueMapping map;
map.map(oldIv, newForOp.getInductionVar());
FuncBuilder b = newForOp.getBodyBuilder();
OpBuilder b = newForOp.getBodyBuilder();
for (auto &op : *forOp.getBody()) {
// Step over newForOp in case it is nested under forOp.
if (&op == newForOp.getOperation()) {
@ -704,7 +703,7 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
forOp.setStep(scaledStep);
auto *op = forOp.getOperation();
FuncBuilder b(op->getBlock(), ++Block::iterator(op));
OpBuilder b(op->getBlock(), ++Block::iterator(op));
// Lower-bound map creation.
auto lbMap = forOp.getLowerBoundMap();
@ -720,7 +719,7 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
SmallVector<AffineForOp, 8> innerLoops;
for (auto t : targets) {
// Insert newForOp before the terminator of `t`.
FuncBuilder b = t.getBodyBuilder();
OpBuilder b = t.getBodyBuilder();
auto newForOp = b.create<AffineForOp>(t.getLoc(), lbOperands, lbMap,
ubOperands, ubMap, originalStep);
cloneLoopBodyInto(t, forOp.getInductionVar(), newForOp);
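
Note the ++Block::iterator(op) idiom used twice above: it yields an insertion point immediately after an operation, which neither OpBuilder constructor provides directly. A minimal sketch under that assumption (makeBuilderAfter is a hypothetical helper):

#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper: a builder whose insertion point sits right after
// `op`, via the ++Block::iterator(op) idiom used above.
static OpBuilder makeBuilderAfter(Operation *op) {
  return OpBuilder(op->getBlock(), ++Block::iterator(op));
}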

View File

@ -123,7 +123,7 @@ bool mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
opInst->operand_begin() + memRefOperandPos);
state.operands.push_back(newMemRef);
FuncBuilder builder(opInst);
OpBuilder builder(opInst);
for (auto *extraIndex : extraIndices) {
assert(extraIndex->getDefiningOp()->getNumResults() == 1 &&
"single result op's expected to generate these indices");
@ -249,7 +249,7 @@ void mlir::createAffineComputationSlice(
if (localized)
return;
FuncBuilder builder(opInst);
OpBuilder builder(opInst);
SmallVector<Value *, 4> composedOpOperands(subOperands);
auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size());
fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands);
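
The slice construction above composes an identity map with the sub-operands before emitting a single affine.apply. A condensed sketch of that sequence, assuming this revision's signatures for fullyComposeAffineMapAndOperands and AffineApplyOp creation (emitComposedApply is a hypothetical helper):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/AffineOps/AffineOps.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical helper: emit one affine.apply computing the composed
// indexing of `operands`, inserted before `opInst`.
static Operation *emitComposedApply(Operation *opInst,
                                    llvm::ArrayRef<Value *> operands) {
  OpBuilder builder(opInst);
  llvm::SmallVector<Value *, 4> composedOperands(operands.begin(),
                                                 operands.end());
  auto composedMap = builder.getMultiDimIdentityMap(composedOperands.size());
  fullyComposeAffineMapAndOperands(&composedMap, &composedOperands);
  auto apply = builder.create<AffineApplyOp>(opInst->getLoc(), composedMap,
                                             composedOperands);
  return apply.getOperation();
}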

View File

@ -240,7 +240,7 @@ void VectorizerTestPass::testNormalizeMaps() {
pattern.match(f, &matches);
for (auto m : matches) {
auto app = cast<AffineApplyOp>(m.getMatchedOperation());
FuncBuilder b(m.getMatchedOperation());
OpBuilder b(m.getMatchedOperation());
SmallVector<Value *, 8> operands(app.getOperands());
makeComposedAffineApply(&b, app.getLoc(), app.getAffineMap(), operands);
}

View File

@ -805,7 +805,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
return LogicalResult::Failure;
LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
LLVM_DEBUG(permutationMap.print(dbgs()));
FuncBuilder b(opInst);
OpBuilder b(opInst);
auto transfer = b.create<VectorTransferReadOp>(
opInst->getLoc(), vectorType, memoryOp.getMemRef(),
map(makePtrDynCaster<Value>(), memoryOp.getIndices()), permutationMap);
@ -920,7 +920,7 @@ static Value *vectorizeConstant(Operation *op, ConstantOp constant, Type type) {
!VectorType::isValidElementType(constant.getType())) {
return nullptr;
}
FuncBuilder b(op);
OpBuilder b(op);
Location loc = op->getLoc();
auto vectorType = type.cast<VectorType>();
auto attr = SplatElementsAttr::get(vectorType, constant.getValue());
@ -1015,7 +1015,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
auto *value = store.getValueToStore();
auto *vectorValue = vectorizeOperand(value, opInst, state);
auto indices = map(makePtrDynCaster<Value>(), store.getIndices());
FuncBuilder b(opInst);
OpBuilder b(opInst);
auto permutationMap =
makePermutationMap(opInst, state->strategy->loopToVectorDim);
if (!permutationMap)
@ -1054,7 +1054,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
// name that works both in scalar mode and vector mode.
// TODO(ntv): Is it worth considering an Operation.clone operation which
// changes the type so we can promote an Operation with less boilerplate?
FuncBuilder b(opInst);
OpBuilder b(opInst);
OperationState newOp(b.getContext(), opInst->getLoc(),
opInst->getName().getStringRef(), vectorOperands,
vectorTypes, opInst->getAttrs(), /*successors=*/{},
@ -1136,7 +1136,7 @@ static LogicalResult vectorizeRootMatch(NestedMatch m,
/// maintains a clone for handling failure and restores the proper state via
/// RAII.
auto *loopInst = loop.getOperation();
FuncBuilder builder(loopInst);
OpBuilder builder(loopInst);
auto clonedLoop = cast<AffineForOp>(builder.clone(*loopInst));
struct Guard {
LogicalResult failure() {
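
Cloning the loop nest before attempting vectorization gives the pass a cheap rollback path, which is what the Guard above manages via RAII. A minimal sketch of the speculative-clone idiom, assuming this revision's API (tryTransformWithRollback is a hypothetical helper, not the pass's actual control flow):

#include "mlir/IR/Builders.h"

using namespace mlir;

// Hypothetical sketch: copy `op` right before itself, attempt the
// transformation on the copy, and erase the copy if it fails.
static bool tryTransformWithRollback(Operation *op,
                                     bool (*transform)(Operation *)) {
  OpBuilder builder(op);
  Operation *clone = builder.clone(*op);
  if (transform(clone))
    return true;
  clone->erase();
  return false;
}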

View File

@ -62,7 +62,7 @@ TEST_FUNC(builder_dynamic_for_func_args) {
auto f =
makeFunction("builder_dynamic_for_func_args", {}, {indexType, indexType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle i(indexType), j(indexType), lb(f->getArgument(0)),
ub(f->getArgument(1));
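
All of the EDSC tests below migrate the same way: the builder is seeded from the function's body region and handed to ScopedContext. A condensed sketch of that setup, assuming this revision's EDSC API (buildTestFunc is a hypothetical helper):

#include "mlir/EDSC/Builders.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"

using namespace mlir;
using namespace mlir::edsc;

// Hypothetical sketch of the test setup above: position an OpBuilder at
// the function entry and activate it for EDSC via ScopedContext.
static void buildTestFunc(Function *f) {
  OpBuilder builder(f->getBody());
  ScopedContext scope(builder, f->getLoc());
  // EDSC ValueHandle construction and emission would follow here.
}
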
@ -113,7 +113,7 @@ TEST_FUNC(builder_dynamic_for) {
auto f = makeFunction("builder_dynamic_for", {},
{indexType, indexType, indexType, indexType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle i(indexType), a(f->getArgument(0)), b(f->getArgument(1)),
c(f->getArgument(2)), d(f->getArgument(3));
@ -136,7 +136,7 @@ TEST_FUNC(builder_max_min_for) {
auto f = makeFunction("builder_max_min_for", {},
{indexType, indexType, indexType, indexType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle i(indexType), lb1(f->getArgument(0)), lb2(f->getArgument(1)),
ub1(f->getArgument(2)), ub2(f->getArgument(3));
@ -157,7 +157,7 @@ TEST_FUNC(builder_blocks) {
using namespace edsc::op;
auto f = makeFunction("builder_blocks");
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle c1(ValueHandle::create<ConstantIntOp>(42, 32)),
c2(ValueHandle::create<ConstantIntOp>(1234, 32));
@ -201,7 +201,7 @@ TEST_FUNC(builder_blocks_eager) {
using namespace edsc::op;
auto f = makeFunction("builder_blocks_eager");
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle c1(ValueHandle::create<ConstantIntOp>(42, 32)),
c2(ValueHandle::create<ConstantIntOp>(1234, 32));
@ -244,7 +244,7 @@ TEST_FUNC(builder_cond_branch) {
auto f = makeFunction("builder_cond_branch", {},
{IntegerType::get(1, &globalContext())});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle funcArg(f->getArgument(0));
ValueHandle c32(ValueHandle::create<ConstantIntOp>(32, 32)),
@ -281,7 +281,7 @@ TEST_FUNC(builder_cond_branch_eager) {
auto f = makeFunction("builder_cond_branch_eager", {},
{IntegerType::get(1, &globalContext())});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle funcArg(f->getArgument(0));
ValueHandle c32(ValueHandle::create<ConstantIntOp>(32, 32)),
@ -321,7 +321,7 @@ TEST_FUNC(builder_helpers) {
auto f =
makeFunction("builder_helpers", {}, {memrefType, memrefType, memrefType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle f7(
@ -373,7 +373,7 @@ TEST_FUNC(custom_ops) {
auto indexType = IndexType::get(&globalContext());
auto f = makeFunction("custom_ops", {}, {indexType, indexType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
CustomOperation<ValueHandle> MY_CUSTOM_OP("my_custom_op");
CustomOperation<OperationHandle> MY_CUSTOM_OP_0("my_custom_op_0");
@ -412,7 +412,7 @@ TEST_FUNC(insertion_in_block) {
auto indexType = IndexType::get(&globalContext());
auto f = makeFunction("insertion_in_block", {}, {indexType, indexType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
BlockHandle b1;
// clang-format off
@ -438,7 +438,7 @@ TEST_FUNC(select_op) {
auto memrefType = MemRefType::get({-1, -1, -1}, f32Type, {}, 0);
auto f = makeFunction("select_op", {}, {memrefType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
// clang-format off
ValueHandle zero = constant_index(0), one = constant_index(1);
@ -474,7 +474,7 @@ TEST_FUNC(tile_2d) {
MemRefType::get({-1, -1, -1}, FloatType::getF32(&globalContext()), {}, 0);
auto f = makeFunction("tile_2d", {}, {memrefType, memrefType, memrefType});
FuncBuilder builder(*f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle zero = constant_index(0);
MemRefView vA(f->getArgument(0)), vB(f->getArgument(1)),
@ -548,7 +548,7 @@ TEST_FUNC(vectorize_2d) {
mlir::Module module(&globalContext());
module.getFunctions().push_back(f);
FuncBuilder builder(f);
OpBuilder builder(f->getBody());
ScopedContext scope(builder, f->getLoc());
ValueHandle zero = constant_index(0);
MemRefView vA(f->getArgument(0)), vB(f->getArgument(1)),