[MLIR][Linalg] Retire tile_to_scf_for (#65633)
Both `TileOp` and `TileToScfForOp` use the tiling interface and the
`tileUsingSCFForOp` method. This duplication was introduced in
44cfea0279
as a way to retire `linalg::tileLinalgOp`. There is no more need for the
duplication, and `TileOp` has received the more recent changes, so retire
`TileToScfForOp`.
Commit: e5137e7c33
Parent: 62e576b454
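
For anyone updating transform scripts, the migration is mechanical: every
`transform.structured.tile_to_scf_for` call becomes `transform.structured.tile`
with the same tile sizes and the same result signature, as the test updates
below show. A minimal before/after sketch (the `%matmul` handle name is
illustrative):

  %tiled, %loops:3 = transform.structured.tile_to_scf_for %matmul [64, 128, 256]
    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)

becomes

  %tiled, %loops:3 = transform.structured.tile %matmul [64, 128, 256]
    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)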
@@ -753,6 +753,7 @@ def PackOp : Op<Transform_Dialect, "structured.pack", [
//===----------------------------------------------------------------------===//
// PackGreedilyOp
//===----------------------------------------------------------------------===//

def PackGreedilyOp : Op<Transform_Dialect, "structured.pack_greedily", [
    DeclareOpInterfaceMethods<TransformOpInterface>,
    DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
@@ -861,6 +862,7 @@ def PackGreedilyOp : Op<Transform_Dialect, "structured.pack_greedily", [
//===----------------------------------------------------------------------===//
// PackTransposeOp
//===----------------------------------------------------------------------===//

def PackTransposeOp : Op<Transform_Dialect, "structured.pack_transpose", [
    FunctionalStyleTransformOpTrait,
    MemoryEffectsOpInterface,
@@ -1944,74 +1946,6 @@ def TileToForallOp :
  }];
}

-//===----------------------------------------------------------------------===//
-// TileToScfForOp
-//===----------------------------------------------------------------------===//
-
-def TileToScfForOp : Op<Transform_Dialect, "structured.tile_to_scf_for",
-    [DeclareOpInterfaceMethods<TransformOpInterface>,
-     DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
-     ReportTrackingListenerFailuresOpTrait]> {
-  let description = [{
-    Indicates that the given `target` op should be tiled with the given sizes.
-    This transform generates a loop nest with a smaller ("tiled") target
-    operation in its body. The target must implement TilingInterface.
-
-    Tile sizes may be known at transformation time, in which case they are
-    expected to be provided in the `static_size` attribute, or not, in which
-    case the tile value must be computed by the payload IR and the handle to the
-    operation computing it must be provided through `dynamic_sizes`. When the
-    sizes are not known statically, the corresponding entry in the
-    `static_sizes` attribute must be set to `ShapedType::kDynamic`. Only
-    the dynamic sizes must be provided in `dynamic_sizes`, i.e., there should
-    be as many handles as `ShapedType::kDynamic` values in the
-    `static_sizes` attribute. A static size of `0` indicates that the dimension
-    should not be tiled. No loop will be generated for such dimensions. If all
-    tile sizes are `0`, this transform is effectively a no-op.
-
-    This op returns handles to the tiled op (in the generated loop nest) and the
-    generated loops. The number of loops is the number of tile sizes that are
-    statically known to be non-zero.
-
-    #### Return modes
-
-    On success, the resulting handles are associated with co-indexed lists of
-    tiled operations and loops around them.
-
-    This operation only supports TilingInterface ops and produces a silenceable
-    failure if the input contains any non-TilingInterface ops. The ops preceding
-    it in the list associated with the `target` handle will have been tiled.
-
-    This operation produces a silenceable failure if the `dynamic_sizes` handles
-    are associated with lists of payload operations of a size different than
-    that of the list associated with the `target` handle.
-
-    If the internal implementation of tiling for any of the operations fails,
-    produces a definite failure.
-  }];
-
-  let arguments = (ins TransformHandleTypeInterface:$target,
-                   Variadic<TransformHandleTypeInterface>:$dynamic_sizes,
-                   DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:$static_sizes,
-                   DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:$interchange);
-  let results = (outs TransformHandleTypeInterface:$tiled_linalg_op,
-                      Variadic<TransformHandleTypeInterface>:$loops);
-
-  let builders = [
-    OpBuilder<(ins "Value":$target,
-                   "ArrayRef<OpFoldResult>":$mixedTileSizes,
-                   CArg<"ArrayRef<int64_t>", "{}">:$interchange)>
-  ];
-
-  let hasCustomAssemblyFormat = 1;
-
-  let extraClassDeclaration = [{
-    /// Returns the list of tile sizes, which may be static (Attribute) or
-    /// dynamic (Value).
-    SmallVector<OpFoldResult> getMixedSizes();
-  }];
-}
-
//===----------------------------------------------------------------------===//
// VectorizeOp
//===----------------------------------------------------------------------===//

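The tile-size semantics documented in the deleted description carry over
unchanged to `transform.structured.tile`, which is backed by the same
`tileUsingSCFForOp` path: a static size of `0` means the corresponding
dimension is not tiled and yields no loop handle. A sketch based on the
updated tests further down (handle names illustrative), where sizes
`[5, 0, 7]` produce one tiled op and only two loops:

  %tiled, %loops:2 = transform.structured.tile %matmul [5, 0, 7]
    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
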
@@ -2507,8 +2507,9 @@ transform::TileOp::apply(transform::TransformRewriter &rewriter,

  for (Operation *op : dynamicSizeProducers.back()) {
    if (op->getNumResults() == 1 &&
-        isa<IndexType>(op->getResult(0).getType()))
+        isa<IndexType>(op->getResult(0).getType())) {
      continue;
+    }

    DiagnosedSilenceableFailure diag =
        emitSilenceableError() << "expected sizes to be produced by ops "
@@ -2525,11 +2526,10 @@ transform::TileOp::apply(transform::TransformRewriter &rewriter,
  auto scalableSizes = getScalableSizes();
  for (auto [i, op] : llvm::enumerate(targets)) {
    auto tilingInterface = dyn_cast<TilingInterface>(op);
-    auto dpsInterface = dyn_cast<DestinationStyleOpInterface>(op);
-    if (!tilingInterface || !dpsInterface) {
+    if (!tilingInterface) {
      DiagnosedSilenceableFailure diag =
-          emitSilenceableError() << "only ops implementing TilingInterface and "
-                                    "DestinationStyleOpInterface are supported";
+          emitSilenceableError()
+          << "only ops implementing TilingInterface are supported";
      diag.attachNote(op->getLoc()) << "target op";
      return diag;
    }
@@ -2578,10 +2578,7 @@ transform::TileOp::apply(transform::TransformRewriter &rewriter,
    if (failed(maybeTilingResult))
      return DiagnosedSilenceableFailure::definiteFailure();

-    if (dpsInterface.hasBufferSemantics())
-      rewriter.eraseOp(op);
-    else
-      rewriter.replaceOp(op, maybeTilingResult->loops.front()->getResults());
+    rewriter.replaceOp(op, maybeTilingResult->replacements);

    tiled.append(maybeTilingResult->tiledOps);
    for (const auto &en2 : llvm::enumerate(maybeTilingResult->loops))
@@ -2895,204 +2892,6 @@ LogicalResult TileToForallOp::verify() {
  return success();
}

-//===----------------------------------------------------------------------===//
-// TileToScfForOp
-//===----------------------------------------------------------------------===//
-
-void transform::TileToScfForOp::build(OpBuilder &builder,
-                                      OperationState &result, Value target,
-                                      ArrayRef<OpFoldResult> mixedTileSizes,
-                                      ArrayRef<int64_t> interchange) {
-  SmallVector<int64_t> staticTileSizes;
-  SmallVector<Value> dynamicTileSizes;
-  dispatchIndexOpFoldResults(mixedTileSizes, dynamicTileSizes, staticTileSizes);
-  // Call the default builder which sets up the proper operands segment sizes
-  // attributes for multiple variadic operands. In the absence of this,
-  // horrible bugs ensue.
-  auto staticTileSizesAttr = builder.getDenseI64ArrayAttr(staticTileSizes);
-  int64_t numExpectedLoops =
-      staticTileSizes.size() - llvm::count(staticTileSizes, 0);
-  SmallVector<Type> resultTypes(
-      numExpectedLoops, transform::AnyOpType::get(builder.getContext()));
-  build(builder, result,
-        /*tiled_linalg_op=*/target.getType(),
-        /*loops=*/resultTypes,
-        /*target=*/target,
-        /*dynamic_sizes=*/dynamicTileSizes,
-        /*static_sizes=*/staticTileSizesAttr,
-        /*interchange=*/builder.getDenseI64ArrayAttr(interchange));
-}
-
-DiagnosedSilenceableFailure
-transform::TileToScfForOp::apply(transform::TransformRewriter &rewriter,
-                                 TransformResults &transformResults,
-                                 TransformState &state) {
-  ArrayRef<int64_t> tileSizes = getStaticSizes();
-
-  SmallVector<Operation *> targets =
-      llvm::to_vector(state.getPayloadOps(getTarget()));
-  SmallVector<SmallVector<Operation *>> dynamicSizeProducers;
-  dynamicSizeProducers.reserve(getDynamicSizes().size());
-  for (Value dynamicSizeProducerHandle : getDynamicSizes()) {
-    dynamicSizeProducers.push_back(
-        llvm::to_vector(state.getPayloadOps(dynamicSizeProducerHandle)));
-
-    if (dynamicSizeProducers.back().size() != targets.size()) {
-      DiagnosedSilenceableFailure diag =
-          emitSilenceableError()
-          << "expected as many dynamic size-producing operations ("
-          << dynamicSizeProducers.back().size() << ") as target ops ("
-          << targets.size() << ")";
-      diag.attachNote(dynamicSizeProducerHandle.getLoc()) << "for this handle";
-      return diag;
-    }
-
-    for (Operation *op : dynamicSizeProducers.back()) {
-      if (op->getNumResults() == 1 &&
-          isa<IndexType>(op->getResult(0).getType()))
-        continue;
-      DiagnosedSilenceableFailure diag =
-          emitSilenceableError() << "expected sizes to be produced by ops "
-                                    "with a single index-type result";
-      diag.attachNote(op->getLoc()) << "size producer op";
-      diag.attachNote(dynamicSizeProducerHandle.getLoc()) << "for this handle";
-      return diag;
-    }
-  }
-
-  SmallVector<Operation *> tiled;
-  SmallVector<SmallVector<Operation *, 4>, 4> loops;
-  loops.resize(getLoops().size());
-  for (auto en : llvm::enumerate(targets)) {
-    auto tilingInterfaceOp = dyn_cast<TilingInterface>(en.value());
-    if (!tilingInterfaceOp) {
-      DiagnosedSilenceableFailure diag =
-          emitSilenceableError() << "only TilingInterface ops are supported";
-      diag.attachNote(en.value()->getLoc()) << "target op";
-      return diag;
-    }
-
-    scf::SCFTilingOptions tilingOptions;
-    unsigned index = en.index();
-    if (!tileSizes.empty()) {
-      tilingOptions.setTileSizeComputationFunction(
-          [&, index](OpBuilder &b, Operation *) {
-            SmallVector<Value, 4> sizes;
-            sizes.reserve(tileSizes.size());
-            unsigned dynamicIdx = 0;
-            for (OpFoldResult ofr : getMixedSizes()) {
-              if (auto attr = llvm::dyn_cast_if_present<Attribute>(ofr)) {
-                sizes.push_back(b.create<arith::ConstantIndexOp>(
-                    getLoc(), cast<IntegerAttr>(attr).getInt()));
-              } else {
-                sizes.push_back(
-                    dynamicSizeProducers[dynamicIdx++][index]->getResult(0));
-              }
-            }
-            return sizes;
-          });
-    }
-
-    tilingOptions.setInterchange(getInterchange());
-    FailureOr<scf::SCFTilingResult> tilingResult =
-        tileUsingSCFForOp(rewriter, tilingInterfaceOp, tilingOptions);
-    if (failed(tilingResult))
-      return DiagnosedSilenceableFailure::definiteFailure();
-
-    rewriter.replaceOp(tilingInterfaceOp, tilingResult->replacements);
-
-    tiled.append(tilingResult->tiledOps);
-    for (const auto &en2 : llvm::enumerate(tilingResult->loops))
-      loops[en2.index()].push_back(en2.value());
-  }
-
-  transformResults.set(cast<OpResult>(getTiledLinalgOp()), tiled);
-  for (const auto &en : llvm::enumerate(loops))
-    transformResults.set(cast<OpResult>(getLoops()[en.index()]), en.value());
-
-  return DiagnosedSilenceableFailure::success();
-}
-
-SmallVector<OpFoldResult> transform::TileToScfForOp::getMixedSizes() {
-  ValueRange dynamic = getDynamicSizes();
-  ArrayRef<int64_t> tileSizes = getStaticSizes();
-  SmallVector<OpFoldResult> results;
-  results.reserve(tileSizes.size());
-  unsigned dynamicPos = 0;
-  Builder builder(getContext());
-  for (int64_t size : tileSizes) {
-    if (size == ShapedType::kDynamic) {
-      results.push_back(dynamic[dynamicPos++]);
-    } else {
-      results.push_back(builder.getIndexAttr(size));
-    }
-  }
-  return results;
-}
-
-ParseResult transform::TileToScfForOp::parse(OpAsmParser &parser,
-                                             OperationState &result) {
-  OpAsmParser::UnresolvedOperand target;
-  SmallVector<OpAsmParser::UnresolvedOperand> dynamicSizes;
-  DenseI64ArrayAttr staticSizes;
-  FunctionType trailingType;
-  llvm::SMLoc typeLoc;
-  if (parser.parseOperand(target) ||
-      parseDynamicIndexList(parser, dynamicSizes, staticSizes) ||
-      parseOptionalInterchange(parser, result) ||
-      parser.parseOptionalAttrDict(result.attributes) ||
-      parser.getCurrentLocation(&typeLoc) ||
-      parser.parseColonType(trailingType)) {
-    return ParseResult::failure();
-  }
-
-  result.addAttribute(getStaticSizesAttrName(result.name), staticSizes);
-  size_t numExpectedLoops =
-      staticSizes.size() - llvm::count(staticSizes.asArrayRef(), 0);
-
-  unsigned numExpectedInputTypes = 1 + dynamicSizes.size();
-  if (trailingType.getNumInputs() != numExpectedInputTypes) {
-    return parser.emitError(typeLoc)
-           << "expected " << numExpectedInputTypes << " operand types, got "
-           << trailingType.getNumInputs();
-  }
-
-  unsigned numExpectedOutputTypes = 1 + numExpectedLoops;
-  if (trailingType.getNumResults() != numExpectedOutputTypes) {
-    return parser.emitError(typeLoc)
-           << "expected " << numExpectedOutputTypes << " result types, got "
-           << trailingType.getNumResults();
-  }
-
-  if (parser.resolveOperand(target, trailingType.getInput(0),
-                            result.operands) ||
-      parser.resolveOperands(dynamicSizes,
-                             trailingType.getInputs().drop_front(), typeLoc,
-                             result.operands) ||
-      parser.addTypesToList(trailingType.getResults(), result.types)) {
-    return failure();
-  }
-  return success();
-}
-
-void TileToScfForOp::print(OpAsmPrinter &p) {
-  p << ' ' << getTarget();
-  printDynamicIndexList(p, getOperation(), getDynamicSizes(), getStaticSizes());
-  printOptionalInterchange(p, getInterchange());
-  p.printOptionalAttrDict(getOperation()->getAttrs(), getAttributeNames());
-  p << " : ";
-  p.printFunctionalType(getOperation());
-}
-
-void transform::TileToScfForOp::getEffects(
-    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
-  consumesHandle(getTarget(), effects);
-  onlyReadsHandle(getDynamicSizes(), effects);
-  producesHandle(getTiledLinalgOp(), effects);
-  producesHandle(getLoops(), effects);
-  modifiesPayload(effects);
-}
-
//===----------------------------------------------------------------------===//
// VectorizeOp
//===----------------------------------------------------------------------===//

@@ -27,7 +27,7 @@ func.func @KCRS_to_KCRSsr(%arg0: tensor<1x1x128x64xf32>, %arg1: tensor<1x1x4x8x8
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:4 = transform.structured.tile_to_scf_for %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:4 = transform.structured.tile %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -56,7 +56,7 @@ func.func @pad_and_pack(%arg0: tensor<13x15xf32>, %arg1: tensor<2x8x8x2xf32>, %a
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -89,5 +89,5 @@ func.func @KC_to_CKkc(%arg0: tensor<128x256xf32>, %arg1: tensor<32x4x32x8xf32>)
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

@@ -8,7 +8,7 @@ func.func @KCRSsr_to_KCRS(%arg0: tensor<1x1x4x8x8x32xf32>, %arg1: tensor<1x1x128
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:4 = transform.structured.tile_to_scf_for %0 [1, 1, 32, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:4 = transform.structured.tile %0 [1, 1, 32, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0) -> (d0 floordiv 32)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0) -> (d0 floordiv 8)>
@@ -70,7 +70,7 @@ func.func @unpack_and_extract_slice(%arg0: tensor<2x8x8x2xf32>, %arg1: tensor<13
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [8, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [8, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -104,5 +104,5 @@ func.func @CKkc_to_KC(%arg0: tensor<32x4x32x8xf32>, %arg1: tensor<128x256xf32>)
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [32, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [32, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

@@ -22,9 +22,9 @@ transform.sequence failures(propagate) {
^bb0(%module: !transform.any_op):
  %0 = transform.structured.match ops{["linalg.matmul"]} in %module
    : (!transform.any_op) -> !transform.any_op
-  %tiled_linalg_op, %loops:3 = transform.structured.tile_to_scf_for %0[64, 128, 256]
+  %tiled_linalg_op, %loops:3 = transform.structured.tile %0[64, 128, 256]
    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  %tiled_linalg_op_0, %loops_1:3 = transform.structured.tile_to_scf_for %tiled_linalg_op[8, 8, 8]
+  %tiled_linalg_op_0, %loops_1:3 = transform.structured.tile %tiled_linalg_op[8, 8, 8]
    : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
  transform.structured.masked_vectorize %tiled_linalg_op_0 vector_sizes [8, 8, 8]
    : !transform.any_op

@@ -15,7 +15,7 @@ transform.sequence failures(propagate) {
  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
    : (!transform.any_op) -> !transform.any_op

-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -47,7 +47,7 @@ transform.sequence failures(propagate) {
  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
    : (!transform.any_op) -> !transform.any_op

-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -85,7 +85,7 @@ transform.sequence failures(propagate) {
  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
    : (!transform.any_op) -> !transform.any_op

-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -123,7 +123,7 @@ transform.sequence failures(propagate) {
  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
    : (!transform.any_op) -> !transform.any_op

-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -159,7 +159,7 @@ transform.sequence failures(propagate) {
  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
    : (!transform.any_op) -> !transform.any_op

-  %matmul_l1, %loops_l1:2 = transform.structured.tile_to_scf_for %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1:2 = transform.structured.tile %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],

@@ -15,7 +15,7 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.any_op


-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -51,7 +51,7 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.any_op


-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -94,7 +94,7 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.any_op


-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -139,7 +139,7 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.any_op


-  %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1 = transform.structured.tile %matmul [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
@@ -183,7 +183,7 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.any_op


-  %matmul_l1, %loops_l1:2 = transform.structured.tile_to_scf_for %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %matmul_l1, %loops_l1:2 = transform.structured.tile %matmul [5, 0, 7] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)

  %matmul_padded, %0, %copy_back = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],

@@ -71,19 +71,3 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.op<"linalg.generic">

}

-// -----

-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  // expected-error @below {{expected 4 result types, got 2}}
-  transform.structured.tile_to_scf_for %arg0 [1, 2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-}

-// -----

-transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op, %arg1: !transform.any_op):
-  // expected-error @below {{expected 2 operand types, got 1}}
-  transform.structured.tile_to_scf_for %arg0 [%arg1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
-}

@@ -34,7 +34,7 @@ func.func @dynamic_pad_tensor_3_4(%input_tensor: tensor<?x?xf32>,
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -71,7 +71,7 @@ func.func @dynamic_pad_tensor_0_3(%input_tensor: tensor<?x?xf32>,
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loop = transform.structured.tile_to_scf_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %1, %loop = transform.structured.tile %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -105,7 +105,7 @@ func.func @static_pad_tensor_3_4(%input_tensor: tensor<7x9xf32>,
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -137,7 +137,7 @@ func.func @static_pad_tensor_0_3(%input_tensor: tensor<7x9xf32>,
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loop = transform.structured.tile_to_scf_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %1, %loop = transform.structured.tile %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -175,7 +175,7 @@ func.func @static_pad_tile_evenly_0_3(%input_tensor: tensor<7x9xf32>,
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loop = transform.structured.tile_to_scf_for %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %1, %loop = transform.structured.tile %0 [0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -211,7 +211,7 @@ func.func @NC_to_NCnc(%arg0: tensor<128x256xf32>, %arg1: tensor<4x8x32x32xf32>)
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -239,7 +239,7 @@ func.func @KC_to_CKkc(%arg0: tensor<128x256xf32>, %arg1: tensor<32x4x32x8xf32>)
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -274,7 +274,7 @@ func.func @pad_and_pack_static(%input: tensor<13x15xf32>, %output: tensor<2x8x8x
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -323,7 +323,7 @@ func.func @pad_and_pack_partially_dynamic(%input: tensor<?x?xf32>, %output: tens
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -377,7 +377,7 @@ func.func @pad_and_pack_fully_dynamic(%source: tensor<?x?xf32>, %dest: tensor<?x
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -424,7 +424,7 @@ func.func @NCnc_to_NC(%source: tensor<8x8x32x16xf32>, %dest: tensor<256x128xf32>
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -470,7 +470,7 @@ func.func @CKkc_to_KC(%source: tensor<32x4x32x8xf32>, %dest: tensor<128x256xf32>
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -506,7 +506,7 @@ func.func @perfect_CKkc_to_KC(%source: tensor<32x4x2x4xf32>, %dest: tensor<8x128
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -548,7 +548,7 @@ func.func @dynamic_perfect_CKkc_to_KC(%source: tensor<?x?x2x2xf32>, %dest: tenso
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [2, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -583,7 +583,7 @@ func.func @perfect_NKPQk_to_NPQK(%source: tensor<1x4x6x6x2xf32>, %dest: tensor<1
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:4 = transform.structured.tile_to_scf_for %0 [1, 1, 1, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:4 = transform.structured.tile %0 [1, 1, 1, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -609,7 +609,7 @@ func.func @fully_dynamic_unpack(%source: tensor<?x?x?x?xf32>, %dest: tensor<?x?x
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.unpack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:2 = transform.structured.tile_to_scf_for %0 [4, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:2 = transform.structured.tile %0 [4, 8] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -643,5 +643,5 @@ func.func @perfect_NPQK_to_NKPQk(%source: tensor<1x6x6x8xf32>, %dest: tensor<1x4
transform.sequence failures(propagate) {
^bb0(%arg1: !transform.any_op):
  %0 = transform.structured.match ops{["tensor.pack"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1, %loops:4 = transform.structured.tile_to_scf_for %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
+  %1, %loops:4 = transform.structured.tile %0 [1, 1, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}