diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1765,8 +1765,9 @@
 def HoistRedundantVectorTransfersOp :
   Op<Transform_Dialect, "structured.hoist_redundant_vector_transfers",
-    [FunctionalStyleTransformOpTrait, MemoryEffectsOpInterface,
-     TransformEachOpTrait, TransformOpInterface]> {
+    [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+     TransformEachOpTrait,
+     TransformOpInterface]> {
   let description = [{
     Hoist vector.transfer_read / vector.transfer_write pairs out of
     immediately enclosing scf::ForOp iteratively, if the following conditions
     are true:
@@ -1884,8 +1885,9 @@
 
 def HoistRedundantTensorSubsetsOp :
   Op<Transform_Dialect, "structured.hoist_redundant_tensor_subsets",
-    [FunctionalStyleTransformOpTrait, MemoryEffectsOpInterface,
-     TransformEachOpTrait, TransformOpInterface]> {
+    [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+     TransformEachOpTrait,
+     TransformOpInterface]> {
   let description = [{
     Hoists supported tensor subset extract/insert operation pairs out of
     immediately enclosing loop iteratively, if the following conditions
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3133,22 +3133,6 @@
   return getMixedValues(getStaticVectorSizes(), getVectorSizes(), b);
 }
 
-//===----------------------------------------------------------------------===//
-// HoistRedundantVectorTransfersOp
-//===----------------------------------------------------------------------===//
-
-DiagnosedSilenceableFailure
-transform::HoistRedundantVectorTransfersOp::applyToOne(
-    func::FuncOp target, transform::ApplyToEachResultList &results,
-    transform::TransformState &state) {
-  // WARNING: This hoisting does not model parallelism and is generally
-  // incorrect when used on distributed loops with memref semantics!
-  // TODO: obsolete and should be retired.
-  linalg::hoistRedundantVectorTransfers(target);
-  results.push_back(target);
-  return DiagnosedSilenceableFailure::success();
-}
-
 //===----------------------------------------------------------------------===//
 // ConvertConv2DToImg2ColOp.
 //===----------------------------------------------------------------------===//
@@ -3208,6 +3192,36 @@
   return DiagnosedSilenceableFailure::success();
 }
 
+void transform::HoistRedundantTensorSubsetsOp::getEffects(
+    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
+  transform::onlyReadsHandle(getTarget(), effects);
+  transform::producesHandle(getTransformed(), effects);
+  transform::modifiesPayload(effects);
+}
+
+//===----------------------------------------------------------------------===//
+// HoistRedundantVectorTransfersOp
+//===----------------------------------------------------------------------===//
+
+DiagnosedSilenceableFailure
+transform::HoistRedundantVectorTransfersOp::applyToOne(
+    func::FuncOp target, transform::ApplyToEachResultList &results,
+    transform::TransformState &state) {
+  // WARNING: This hoisting does not model parallelism and is generally
+  // incorrect when used on distributed loops with memref semantics!
+  // TODO: obsolete and should be retired.
+  linalg::hoistRedundantVectorTransfers(target);
+  results.push_back(target);
+  return DiagnosedSilenceableFailure::success();
+}
+
+void transform::HoistRedundantVectorTransfersOp::getEffects(
+    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
+  transform::onlyReadsHandle(getTarget(), effects);
+  transform::producesHandle(getTransformed(), effects);
+  transform::modifiesPayload(effects);
+}
+
 //===----------------------------------------------------------------------===//
 // Transform op registration
 //===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/Linalg/hoisting.mlir b/mlir/test/Dialect/Linalg/hoisting.mlir
--- a/mlir/test/Dialect/Linalg/hoisting.mlir
+++ b/mlir/test/Dialect/Linalg/hoisting.mlir
@@ -80,6 +80,9 @@
     : (!pdl.operation) -> !pdl.operation
   transform.structured.hoist_redundant_vector_transfers %0
     : (!pdl.operation) -> !pdl.operation
+  // Test we can call the op twice without consuming the handle.
+  transform.structured.hoist_redundant_vector_transfers %0
+    : (!pdl.operation) -> !pdl.operation
 }
 
 // -----
@@ -303,6 +306,9 @@
     : (!pdl.operation) -> !pdl.operation
   transform.structured.hoist_redundant_tensor_subsets %0
     : (!pdl.operation) -> !pdl.operation
+  // Test we can call the op twice without consuming the handle.
+  transform.structured.hoist_redundant_tensor_subsets %0
+    : (!pdl.operation) -> !pdl.operation
 }
 
 // -----