diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -891,17 +891,19 @@ def PadOp : Op<Transform_Dialect, "structured.pad", [FunctionalStyleTransformOpTrait, MemoryEffectsOpInterface, DeclareOpInterfaceMethods<TransformOpInterface>, ReportTrackingListenerFailuresOpTrait]> {
   let description = [{
     Pads the operations pointed to by the target handle using the options
-    provides as operation attributes.
+    provided as operation attributes. The operation returns a handle to the
+    padded operation and to the padding operation ("tensor.pad").
 
     #### Return modes
 
     This operation ignores non-Linalg ops and drops them in the return.
     This operation may produce a definiteFailure if the padding fails for any
     reason.
+
     If all the operations referred to by the `target` handle pad
     properly, the transform succeeds. Otherwise the transform silently fails.
     The return handle points to only the subset of successfully produced
@@ -918,11 +920,11 @@
          TypedArrayAttrBase<I64ArrayAttr, "array of arrays of i64">,
          "{}">:$transpose_paddings,
        DefaultValuedAttr<BoolAttr, "true">:$copy_back);
 
-  let results = (outs TransformHandleTypeInterface:$transformed);
+  let results = (outs TransformHandleTypeInterface:$padded,
+                      TransformHandleTypeInterface:$pad);
 
   let assemblyFormat =
-    "$target attr-dict `:` "
-    "custom(type($target), type($transformed))";
+    "$target attr-dict `:` functional-type(operands, results)";
 
   let hasVerifier = 1;
 
   let extraClassDeclaration = [{
diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -401,18 +401,24 @@
 void peelLoops(RewriterBase &rewriter, ArrayRef<scf::ForOp> loops);
 
 /// Pad the iterator dimensions `paddingDimensions` of all `opToPad` operands
-/// to a static bounding box. `padToMultipleOf` indicates that each padding
-/// dimension should be padded to the specified multiple. If the derived padding
-/// sizes should not be rounded up to any multiple, use "1". Use `paddingValues`
-/// and `packPaddings` to set padding value and nofold attribute of the created
-/// tensor::PadOps, respectively. Update `paddedOp` to the cloned operation with
-/// statically shaped `paddingDimensions` and return the extracted dynamically
-/// shaped results. If padding fails, return failure. If `copyBack` is set, the
-/// unpadded result is copied back into the original destination tensor.
-FailureOr<SmallVector<Value>>
-rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
-                  const LinalgPaddingOptions &options, LinalgOp &paddedOp,
-                  bool copyBack);
+/// to a static bounding box. The original `opToPad` is cloned and operates on
+/// the padded tensors.
+///
+/// * "options.padToMultipleOf" indicates that each padding dimension should be
+///   padded to the specified multiple.
+/// * Use "options.paddingValues" and "options.packPaddings" to set padding
+///   value and nofold attribute of the created tensor::PadOps, respectively.
+/// * The unpadded results (extracted slice of the cloned operation) are
+///   returned via `replacements`.
+/// * The tensor::PadOps are returned via `padOps`.
+/// * If `copyBack` is set to "true", the unpadded result is copied back to the
+///   original destination tensor.
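To make the interface change concrete, the sketch below shows the op's new two-result form in transform IR. This is illustrative only (the handle names are mine); its shape mirrors the test updates further down in this patch:

  transform.sequence failures(propagate) {
  ^bb1(%arg1: !transform.any_op):
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    // Two results now: a handle to the padded linalg op and a handle to the
    // tensor.pad ops that were created.
    %padded, %pad = transform.structured.pad %0 {
      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
      padding_dimensions=[0, 1, 2]
    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
  }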
+LogicalResult rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
+                                const LinalgPaddingOptions &options,
+                                LinalgOp &paddedOp,
+                                SmallVector<Value> &replacements,
+                                SmallVector<tensor::PadOp> &padOps,
+                                bool copyBack);
 
 namespace detail {
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -1586,79 +1586,96 @@
 //===---------------------------------------------------------------------===//
 
 DiagnosedSilenceableFailure
-transform::PadOp::applyToOne(transform::TransformRewriter &rewriter,
-                             LinalgOp target,
-                             transform::ApplyToEachResultList &results,
-                             transform::TransformState &state) {
-  // Convert the integer packing flags to booleans.
-  SmallVector<bool> packPaddings;
-  for (int64_t packPadding : extractFromI64ArrayAttr(getPackPaddings()))
-    packPaddings.push_back(static_cast<bool>(packPadding));
-
-  // Convert the padding values to attributes.
-  SmallVector<Attribute> paddingValues;
-  for (auto const &it :
-       llvm::zip(getPaddingValues(), target->getOperandTypes())) {
-    auto attr = dyn_cast<TypedAttr>(std::get<0>(it));
-    if (!attr) {
-      emitOpError("expects padding values to be typed attributes");
-      return DiagnosedSilenceableFailure::definiteFailure();
+transform::PadOp::apply(transform::TransformRewriter &rewriter,
+                        transform::TransformResults &results,
+                        transform::TransformState &state) {
+  SmallVector<Operation *> paddedOps, padOps;
+
+  for (Operation *target : state.getPayloadOps(getTarget())) {
+    auto linalgTarget = dyn_cast<LinalgOp>(target);
+    if (!linalgTarget) {
+      auto diag = emitSilenceableError() << "expected LinalgOp target";
+      diag.attachNote(target->getLoc()) << "target op";
+      return diag;
     }
-    Type elementType = getElementTypeOrSelf(std::get<1>(it));
-    // Try to parse string attributes to obtain an attribute of element type.
-    if (auto stringAttr = dyn_cast<StringAttr>(attr)) {
-      auto parsedAttr = dyn_cast_if_present<TypedAttr>(
-          parseAttribute(stringAttr, getContext(), elementType,
-                         /*numRead=*/nullptr, /*isKnownNullTerminated=*/true));
-      if (!parsedAttr || parsedAttr.getType() != elementType) {
-        auto diag = this->emitOpError("expects a padding that parses to ")
-                    << elementType << ", got " << std::get<0>(it);
-        diag.attachNote(target.getLoc()) << "when applied to this op";
+
+    // Convert the integer packing flags to booleans.
+    SmallVector<bool> packPaddings;
+    for (int64_t packPadding : extractFromI64ArrayAttr(getPackPaddings()))
+      packPaddings.push_back(static_cast<bool>(packPadding));
+
+    // Convert the padding values to attributes.
+    SmallVector<Attribute> paddingValues;
+    for (auto const &it :
+         llvm::zip(getPaddingValues(), linalgTarget->getOperandTypes())) {
+      auto attr = dyn_cast<TypedAttr>(std::get<0>(it));
+      if (!attr) {
+        emitOpError("expects padding values to be typed attributes");
        return DiagnosedSilenceableFailure::definiteFailure();
       }
-      paddingValues.push_back(parsedAttr);
-      continue;
+      Type elementType = getElementTypeOrSelf(std::get<1>(it));
+      // Try to parse string attributes to obtain an attribute of element type.
+      if (auto stringAttr = dyn_cast<StringAttr>(attr)) {
+        auto parsedAttr = dyn_cast_if_present<TypedAttr>(parseAttribute(
+            stringAttr, getContext(), elementType,
+            /*numRead=*/nullptr, /*isKnownNullTerminated=*/true));
+        if (!parsedAttr || parsedAttr.getType() != elementType) {
+          auto diag = this->emitOpError("expects a padding that parses to ")
+                      << elementType << ", got " << std::get<0>(it);
+          diag.attachNote(linalgTarget.getLoc()) << "when applied to this op";
+          return DiagnosedSilenceableFailure::definiteFailure();
+        }
+        paddingValues.push_back(parsedAttr);
+        continue;
+      }
+      // Otherwise, add the attribute directly.
+      if (attr.getType() != elementType) {
+        auto diag = this->emitOpError("expects a padding value of type ")
+                    << elementType << ", got " << attr;
+        diag.attachNote(linalgTarget.getLoc()) << "when applied to this op";
+        return DiagnosedSilenceableFailure::definiteFailure();
+      }
+      paddingValues.push_back(attr);
     }
-    // Otherwise, add the attribute directly.
-    if (attr.getType() != elementType) {
-      auto diag = this->emitOpError("expects a padding value of type ")
-                  << elementType << ", got " << attr;
-      diag.attachNote(target.getLoc()) << "when applied to this op";
-      return DiagnosedSilenceableFailure::definiteFailure();
+
+    // Extract the transpose vectors.
+    SmallVector<SmallVector<int64_t>> transposePaddings;
+    for (Attribute transposeVector : cast<ArrayAttr>(getTransposePaddings()))
+      transposePaddings.push_back(
+          extractFromI64ArrayAttr(cast<ArrayAttr>(transposeVector)));
+
+    LinalgOp paddedOp;
+    LinalgPaddingOptions options;
+    options.paddingDimensions = extractFromI64ArrayAttr(getPaddingDimensions());
+    SmallVector<int64_t> padToMultipleOf(options.paddingDimensions.size(), 1);
+    if (getPadToMultipleOf().has_value())
+      padToMultipleOf = extractFromI64ArrayAttr(*getPadToMultipleOf());
+    options.padToMultipleOf = padToMultipleOf;
+    options.paddingValues = paddingValues;
+    options.packPaddings = packPaddings;
+
+    SmallVector<Value> replacements;
+    SmallVector<tensor::PadOp> newPadOps;
+    if (failed(rewriteAsPaddedOp(rewriter, linalgTarget, options, paddedOp,
+                                 replacements, newPadOps, getCopyBack()))) {
+      auto diag = emitSilenceableError() << "failed to pad op";
+      diag.attachNote(target->getLoc()) << "target op";
+      return diag;
     }
-    paddingValues.push_back(attr);
-  }
-
-  // Extract the transpose vectors.
-  SmallVector<SmallVector<int64_t>> transposePaddings;
-  for (Attribute transposeVector : cast<ArrayAttr>(getTransposePaddings()))
-    transposePaddings.push_back(
-        extractFromI64ArrayAttr(cast<ArrayAttr>(transposeVector)));
-
-  // Set up options and pad.
-  LinalgOp paddedOp;
-  LinalgPaddingOptions options;
-  options.paddingDimensions = extractFromI64ArrayAttr(getPaddingDimensions());
-  SmallVector<int64_t> padToMultipleOf(options.paddingDimensions.size(), 1);
-  if (getPadToMultipleOf().has_value())
-    padToMultipleOf = extractFromI64ArrayAttr(*getPadToMultipleOf());
-  options.padToMultipleOf = padToMultipleOf;
-  options.paddingValues = paddingValues;
-  options.packPaddings = packPaddings;
-  FailureOr<SmallVector<Value>> result =
-      rewriteAsPaddedOp(rewriter, target, options, paddedOp, getCopyBack());
-  if (succeeded(result)) {
+
     // We need to perform our own replacement here because this API is still
     // used in patterns that "pad and hoist", for which the replacement values
     // need to be different.
     // TODO: clean this up and stop "pad and hoist" behavior more globally now
    // that we have more composable abstractions.
-    rewriter.replaceOp(target, *result);
-    results.push_back(paddedOp);
-    return DiagnosedSilenceableFailure::success();
+    rewriter.replaceOp(linalgTarget, replacements);
+    paddedOps.push_back(paddedOp);
+    padOps.append(newPadOps.begin(), newPadOps.end());
   }
-  return emitDefaultSilenceableFailure(target);
+  results.set(cast<OpResult>(getPadded()), paddedOps);
+  results.set(cast<OpResult>(getPad()), padOps);
+  return DiagnosedSilenceableFailure::success();
 }
 
 LogicalResult transform::PadOp::verify() {
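The payoff of collecting the tensor.pad ops is that the new `%pad` handle can be chained directly into further transforms. Below is a hedged sketch mirroring the new pad-to-specific-memory-space test added later in this patch (the memory_space value is simply the one that test uses):

  %padded, %pad = transform.structured.pad %0 {
    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
    padding_dimensions=[0, 1, 2],
    pack_paddings=[1, 1, 1]
  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
  // Materialize the result of the tensor.pad ops in a dedicated allocation.
  %pad_result = transform.get_result %pad[0]
    : (!transform.any_op) -> !transform.any_value
  %buffer, %replacement =
    transform.structured.bufferize_to_allocation %pad_result {memory_space = 3}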
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Padding.cpp
@@ -136,10 +136,11 @@
       opOperand->get(), paddingValue, nofold);
 }
 
-FailureOr<SmallVector<Value>>
+LogicalResult
 linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
                           const LinalgPaddingOptions &options,
-                          LinalgOp &paddedOp, bool copyBack) {
+                          LinalgOp &paddedOp, SmallVector<Value> &replacements,
+                          SmallVector<tensor::PadOp> &padOps, bool copyBack) {
   LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");
   Location loc = opToPad->getLoc();
@@ -166,6 +167,8 @@
           "operand cannot be bound statically");
     }
     newOperands.push_back(*paddedOperand);
+    if (auto padOp = paddedOperand->getDefiningOp<tensor::PadOp>())
+      padOps.push_back(padOp);
   }
 
   ReifiedRankedShapedTypeDims reifiedResultShapes;
@@ -199,20 +202,24 @@
         strides));
   }
 
-  if (!copyBack)
-    return paddedSubtensorResults;
+  if (!copyBack) {
+    replacements = std::move(paddedSubtensorResults);
+    return success();
+  }
 
   // Copy back unpadded results to the original destination (i.e., inits of the
   // linalg op), so that the destination buffer of the computation does not
   // change. If the padding folds away, this will materialize as a memcpy
   // between two identical buffers, which will then also fold away.
-  SmallVector<Value> copiedBack;
+  assert(static_cast<int64_t>(paddedSubtensorResults.size()) ==
+             opToPad.getNumDpsInits() &&
+         "expected matching number of results");
   for (auto it :
        llvm::zip(paddedSubtensorResults, opToPad.getDpsInitOperands())) {
-    copiedBack.push_back(rewriter.create<linalg::CopyOp>(
+    replacements.push_back(rewriter.create<linalg::CopyOp>(
         loc, std::get<0>(it), std::get<1>(it)->get()));
   }
-  return copiedBack;
+  return success();
 }
 
@@ -224,9 +231,10 @@ FailureOr<LinalgOp>
   // Pad the operation.
   LinalgOp paddedOp;
-  FailureOr<SmallVector<Value>> newResults = rewriteAsPaddedOp(
-      rewriter, linalgOp, options, paddedOp, /*copyBack=*/false);
-  if (failed(newResults))
+  SmallVector<Value> newResults;
+  SmallVector<tensor::PadOp> padOps;
+  if (failed(rewriteAsPaddedOp(rewriter, linalgOp, options, paddedOp,
+                               newResults, padOps, /*copyBack=*/false)))
     return rewriter.notifyMatchFailure(linalgOp,
                                        "failed to rewrite as a padded op");
@@ -266,7 +274,7 @@
   }
 
   // Replace the original operation to pad.
-  rewriter.replaceOp(linalgOp, *newResults);
+  rewriter.replaceOp(linalgOp, newResults);
   return paddedOp;
 }
diff --git a/mlir/python/mlir/dialects/_structured_transform_ops_ext.py b/mlir/python/mlir/dialects/_structured_transform_ops_ext.py
--- a/mlir/python/mlir/dialects/_structured_transform_ops_ext.py
+++ b/mlir/python/mlir/dialects/_structured_transform_ops_ext.py
@@ -130,43 +130,44 @@
 class PadOp:
-    """Specialization for PadOp class."""
-
-    def __init__(
-        self,
-        target: Union[Operation, Value],
-        *,
-        padding_values: Optional[
-            Optional[Union[ArrayAttr, Sequence[Attribute]]]
-        ] = None,
-        padding_dimensions: OptionalIntList = None,
-        pack_paddings: OptionalIntList = None,
-        transpose_paddings: Optional[
-            Union[ArrayAttr, Sequence[Union[ArrayAttr, IntOrAttrList]]]
-        ] = None,
-        loc=None,
-        ip=None,
-    ):
-        if transpose_paddings is None:
-            transpose_paddings = []
-        if pack_paddings is None:
-            pack_paddings = []
-        if padding_dimensions is None:
-            padding_dimensions = []
-        if padding_values is None:
-            padding_values = []
-        pdl_operation_type = pdl.OperationType.get()
-        transpose_paddings_attr = _get_int_int_array_attr(transpose_paddings)
-        super().__init__(
-            pdl_operation_type,
-            _get_op_result_or_value(target),
-            padding_values=padding_values,
-            padding_dimensions=padding_dimensions,
-            pack_paddings=pack_paddings,
-            transpose_paddings=transpose_paddings_attr,
-            loc=loc,
-            ip=ip,
-        )
+    """Specialization for PadOp class."""
+
+    def __init__(
+        self,
+        target: Union[Operation, Value],
+        *,
+        padding_values: Optional[
+            Optional[Union[ArrayAttr, Sequence[Attribute]]]
+        ] = None,
+        padding_dimensions: OptionalIntList = None,
+        pack_paddings: OptionalIntList = None,
+        transpose_paddings: Optional[
+            Union[ArrayAttr, Sequence[Union[ArrayAttr, IntOrAttrList]]]
+        ] = None,
+        loc=None,
+        ip=None,
+    ):
+        if transpose_paddings is None:
+            transpose_paddings = []
+        if pack_paddings is None:
+            pack_paddings = []
+        if padding_dimensions is None:
+            padding_dimensions = []
+        if padding_values is None:
+            padding_values = []
+        pdl_operation_type = pdl.OperationType.get()
+        transpose_paddings_attr = _get_int_int_array_attr(transpose_paddings)
+        super().__init__(
+            pdl_operation_type,
+            pdl_operation_type,
+            _get_op_result_or_value(target),
+            padding_values=padding_values,
+            padding_dimensions=padding_dimensions,
+            pack_paddings=pack_paddings,
+            transpose_paddings=transpose_paddings_attr,
+            loc=loc,
+            ip=ip,
+        )
 
 
 class ScalarizeOp:
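Note that the hoist-pad tests further below still reach a tensor.pad via transform.get_producer_of_operand, which selects the pad op feeding one specific operand, whereas the op's new second result refers to all tensor.pad ops created for the target. A hedged sketch of the two access paths (names illustrative, options taken from those tests):

  %matmul_padded, %all_pads = transform.structured.pad %matmul_l1 {
    padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
    padding_dimensions=[0, 1, 2],
    copy_back = false
  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
  // Existing style, still supported: only the pad op feeding operand #2.
  %pad2 = transform.get_producer_of_operand %matmul_padded[2]
    : (!transform.any_op) -> !transform.op<"tensor.pad">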
diff --git a/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir b/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
@@ -0,0 +1,61 @@
+
+// RUN: mlir-opt --test-transform-dialect-interpreter -cse -canonicalize -split-input-file -verify-diagnostics %s | FileCheck %s
+
+#map = affine_map<()[s0] -> (-s0 + 12, 7)>
+
+// CHECK-LABEL: func @pad_to_memory_space(
+//  CHECK-SAME:   %[[arg0:.*]]: memref<24x12xf32, strided<[?, ?], offset: ?>>,
+//  CHECK-SAME:   %[[arg1:.*]]: memref<12x25xf32, strided<[?, ?], offset: ?>>,
+//  CHECK-SAME:   %[[arg2:.*]]: memref<24x25xf32, strided<[?, ?], offset: ?>>,
+func.func @pad_to_memory_space(%arg0: tensor<24x12xf32>,
+                               %arg1: tensor<12x25xf32>,
+                               %arg2: tensor<24x25xf32>,
+                               %iv0 : index, %iv1 : index,
+                               %iv2 : index) -> tensor<24x25xf32> {
+  %0 = affine.min #map()[%iv2]
+
+  // CHECK: %[[s0:.*]] = memref.subview %[[arg0]]
+  %1 = tensor.extract_slice %arg0[%iv0, %iv2] [4, %0] [1, 1] : tensor<24x12xf32> to tensor<4x?xf32>
+  // CHECK: %[[s1:.*]] = memref.subview %[[arg1]]
+  %2 = tensor.extract_slice %arg1[%iv2, %iv1] [%0, 5] [1, 1] : tensor<12x25xf32> to tensor<?x5xf32>
+  // CHECK: %[[s2:.*]] = memref.subview %[[arg2]]
+  %3 = tensor.extract_slice %arg2[%iv0, %iv1] [4, 5] [1, 1] : tensor<24x25xf32> to tensor<4x5xf32>
+
+  // CHECK: %[[alloc0:.*]] = memref.alloc() : memref<4x7xf32, 3>
+  // CHECK: linalg.fill {{.*}} outs(%[[alloc0]]
+  // CHECK: %[[alloc0_view:.*]] = memref.subview %[[alloc0]][0, 0] [4, %{{.*}}] [1, 1]
+  // CHECK: memref.copy %[[s0]], %[[alloc0_view]]
+
+  // CHECK: %[[alloc1:.*]] = memref.alloc() : memref<7x5xf32, 3>
+  // CHECK: linalg.fill {{.*}} outs(%[[alloc1]]
+  // CHECK: %[[alloc1_view:.*]] = memref.subview %[[alloc1]][0, 0] [%{{.*}}, 5] [1, 1]
+  // CHECK: memref.copy %[[s1]], %[[alloc1_view]]
+
+  // CHECK: %[[alloc2:.*]] = memref.alloc() : memref<4x5xf32, 3>
+  // CHECK: linalg.fill {{.*}} outs(%[[alloc2]]
+  // No subview because there is 0 padding
+  // CHECK: memref.copy %[[s2]], %[[alloc2]]
+
+  // CHECK: linalg.matmul ins(%[[alloc0]], %[[alloc1]] : {{.*}}) outs(%[[alloc2]] : {{.*}})
+  // Copy back result.
+  // CHECK: memref.copy %[[alloc2]], %[[s2]]
+  %4 = linalg.matmul ins(%1, %2 : tensor<4x?xf32>, tensor<?x5xf32>) outs(%3 : tensor<4x5xf32>) -> tensor<4x5xf32>
+
+  // insert_slice bufferizes to a no-op.
+  %5 = tensor.insert_slice %4 into %arg2[%iv0, %iv1] [4, 5] [1, 1] : tensor<4x5xf32> into tensor<24x25xf32>
+  func.return %5 : tensor<24x25xf32>
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %padded, %pad = transform.structured.pad %0 {
+    padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+    padding_dimensions=[0, 1, 2],
+    pack_paddings=[1, 1, 1]
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+  %pad_result = transform.get_result %pad[0] : (!transform.any_op) -> !transform.any_value
+  %buffer, %replacement = transform.structured.bufferize_to_allocation %pad_result {memory_space = 3}
+  %2 = transform.bufferization.one_shot_bufferize %arg1 {bufferize_function_boundaries=true} : (!transform.any_op) -> !transform.any_op
+
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir b/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir
--- a/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-hoist-pad-build-packing-loop-nest.mlir
@@ -17,10 +17,10 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   // In this case, the pad op is actually empty: we only tile the first dimension
   // and it does not have an impact on the RHS operand.
@@ -49,10 +49,10 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[2]
     : (!transform.any_op) -> !transform.any_op
@@ -87,10 +87,10 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[0]
     : (!transform.any_op) -> !transform.any_op
@@ -125,10 +125,10 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[0]
     : (!transform.any_op) -> !transform.any_op
@@ -161,10 +161,10 @@
   %matmul_l1, %loops_l1:2 = transform.structured.tile_to_scf_for %matmul [5, 0, 7]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[2]
     : (!transform.any_op) -> !transform.any_op
diff --git a/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir b/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir
--- a/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-hoist-pad.mlir
@@ -17,11 +17,11 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     copy_back = false
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   // In this case, the pad op is actually empty: we only tile the first dimension
   // and it does not have an impact on the RHS operand.
@@ -53,11 +53,11 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     copy_back = false
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[2]
     : (!transform.any_op) -> !transform.op<"tensor.pad">
@@ -96,11 +96,11 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     copy_back = false
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[0]
     : (!transform.any_op) -> !transform.any_op
@@ -141,11 +141,11 @@
   %matmul_l1, %loops_l1 = transform.structured.tile_to_scf_for %matmul [5]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     copy_back = false
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[0]
     : (!transform.any_op) -> !transform.any_op
@@ -185,11 +185,11 @@
   %matmul_l1, %loops_l1:2 = transform.structured.tile_to_scf_for %matmul [5, 0, 7]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 
-  %matmul_padded = transform.structured.pad %matmul_l1 {
+  %matmul_padded, %0 = transform.structured.pad %matmul_l1 {
     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     copy_back = false
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   %pad = transform.get_producer_of_operand %matmul_padded[2]
     : (!transform.any_op) -> !transform.op<"tensor.pad">
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad.mlir b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
--- a/mlir/test/Dialect/Linalg/transform-op-pad.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad.mlir
@@ -33,11 +33,11 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -65,12 +65,12 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pad_to_multiple_of=[2, 2, 1],
     pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -107,11 +107,11 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -128,11 +128,11 @@
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   // expected-error @below {{op expects a padding value of type 'f32', got 0 : i32}}
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0: i32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -149,11 +149,11 @@
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   // expected-error @below {{expects a padding that parses to 'f32', got "{foo}"}}
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=["{foo}", 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -173,11 +173,11 @@
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   // This error is silenceable and is not reported by this transform
   //   {{transform.structured.pad failed to apply}}
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 0]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -228,11 +228,11 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 1]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -278,9 +278,9 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.pad %0 {
+  %padded, %pad = transform.structured.pad %0 {
     padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 1]
-  } : (!transform.any_op) -> !transform.any_op
+  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
diff --git a/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir b/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir
--- a/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir
+++ b/mlir/test/Dialect/Linalg/transform-ops-invalid.mlir
@@ -11,7 +11,7 @@
 transform.sequence failures(propagate) {
 ^bb0(%arg0: !transform.any_op):
   // expected-error@below {{expects padding_dimensions to contain positive integers, found [1, -7]}}
-  transform.structured.pad %arg0 {padding_dimensions=[1, -7]} : (!transform.any_op) -> !transform.any_op
+  transform.structured.pad %arg0 {padding_dimensions=[1, -7]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -19,7 +19,7 @@
 transform.sequence failures(propagate) {
 ^bb0(%arg0: !transform.any_op):
   // expected-error@below {{expects pack_paddings to contain booleans (0/1), found [1, 7]}}
-  transform.structured.pad %arg0 {pack_paddings=[1, 7]} : (!transform.any_op) -> !transform.any_op
+  transform.structured.pad %arg0 {pack_paddings=[1, 7]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
@@ -27,7 +27,7 @@
 transform.sequence failures(propagate) {
 ^bb0(%arg0: !transform.any_op):
   // expected-error@below {{expects transpose_paddings to be a permutation, found [1, 1]}}
-  transform.structured.pad %arg0 {transpose_paddings=[[1, 1]]} : (!transform.any_op) -> !transform.any_op
+  transform.structured.pad %arg0 {transpose_paddings=[[1, 1]]} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 // -----
 
diff --git a/mlir/test/Dialect/Linalg/transform-ops.mlir b/mlir/test/Dialect/Linalg/transform-ops.mlir
--- a/mlir/test/Dialect/Linalg/transform-ops.mlir
+++ b/mlir/test/Dialect/Linalg/transform-ops.mlir
@@ -21,7 +21,7 @@
 transform.sequence failures(propagate) {
 ^bb1(%arg0: !transform.any_op):
   // CHECK: transform.structured.pad
-  %0 = transform.structured.pad %arg0 : (!transform.any_op) -> !transform.any_op
+  %0, %1 = transform.structured.pad %arg0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 }
 
 transform.sequence failures(propagate) {