diff --git a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
--- a/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Dialect/Linalg/Utils/Utils.cpp
@@ -357,12 +357,17 @@
       }))
     return PadTensorOp::createPadHighOp(type, source, pad, nofold, loc, b);
 
-  // Exit if the sizes of the dynamic sizes of `sliceOp` do not match the size
-  // of the slice padded by `padTensorOp`.
+  // Exit if `padTensorOpSliceOp`, which defines the slice used by
+  // `padTensorOp`, is rank-reducing.
   auto padTensorOpSliceOp =
       padTensorOp.source().getDefiningOp<tensor::ExtractSliceOp>();
-  if (!padTensorOpSliceOp ||
-      llvm::any_of(llvm::zip(sliceOp.getMixedSizes(),
+  if (!padTensorOpSliceOp || sliceOp.getMixedSizes().size() !=
+                                 padTensorOpSliceOp.getMixedSizes().size())
+    return PadTensorOp::createPadHighOp(type, source, pad, nofold, loc, b);
+
+  // Exit if the sizes of the dynamic sizes of `sliceOp` do not match the size
+  // of the slice padded by `padTensorOp`.
+  if (llvm::any_of(llvm::zip(sliceOp.getMixedSizes(),
                              padTensorOpSliceOp.getMixedSizes()),
                    [](std::tuple<OpFoldResult, OpFoldResult> it) {
                      return !isEqualConstantIntOrValue(std::get<0>(it),
diff --git a/mlir/test/Dialect/Linalg/pad.mlir b/mlir/test/Dialect/Linalg/pad.mlir
--- a/mlir/test/Dialect/Linalg/pad.mlir
+++ b/mlir/test/Dialect/Linalg/pad.mlir
@@ -277,6 +277,31 @@
 
 #map0 = affine_map<()[s0] -> (64, s0)>
 
+// MATMUL: different_padding_dynamic_rank
+func @different_padding_dynamic_rank(%arg0: tensor<64x64x1xf32>,
+                                     %iv0 : index) -> tensor<?x?xf32> {
+  %cst = arith.constant 0.0 : f32
+  %size = affine.min #map0()[%iv0]
+  %0 = tensor.extract_slice %arg0[0, 0, 0] [%size, %size, 1] [1, 1, 1] : tensor<64x64x1xf32> to tensor<?x?xf32>
+  %1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0] {
+    ^bb0(%arg3: index, %arg4: index):  // no predecessors
+      linalg.yield %cst : f32
+  } : tensor<?x?xf32> to tensor<64x64xf32>
+  %2 = linalg.fill(%cst, %1) : f32, tensor<64x64xf32> -> tensor<64x64xf32>
+  %3 = tensor.extract_slice %2[0, 0] [%size, %size] [1, 1] : tensor<64x64xf32> to tensor<?x?xf32>
+
+  // Different dynamic ranks prevent composing the paddings ([%size, %size, 1] vs [%size, %size]).
+  // MATMUL: = linalg.fill
+  // MATMUL: = linalg.pad_tensor
+  // MATMUL: = linalg.matmul
+  %4 = linalg.matmul ins(%3, %3 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%3 : tensor<?x?xf32>) -> tensor<?x?xf32>
+  return %4 : tensor<?x?xf32>
+}
+
+// -----
+
+#map0 = affine_map<()[s0] -> (64, s0)>
+
 // MATMUL: different_padding_static_sizes
 func @different_padding_static_sizes(%arg0: tensor<62x62xf32>,
                                      %iv0 : index) -> tensor<?x?xf32> {