diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -954,7 +954,7 @@
   SmallVector<Value> dynDims;
   for (unsigned i = 0; i < type.getRank(); ++i) {
     if (type.isDynamicDim(i))
-      dynDims.push_back(asValue(rewriter, loc, sliceOp.getMixedOffsets()[i]));
+      dynDims.push_back(asValue(rewriter, loc, sliceOp.getMixedSizes()[i]));
   }
 
   // Create GenerateOp.
diff --git a/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir b/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
--- a/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
+++ b/mlir/test/Dialect/Linalg/subtensor-of-padtensor.mlir
@@ -153,3 +153,27 @@
   return %1 : tensor<3x4xf32>
 }
 
+// -----
+
+// CHECK-LABEL: @dynamic_extract_size
+//  CHECK-SAME:   %[[ARG0:.*]]: tensor<?x5xf32>, %[[ARG1:.*]]: index
+//   CHECK-NOT:   linalg.pad_tensor
+//       CHECK:   %[[C0:.*]] = constant 0 : index
+//       CHECK:   tensor.dim %[[ARG0]], %[[C0]]
+//       CHECK:   %[[RESULT:.*]] = scf.if %{{.*}} -> (tensor<?x4xf32>) {
+//       CHECK:     %[[GEN:.*]] = tensor.generate %[[ARG1]]
+//       CHECK:     scf.yield %[[GEN]]
+//       CHECK:   } else {
+//       CHECK:     %[[SUBTENSOR:.*]] = tensor.extract_slice %[[ARG0]][%{{.*}}, 4] [%{{.*}}, 1] [1, 1] : tensor<?x5xf32> to tensor<?x1xf32>
+//       CHECK:     %[[PADTENSOR:.*]] = linalg.pad_tensor %[[SUBTENSOR]] low[0, 0] high[%{{.*}}, 3]
+//       CHECK:     scf.yield %[[PADTENSOR]]
+//       CHECK:   }
+//       CHECK:   return %[[RESULT]]
+func @dynamic_extract_size(%arg0 : tensor<?x5xf32>, %s1: index, %pad : f32) -> tensor<?x4xf32> {
+  %0 = linalg.pad_tensor %arg0 low[0, 0] high[7, 8] {
+    ^bb0(%arg1: index, %arg2: index):
+      linalg.yield %pad : f32
+  } : tensor<?x5xf32> to tensor<?x13xf32>
+  %1 = tensor.extract_slice %0[2, 4] [%s1, 4] [1, 1] : tensor<?x13xf32> to tensor<?x4xf32>
+  return %1 : tensor<?x4xf32>
+}