diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td
@@ -38,15 +38,11 @@
     decisions during One-Shot Bufferize.
   }];

-  let arguments =
-      (ins Variadic<Index>:$sizes, I64ArrayAttr:$static_sizes);
+  let arguments = (ins Variadic<Index>:$dynamicSizes);

   let results = (outs AnyTensor:$result);

-  let assemblyFormat = [{
-    custom<OperandsOrIntegersSizesList>($sizes, $static_sizes) attr-dict
-    `:` type($result)
-  }];
+  let assemblyFormat = "`(`$dynamicSizes`)` attr-dict `:` type($result)";

   let extraClassDeclaration = [{
     LogicalResult bufferize(RewriterBase &rewriter, BufferizationState &state);
@@ -56,81 +52,41 @@
       return false;
     }

-    static StringRef getStaticSizesAttrName() {
-      return "static_sizes";
-    }
-
     RankedTensorType getType() {
       return getResult().getType().cast<RankedTensorType>();
     }

-    // Infer the shape of the result tensor given the static shapes
-    // and element type of the result tensor.
-    static Type inferResultType(ArrayRef<int64_t> staticSizes,
-                                Type elementType, Attribute encoding = {});
-
     // Return true if the size of the tensor is dynamic at `idx`
-    bool isDynamicSize(unsigned idx) {
-      APInt v = *(static_sizes().getAsValueRange<IntegerAttr>().begin() + idx);
-      return ShapedType::isDynamic(v.getSExtValue());
-    }
-
-    // Assert that the size of the result tensor is static at `idx`
-    // and return the shape.
-    int64_t getStaticSize(unsigned idx) {
-      assert(!isDynamicSize(idx) && "expected static size");
-      APInt v = *(static_sizes().
-          template getAsValueRange<IntegerAttr>().begin() + idx);
-      return v.getSExtValue();
+    bool isDynamicDim(unsigned idx) {
+      return getType().isDynamicDim(idx);
     }

     // Return the argument position that contains the dynamic size of
     // the tensor at dimension `idx`. Asserts that the shape is
     // dynamic at that `idx`.
     unsigned getIndexOfDynamicSize(unsigned idx) {
-      assert(isDynamicSize(idx) && "expected dynamic size");
+      assert(isDynamicDim(idx) && "expected dynamic size");
+      ArrayRef<int64_t> shape = getType().getShape();
       return std::count_if(
-          static_sizes().getValue().begin(),
-          static_sizes().getValue().begin() + idx,
-          [&](Attribute attr) {
-            return ShapedType::isDynamic(attr.cast<IntegerAttr>().getInt());
-          });
+          shape.begin(), shape.begin() + idx,
+          [&](int64_t size) { return ShapedType::isDynamic(size); });
     }

-    // Return both static and dynamic sizes as a list of `OpFoldResult`.
-    SmallVector<OpFoldResult> getMixedSizes();
-
     // Return the Value of the dynamic size of the tensor at dimension
-    // `idx`. Asserts that the shape is dynamic at that `idx.
+    // `idx`. Asserts that the shape is dynamic at that `idx`.
     Value getDynamicSize(unsigned idx) {
       return getOperand(getIndexOfDynamicSize(idx));
     }
-  }];

-  let builders = [
-    OpBuilder<(ins "ValueRange":$shape,
-        "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
-    [{
-      build($_builder, $_state,
-            AllocTensorOp::inferResultType(staticShape, elementType),
-            shape, $_builder.getI64ArrayAttr(staticShape));
-    }]>,
-    OpBuilder<(ins "ValueRange":$shape, "Type":$elementType),
-    [{
-      SmallVector<int64_t> staticShape(
-          shape.size(), ShapedType::kDynamicSize);
-      build($_builder, $_state, shape, staticShape, elementType);
-    }]>,
-    OpBuilder<(ins "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
-    [{
-      build($_builder, $_state, ValueRange{}, staticShape, elementType);
-    }]>,
-    OpBuilder<(ins "ArrayRef<OpFoldResult>":$sizes, "Type":$elementType,
-        CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
-  ];
+    // Assert that the size of the result tensor is static at `idx`
+    // and return the shape.
+    int64_t getStaticSize(unsigned idx) {
+      assert(!isDynamicDim(idx) && "expected static size");
+      return getType().getShape()[idx];
+    }
+  }];

   let hasCanonicalizer = 1;
-  let hasCustomAssemblyFormat = 1;
   let hasVerifier = 1;
 }
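Note: with the custom builders and the `static_sizes` attribute gone, the op is constructed from the desired result type plus exactly one index operand per dynamic dimension, which is what the updated canonicalization pattern below does via `rewriter.create<AllocTensorOp>(op.getLoc(), newType, newDynamicSizes)`. A minimal sketch of such a call site (the builder `b`, location `loc`, and index value `sz` are illustrative assumptions, not part of this patch):

// Sketch: allocate a tensor<4x?xf32>. One dynamic dimension, so exactly
// one index operand is passed alongside the result type.
RankedTensorType type = RankedTensorType::get(
    {4, ShapedType::kDynamicSize}, b.getF32Type());
Value t = b.create<bufferization::AllocTensorOp>(
    loc, type, /*dynamicSizes=*/ValueRange{sz});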
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
@@ -12,6 +12,7 @@
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/IR/Matchers.h"

 using namespace mlir;
 using namespace mlir::bufferization;
@@ -145,62 +146,14 @@
   return success();
 }

-void AllocTensorOp::build(OpBuilder &b, OperationState &result,
-                          ArrayRef<OpFoldResult> sizes, Type elementType,
-                          ArrayRef<NamedAttribute> attrs) {
-  SmallVector<Value> dynamicSizes;
-  SmallVector<int64_t> staticSizes;
-  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
-                             ShapedType::kDynamicSize);
-  auto resultType = RankedTensorType::get(staticSizes, elementType);
-  build(b, result, resultType, dynamicSizes, b.getI64ArrayAttr(staticSizes));
-  result.addAttributes(attrs);
-}
-
 LogicalResult AllocTensorOp::verify() {
-  RankedTensorType resultType = getType();
-  SmallVector<int64_t, 4> staticSizes = llvm::to_vector<4>(llvm::map_range(
-      static_sizes().cast<ArrayAttr>(),
-      [](Attribute a) -> int64_t { return a.cast<IntegerAttr>().getInt(); }));
-
-  if (failed(verifyListOfOperandsOrIntegers(
-          *this, "sizes", resultType.getRank(), static_sizes(), sizes(),
-          ShapedType::isDynamic)))
-    return failure();
-
-  if (static_sizes().size() != static_cast<unsigned>(resultType.getRank()))
-    return emitError("expected ") << resultType.getRank() << " sizes values";
-
-  Type expectedType = AllocTensorOp::inferResultType(
-      staticSizes, resultType.getElementType(), resultType.getEncoding());
-  if (resultType != expectedType) {
-    return emitError("specified type ")
-           << resultType << " does not match the inferred type "
-           << expectedType;
-  }
+  if (getType().getNumDynamicDims() !=
+      static_cast<int64_t>(dynamicSizes().size()))
+    return emitError("expected ")
+           << getType().getNumDynamicDims() << " dynamic sizes";
   return success();
 }

-Type AllocTensorOp::inferResultType(ArrayRef<int64_t> staticSizes,
-                                    Type elementType, Attribute encoding) {
-  return RankedTensorType::get(staticSizes, elementType, encoding);
-}
-
-SmallVector<OpFoldResult> AllocTensorOp::getMixedSizes() {
-  SmallVector<OpFoldResult> mixedSizes;
-  mixedSizes.reserve(getType().getRank());
-  unsigned dynamicValIndex = 0;
-  for (Attribute attr : static_sizes()) {
-    auto intAttr = attr.cast<IntegerAttr>();
-    if (!ShapedType::isDynamic(intAttr.getInt())) {
-      mixedSizes.push_back(intAttr);
-      continue;
-    }
-    mixedSizes.push_back(sizes()[dynamicValIndex++]);
-  }
-  return mixedSizes;
-}
-
 namespace {
 /// Change the type of the result of a `bufferization.alloc_tensor` by making
 /// the result type statically sized along dimensions that in the original op
 /// were dynamic but whose size was defined by a `constant` op.
 /// For example:
 ///
 ///   %c5 = arith.constant 5: index
-///   %0 = bufferization.alloc_tensor [%arg0, %c5] : tensor<?x?xf32>
+///   %0 = bufferization.alloc_tensor(%arg0, %c5) : tensor<?x?xf32>
 ///
 /// to
 ///
-///   %0 = bufferization.alloc_tensor [%arg0, 5] : tensor<?x5xf32>
+///   %0 = bufferization.alloc_tensor(%arg0) : tensor<?x5xf32>
 struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
   using OpRewritePattern<AllocTensorOp>::OpRewritePattern;

   LogicalResult matchAndRewrite(AllocTensorOp op,
                                 PatternRewriter &rewriter) const override {
-    SmallVector<Value> dynamicSizes;
-    SmallVector<int64_t> staticSizes;
-    for (unsigned i = 0, e = op.getType().getRank(); i != e; ++i) {
-      // If the size is already static, nothing to do.
-      if (!op.isDynamicSize(i)) {
-        staticSizes.push_back(op.getStaticSize(i));
+    SmallVector<int64_t> newShape = llvm::to_vector(op.getType().getShape());
+    SmallVector<Value> newDynamicSizes;
+    unsigned int dynValCounter = 0;
+    for (int64_t i = 0; i < op.getType().getRank(); ++i) {
+      if (!op.isDynamicDim(i))
         continue;
+      Value value = op.dynamicSizes()[dynValCounter++];
+      APInt intVal;
+      if (matchPattern(value, m_ConstantInt(&intVal))) {
+        newShape[i] = intVal.getSExtValue();
+      } else {
+        newDynamicSizes.push_back(value);
       }
-
-      // If the size is dynamic but defined using a `constant` op, get the
-      // constant value to find the static size to use.
-      unsigned operandNum = op.getIndexOfDynamicSize(i);
-      Value sizeOperand = op.getOperand(operandNum);
-      if (auto constantIndexOp =
-              sizeOperand.getDefiningOp<arith::ConstantIndexOp>()) {
-        staticSizes.push_back(constantIndexOp.value());
-        continue;
-      }
-
-      // Fallback case. Keep the size dynamic.
-      dynamicSizes.push_back(sizeOperand);
-      staticSizes.push_back(ShapedType::kDynamicSize);
     }
-    RankedTensorType newType =
-        RankedTensorType::get(staticSizes, op.getType().getElementType());
+    RankedTensorType newType = RankedTensorType::get(
+        newShape, op.getType().getElementType(), op.getType().getEncoding());
     if (newType == op.getType())
       return failure();
     auto newOp =
-        rewriter.create<AllocTensorOp>(op.getLoc(), newType, dynamicSizes,
-                                       rewriter.getI64ArrayAttr(staticSizes));
+        rewriter.create<AllocTensorOp>(op.getLoc(), newType, newDynamicSizes);
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -262,7 +205,7 @@
   auto allocTensorOp = dimOp.source().getDefiningOp<AllocTensorOp>();
   if (!allocTensorOp || !maybeConstantIndex)
     return failure();
-  if (!allocTensorOp.isDynamicSize(*maybeConstantIndex))
+  if (!allocTensorOp.getType().isDynamicDim(*maybeConstantIndex))
     return failure();
   rewriter.replaceOp(dimOp,
                      allocTensorOp.getDynamicSize(*maybeConstantIndex));
@@ -280,7 +223,7 @@
     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
   auto shapes = llvm::to_vector<4>(llvm::map_range(
       llvm::seq<int64_t>(0, getType().getRank()), [&](int64_t dim) -> Value {
-        if (isDynamicSize(dim))
+        if (isDynamicDim(dim))
           return getDynamicSize(dim);
         return builder.create<arith::ConstantIndexOp>(getLoc(),
                                                       getStaticSize(dim));
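The pattern above replaces the old `getDefiningOp<arith::ConstantIndexOp>()` check with the generic `matchPattern`/`m_ConstantInt` helpers from mlir/IR/Matchers.h (hence the new include), so any operand whose defining op folds to a constant integer gets promoted to a static size. A self-contained sketch of that idiom; the helper name and the assumption that `size` is an index-typed dynamic-size operand are illustrative, not part of the patch:

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"

using namespace mlir;

// Sketch: return the folded constant value of `size`, or
// ShapedType::kDynamicSize if it does not fold to a constant integer.
static int64_t getFoldedSize(Value size) {
  APInt intVal;
  if (matchPattern(size, m_ConstantInt(&intVal)))
    return intVal.getSExtValue();
  return ShapedType::kDynamicSize;
}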
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -32,7 +32,7 @@
 // Example: `foo` fails bufferization because %0 is not equivalent to any bbArg.
 // ```
 // func @foo() -> tensor<?xf32> {
-//   %0 = linalg.alloc_tensor [...] : tensor<?xf32>
+//   %0 = bufferization.alloc_tensor(...) : tensor<?xf32>
 //   return %0 : tensor<?xf32>
 // }
 // ```
diff --git a/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp b/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/InitTensorToAllocTensor.cpp
@@ -23,8 +23,8 @@
   LogicalResult matchAndRewrite(InitTensorOp op,
                                 PatternRewriter &rewriter) const override {
-    rewriter.replaceOpWithNewOp<bufferization::AllocTensorOp>(
-        op, op.getMixedSizes(), op.getType().getElementType());
+    rewriter.replaceOpWithNewOp<bufferization::AllocTensorOp>(op, op.getType(),
+                                                              op.sizes());
     return success();
   }
 };
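Downstream users of the removed `AllocTensorOp::getMixedSizes()` can recover the same mixed static/dynamic size list from the result type and the accessors kept in the .td file above. A hedged sketch; the free function and builder parameter are assumptions for illustration:

// Sketch: rebuild the per-dimension OpFoldResult list that the removed
// getMixedSizes() used to return: attributes for static dimensions,
// SSA values for dynamic ones.
static SmallVector<OpFoldResult> getMixedSizes(bufferization::AllocTensorOp op,
                                               Builder &b) {
  SmallVector<OpFoldResult> mixedSizes;
  for (unsigned i = 0, e = op.getType().getRank(); i != e; ++i) {
    if (op.isDynamicDim(i))
      mixedSizes.push_back(op.getDynamicSize(i));
    else
      mixedSizes.push_back(b.getIndexAttr(op.getStaticSize(i)));
  }
  return mixedSizes;
}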
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-alloc-tensor-elimination.mlir
@@ -18,7 +18,7 @@
   // insert_slice. AllocTensorOp replaces the alloc_tensor with an out-of-place
   // extract_slice.
   // CHECK: %[[EXTRACT_SLICE_ALLOC:.*]] = memref.alloc(%[[sz]])
-  %a = bufferization.alloc_tensor[%sz] : tensor<?xf32>
+  %a = bufferization.alloc_tensor(%sz) : tensor<?xf32>
   // CHECK: linalg.fill ins({{.*}} : f32) outs(%[[EXTRACT_SLICE_ALLOC]] : memref<?xf32>)
   %f = linalg.fill ins(%f0 : f32) outs(%a : tensor<?xf32>) -> tensor<?xf32>
@@ -50,7 +50,7 @@
   // alloc_tensor itself does not alloc but forwards to the insert_slice.
   // InitTensorOp replaces the alloc_tensor with an inplace extract_slice.
   // CHECK: %[[T_SUBVIEW:.*]] = memref.subview %[[FUNC_ARG]][42] [%[[sz]]] [1]
-  %a = bufferization.alloc_tensor[%sz] : tensor<?xf32>
+  %a = bufferization.alloc_tensor(%sz) : tensor<?xf32>
   // CHECK: linalg.fill ins({{.*}} : f32) outs(%[[T_SUBVIEW]] : memref<?xf32
   %f = linalg.fill ins(%f0 : f32) outs(%a : tensor<?xf32>) -> tensor<?xf32>
@@ -71,7 +71,7 @@
   %c5 = arith.constant 5 : index

   // CHECK-NOT: memref.alloc
-  %blank = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %blank = bufferization.alloc_tensor() : tensor<5xf32>

   // CHECK: scf.for %[[iv:.*]] = %{{.*}} to %[[sz]] step %{{.*}} {
   %r = scf.for %iv = %c0 to %sz step %c5 iter_args(%bb = %t) -> (tensor<?xf32>) {
@@ -102,7 +102,7 @@
   // CHECK-NOT: memref.alloc
   // CHECK: %[[subview:.*]] = memref.subview %[[t]][%[[idx]]] [5] [1]
-  %blank = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %blank = bufferization.alloc_tensor() : tensor<5xf32>

   // CHECK: scf.for %[[iv:.*]] = %{{.*}} to %[[sz]] step %{{.*}} {
   %r = scf.for %iv = %c0 to %sz step %c5 iter_args(%bb = %t) -> (tensor<?xf32>) {
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir
@@ -16,7 +16,7 @@
     // CHECK-NOT: dealloc
     // CHECK: scf.yield %[[casted]]
     %sz = "test.some_op"() : () -> (index)
-    %0 = bufferization.alloc_tensor[%sz] : tensor<?xf32>
+    %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
     scf.yield %0 : tensor<?xf32>
   } else {
     // CHECK: } else {
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
@@ -142,7 +142,7 @@
   // bufferizes out-of-place.
   // CHECK: %[[m1:.*]] = memref.alloc() {{.*}} : memref<10xf32>
   // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<10xf32>
-  %t1 = bufferization.alloc_tensor [10] : tensor<10xf32>
+  %t1 = bufferization.alloc_tensor() : tensor<10xf32>

   // CHECK: linalg.fill ins(%{{.*}}{{.*}}outs(%[[m1]]
   // CHECK: %[[filled_tensor:.*]] = bufferization.to_tensor %[[m1]]
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -44,7 +44,7 @@
 // CHECK-LABEL: func @func_without_tensor_args
 func.func @func_without_tensor_args(%v : vector<10xf32>) -> () {
   // CHECK: %[[alloc:.*]] = memref.alloc()
-  %0 = bufferization.alloc_tensor[10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>

   %c0 = arith.constant 0 : index
   // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
@@ -97,7 +97,7 @@
 // CHECK-LABEL: func @copy_deallocated(
 func.func @copy_deallocated() -> tensor<10xf32> {
   // CHECK: %[[alloc:.*]] = memref.alloc()
-  %0 = bufferization.alloc_tensor[10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>
   // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]]
   // CHECK: memref.dealloc %[[alloc]]
   // CHECK: return %[[alloc_tensor]]
@@ -111,7 +111,7 @@
 func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %c: i1) -> tensor<?xf32> {
   // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?xf32>
   // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32>
-  %0 = bufferization.alloc_tensor [%sz] : tensor<?xf32>
+  %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>

   // A cast must be inserted because %t and %0 have different memref types.
   // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<?xf32> to memref<?xf32, {{.*}}>
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
@@ -17,7 +17,7 @@
 // CHECK: %[[alloc:.*]] = memref.alloc
 // CHECK: return %[[alloc]]
 func.func @create_tensor() -> tensor<10xf32> {
-  %0 = bufferization.alloc_tensor [10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>
   return %0 : tensor<10xf32>
 }

diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-analysis.mlir
@@ -682,7 +682,7 @@
   %cst_0 = arith.constant 0.000000e+00 : f32
   %cst_1 = arith.constant 1.000000e+00 : f32

-  %7 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %7 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false"]}
@@ -720,7 +720,7 @@
   %cst_0 = arith.constant 0.000000e+00 : f32
   %cst_1 = arith.constant 1.000000e+00 : f32

-  %7 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %7 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "false"]}
@@ -1252,7 +1252,7 @@
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index)
   -> (tensor<?xf32>)
 {
-  %B = bufferization.alloc_tensor [%sz2] : tensor<?xf32>
+  %B = bufferization.alloc_tensor(%sz2) : tensor<?xf32>

   // CHECK: scf.for {{.*}} {
   %r0 = scf.for %i = %lb to %ub step %step iter_args(%t = %A) -> (tensor<?xf32>) {
@@ -1280,7 +1280,7 @@
     %lb : index, %ub : index, %step : index, %sz: index, %sz2: index, %f: f32)
   -> (tensor<?xf32>)
 {
-  %B = bufferization.alloc_tensor [%sz2] : tensor<?xf32>
+  %B = bufferization.alloc_tensor(%sz2) : tensor<?xf32>
   %C = tensor.insert %f into %B[%lb] : tensor<?xf32>

   // CHECK: scf.for {{.*}} {
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-invalid.mlir
@@ -60,7 +60,7 @@
     scf.yield %t1 : tensor<?xf32>
   } else {
     // This buffer aliases.
-    %t2 = bufferization.alloc_tensor [%idx] : tensor<?xf32>
+    %t2 = bufferization.alloc_tensor(%idx) : tensor<?xf32>
     // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
     scf.yield %t2 : tensor<?xf32>
   }
@@ -221,7 +221,7 @@
 func.func @mini_test_case1() -> tensor<10x20xf32> {
   %f0 = arith.constant 0.0 : f32
-  %t = bufferization.alloc_tensor [10, 20] : tensor<10x20xf32>
+  %t = bufferization.alloc_tensor() : tensor<10x20xf32>
   %r = linalg.fill ins(%f0 : f32) outs(%t : tensor<10x20xf32>) -> tensor<10x20xf32>
   // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
   return %r : tensor<10x20xf32>
@@ -274,7 +274,7 @@
 // -----

 func.func @foo(%t : tensor<5xf32>) -> (tensor<5xf32>) {
-  %0 = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %0 = bufferization.alloc_tensor() : tensor<5xf32>
   // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
   return %0 : tensor<5xf32>
 }
@@ -291,7 +291,7 @@
 func.func @destination_passing_style_dominance_test_1(%cst : f32, %idx : index,
                                                       %idx2 : index) -> f32 {
   %0 = scf.execute_region -> tensor<?xf32> {
-    %1 = bufferization.alloc_tensor [%idx] : tensor<?xf32>
+    %1 = bufferization.alloc_tensor(%idx) : tensor<?xf32>
     // expected-error @+1 {{operand #0 of ReturnLike op does not satisfy destination passing style}}
     scf.yield %1 : tensor<?xf32>
   }
@@ -304,7 +304,7 @@
 func.func @destination_passing_style_dominance_test_2(%cst : f32, %idx : index,
                                                       %idx2 : index) -> f32 {
-  %1 = bufferization.alloc_tensor [%idx] : tensor<?xf32>
+  %1 = bufferization.alloc_tensor(%idx) : tensor<?xf32>

   %0 = scf.execute_region -> tensor<?xf32> {
     // This YieldOp is in destination-passing style, thus no error.
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
@@ -70,7 +70,7 @@
 // CHECK-FULLY-DYNAMIC-LAYOUT-MAP-SAME: #[[$map2a]]> {
 func.func @return_extract_slice(%idx: index, %sz: index) -> (tensor<2x?xf32>)
 {
-  %t = bufferization.alloc_tensor [20, 10] : tensor<20x10xf32>
+  %t = bufferization.alloc_tensor() : tensor<20x10xf32>
   %0 = tensor.extract_slice %t[%idx, %idx][2, %sz][1, 1]
       : tensor<20x10xf32> to tensor<2x?xf32>
   return %0 : tensor<2x?xf32>
@@ -120,7 +120,7 @@
 // CHECK-LABEL: func @func_without_tensor_args
 func.func @func_without_tensor_args(%v : vector<10xf32>) -> () {
   // CHECK: %[[alloc:.*]] = memref.alloc()
-  %0 = bufferization.alloc_tensor[10] : tensor<10xf32>
+  %0 = bufferization.alloc_tensor() : tensor<10xf32>

   %c0 = arith.constant 0 : index
   // CHECK: vector.transfer_write %{{.*}}, %[[alloc]]
@@ -456,9 +456,9 @@
   // CHECK-DAG: %[[cA:.*]] = memref.cast %[[A]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   // CHECK-DAG: %[[cB:.*]] = memref.cast %[[B]] : memref<64xf32> to memref<64xf32, #[[$DYN_1D_MAP]]>
   // CHECK-DAG: %[[cC:.*]] = memref.cast %[[C]] : memref<f32> to memref<f32, #[[$DYN_0D_MAP]]>
-  %A = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %B = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %C = bufferization.alloc_tensor [] : tensor<f32>
+  %A = bufferization.alloc_tensor() : tensor<64xf32>
+  %B = bufferization.alloc_tensor() : tensor<64xf32>
+  %C = bufferization.alloc_tensor() : tensor<f32>

   // CHECK-DAG: linalg.fill ins(%[[C1]] : f32) outs(%[[A]] : memref<64xf32>)
   // CHECK-DAG: linalg.fill ins(%[[C2]] : f32) outs(%[[B]] : memref<64xf32>)
diff --git a/mlir/test/Dialect/Bufferization/canonicalize.mlir b/mlir/test/Dialect/Bufferization/canonicalize.mlir
--- a/mlir/test/Dialect/Bufferization/canonicalize.mlir
+++ b/mlir/test/Dialect/Bufferization/canonicalize.mlir
@@ -249,10 +249,10 @@

 func.func @alloc_tensor_canonicalize() -> (tensor<4x5x?xf32>) {
   %c6 = arith.constant 6 : index
-  %0 = bufferization.alloc_tensor [4, 5, %c6] : tensor<4x5x?xf32>
+  %0 = bufferization.alloc_tensor(%c6) : tensor<4x5x?xf32>
   return %0 : tensor<4x5x?xf32>
 }
 // CHECK: func @alloc_tensor_canonicalize
-// CHECK:   %[[T0:.+]] = bufferization.alloc_tensor [4, 5, 6] : tensor<4x5x6xf32>
+// CHECK:   %[[T0:.+]] = bufferization.alloc_tensor() : tensor<4x5x6xf32>
 // CHECK:   %[[T1:.+]] = tensor.cast %[[T0]] : tensor<4x5x6xf32> to tensor<4x5x?xf32>
 // CHECK:   return %[[T1]]
diff --git a/mlir/test/Dialect/Bufferization/invalid.mlir b/mlir/test/Dialect/Bufferization/invalid.mlir
--- a/mlir/test/Dialect/Bufferization/invalid.mlir
+++ b/mlir/test/Dialect/Bufferization/invalid.mlir
@@ -1,26 +1,8 @@
 // RUN: mlir-opt %s -split-input-file -verify-diagnostics

-func.func @alloc_tensor_err(%arg0 : index, %arg1 : index)
-{
-  // expected-error @+1 {{specified type 'tensor<4x?x?x5xf32>' does not match the inferred type 'tensor<4x5x?x?xf32>'}}
-  %1 = bufferization.alloc_tensor [4, 5, %arg0, %arg1] : tensor<4x?x?x5xf32>
-  return
-}
-
-// -----
-
-func.func @alloc_tensor_err(%arg0 : index)
-{
-  // expected-error @+1 {{expected 4 sizes values}}
-  %1 = bufferization.alloc_tensor [4, 5, %arg0] : tensor<4x?x?x5xf32>
-  return
-}
-
-// -----
-
 func.func @alloc_tensor_err(%arg0 : index)
 {
-  // expected-error @+1 {{expected 2 dynamic sizes values}}
-  %1 = "bufferization.alloc_tensor"(%arg0) {static_sizes = [4, -1, -1, 5]} : (index) -> tensor<4x?x?x5xf32>
+  // expected-error @+1 {{expected 2 dynamic sizes}}
+  %1 = bufferization.alloc_tensor(%arg0) : tensor<4x?x?x5xf32>
   return
 }
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-2fill-extract-matmul-all-perms.mlir
@@ -15,7 +15,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -42,7 +42,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -69,7 +69,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -96,7 +96,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -123,7 +123,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -150,7 +150,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -177,7 +177,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -204,7 +204,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -231,7 +231,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -258,7 +258,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -285,7 +285,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -312,7 +312,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["none", "false"]}
   %2 = linalg.fill ins(%cst_0 : f32) outs(%0 : tensor<256x256xf32>) -> tensor<256x256xf32>
@@ -339,7 +339,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -366,7 +366,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -392,7 +392,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -419,7 +419,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -446,7 +446,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -473,7 +473,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %3 = tensor.extract_slice %0[0, 0] [256, 16] [1, 1] : tensor<256x256xf32> to tensor<256x16xf32>
@@ -500,7 +500,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -527,7 +527,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -554,7 +554,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -581,7 +581,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -608,7 +608,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
@@ -635,7 +635,7 @@
   %c0 = arith.constant 0 : index
   %cst = arith.constant 0.000000e+00 : f32
   %cst_0 = arith.constant 1.000000e+00 : f32
-  %0 = bufferization.alloc_tensor [256, 256] : tensor<256x256xf32>
+  %0 = bufferization.alloc_tensor() : tensor<256x256xf32>

   // CHECK: {__inplace_operands_attr__ = ["false"]}
   %4 = tensor.extract_slice %0[0, 0] [16, 256] [1, 1] : tensor<256x256xf32> to tensor<16x256xf32>
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize-analysis-init-tensor-elimination.mlir
@@ -10,7 +10,7 @@
   // CHECK: tensor.extract_slice
   // CHECK-SAME: {__inplace_operands_attr__ = ["false", "none"]
   // Instead of allocating, share buffer with some inplace bufferization?
-  %0 = bufferization.alloc_tensor [%arg1] : tensor<?xf32>
+  %0 = bufferization.alloc_tensor(%arg1) : tensor<?xf32>

   // CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "true"]
@@ -37,7 +37,7 @@
   // CHECK: tensor.extract_slice
   // CHECK-SAME: {__inplace_operands_attr__ = ["true", "none"]
   // Instead of allocating, share buffer with some inplace bufferization?
-  %0 = bufferization.alloc_tensor [%arg1] : tensor<?xf32>
+  %0 = bufferization.alloc_tensor(%arg1) : tensor<?xf32>

   // CHECK: linalg.fill
   // CHECK-SAME: {__inplace_operands_attr__ = ["none", "true"]
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -347,9 +347,9 @@
   %c0 = arith.constant 0 : index
   %c32 = arith.constant 32 : index
   %c8 = arith.constant 8 : index
-  %0 = bufferization.alloc_tensor [4, 1, 6, 8] : tensor<4x1x6x8xf32>
+  %0 = bufferization.alloc_tensor() : tensor<4x1x6x8xf32>
   %1 = tensor.cast %0 : tensor<4x1x6x8xf32> to tensor<?x1x6x8xf32>
-  %2 = bufferization.alloc_tensor [1, 6, 8] : tensor<1x6x8xf32>
+  %2 = bufferization.alloc_tensor() : tensor<1x6x8xf32>
   %3 = scf.for %arg3 = %c0 to %c32 step %c8 iter_args(%arg4 = %1) -> (tensor<?x1x6x8xf32>) {
     %4 = affine.apply affine_map<(d0) -> (d0 ceildiv 8)>(%arg3)
     %5 = tensor.insert_slice %2 into %arg4[%4, 0, 0, 0] [1, 1, 6, 8] [1, 1, 1, 1] :
@@ -370,7 +370,7 @@
   // CHECK-NOT: copy
   // CHECK: memref.store
   // CHECK: memref.store
-  %0 = bufferization.alloc_tensor [5] : tensor<5xf32>
+  %0 = bufferization.alloc_tensor() : tensor<5xf32>
   %1 = tensor.insert %f1 into %0[%idx] : tensor<5xf32>
   %2 = tensor.insert %f2 into %0[%idx] : tensor<5xf32>
   return %1, %2 : tensor<5xf32>, tensor<5xf32>
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
--- a/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize-analysis.mlir
@@ -583,7 +583,7 @@
 {
   // CHECK: scf.for {{.*}} {
   %r0 = scf.for %i = %lb to %ub step %step iter_args(%t = %A) -> (tensor<?xf32>) {
-    %B = bufferization.alloc_tensor [%sz] : tensor<?xf32>
+    %B = bufferization.alloc_tensor(%sz) : tensor<?xf32>
    %i2 = arith.index_cast %i : index to i32
    %i3 = arith.sitofp %i2 : i32 to f32
    // The tensor.insert is in-place because %B is defined inside the loop.
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -220,7 +220,7 @@
 // CHECK: return %[[r]]
 func.func @scf_execute_region_yield_non_equivalent(%i: index, %j: index) -> f32 {
   %r = scf.execute_region -> (tensor<?xf32>) {
-    %t2 = bufferization.alloc_tensor [%i] : tensor<?xf32>
+    %t2 = bufferization.alloc_tensor(%i) : tensor<?xf32>
     scf.yield %t2 : tensor<?xf32>
   }
   %f = tensor.extract %r[%j] : tensor<?xf32>
@@ -274,7 +274,7 @@
 func.func @scf_for_yield_allocation(%t: tensor<?xf32>, %lb : index, %ub : index,
                                     %step : index) -> tensor<?xf32> {
   %r = scf.for %i = %lb to %ub step %step iter_args(%a = %t) -> tensor<?xf32> {
-    %t2 = bufferization.alloc_tensor [%i] : tensor<?xf32>
+    %t2 = bufferization.alloc_tensor(%i) : tensor<?xf32>
     scf.yield %t2 : tensor<?xf32>
   }

diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -179,9 +179,9 @@
   %c8 = arith.constant 8 : index
   %c32 = arith.constant 32 : index
   %c0 = arith.constant 0 : index
-  %0 = bufferization.alloc_tensor [4, 1, 6, 8] : tensor<4x1x6x8xf32>
+  %0 = bufferization.alloc_tensor() : tensor<4x1x6x8xf32>
   %1 = tensor.cast %0 : tensor<4x1x6x8xf32> to tensor<?x1x6x8xf32>
-  %2 = bufferization.alloc_tensor [1, 6, 8] : tensor<1x6x8xf32>
+  %2 = bufferization.alloc_tensor() : tensor<1x6x8xf32>
   %5 = scf.for %arg7 = %c0 to %c32 step %c8 iter_args(%arg8 = %1) -> (tensor<?x1x6x8xf32>) {
     %7 = affine.apply affine_map<(d0) -> (d0 ceildiv 8)>(%arg7)
     %8 = tensor.extract_slice %arg0[%i, %j, %arg7] [1, 6, 8] [1, 1, 1] : tensor<8x18x32xf32> to tensor<1x6x8xf32>
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-one-shot-bufferize.mlir
@@ -16,7 +16,7 @@
   %c0 = arith.constant 0 : index
   %0 = linalg.fill ins(%cst : f32) outs(%arg2 : tensor<f32>) -> tensor<f32>
   %1 = affine.apply #map0(%c0, %c64)[%c2]
-  %2 = bufferization.alloc_tensor [%1, 2] : tensor<?x2xf32>
+  %2 = bufferization.alloc_tensor(%1) : tensor<?x2xf32>
   %3 = scf.for %arg3 = %c0 to %c64 step %c2 iter_args(%arg4 = %2) -> (tensor<?x2xf32>) {
     %8 = affine.apply #map1(%arg3, %c0)[%c2]
     %9 = tensor.extract_slice %arg1[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32>
@@ -33,13 +33,13 @@
   //   call @printMemrefF32(%B) : (tensor<*xf32>) -> ()

   %4 = affine.apply #map0(%c0, %c64)[%c2]
-  %5 = bufferization.alloc_tensor [%4, 2] : tensor<?x2xf32>
+  %5 = bufferization.alloc_tensor(%4) : tensor<?x2xf32>
   %6 = scf.for %arg3 = %c0 to %c64 step %c2 iter_args(%arg4 = %5) -> (tensor<?x2xf32>) {
     %8 = affine.apply #map1(%arg3, %c0)[%c2]
     %9 = tensor.extract_slice %arg0[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32>
     %10 = tensor.cast %9 : tensor<2xf32> to tensor<?xf32>
     %11 = tensor.pad %10 low[%c0] high[%c0] {
-      ^bb0(%arg5: index):
+      ^bb0(%arg5: index):
       tensor.yield %cst : f32
     } : tensor<?xf32> to tensor<2xf32>
     %12 = tensor.insert_slice %11 into %arg4[%8, 0] [1, 2] [1, 1] : tensor<2xf32> into tensor<?x2xf32>
@@ -80,9 +80,9 @@
   %v1 = arith.constant 1.0 : f32
   %v2 = arith.constant 2.0 : f32

-  %A = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %B = bufferization.alloc_tensor [64] : tensor<64xf32>
-  %C = bufferization.alloc_tensor [] : tensor<f32>
+  %A = bufferization.alloc_tensor() : tensor<64xf32>
+  %B = bufferization.alloc_tensor() : tensor<64xf32>
+  %C = bufferization.alloc_tensor() : tensor<f32>

   %AA = linalg.fill ins(%v1 : f32) outs(%A : tensor<64xf32>) -> tensor<64xf32>
   %BB = linalg.fill ins(%v2 : f32) outs(%B : tensor<64xf32>) -> tensor<64xf32>
   %CC = linalg.fill ins(%v0 : f32) outs(%C : tensor<f32>) -> tensor<f32>