diff --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md --- a/mlir/docs/Bufferization.md +++ b/mlir/docs/Bufferization.md @@ -97,10 +97,9 @@ Looking first at the partial bufferization passes, we see that there are a sequence of `FuncOp` passes (which run in parallel on functions). These function -passes are bracketed by `tensor-constant-bufferize` and `func-bufferize`, which -are module passes (and thus serialize the parallel compilation process). These -two passes must be module passes because they make changes to the top-level -module. +passes are bracketed by `arith-bufferize` and `func-bufferize`, which are module +passes (and thus serialize the parallel compilation process). These two passes +must be module passes because they make changes to the top-level module. The bulk of the bufferization work is done by the function passes. Most of these passes are provided as part of the upstream MLIR distribution and bufferize @@ -235,7 +234,7 @@ - This is an example of a pass that is not split along dialect subdivisions. -- `tensor-constant-bufferize` +- `arith-bufferize` ([code](https://github.com/llvm/llvm-project/blob/bc8acf2ce8ad6e8c9b1d97b2e02d3f4ad26e1d9d/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp#L1), [test](https://github.com/llvm/llvm-project/blob/bc8acf2ce8ad6e8c9b1d97b2e02d3f4ad26e1d9d/mlir/test/Dialect/Standard/tensor-constant-bufferize.mlir#L1)) diff --git a/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.h b/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.h --- a/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.h +++ b/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.h @@ -17,6 +17,9 @@ /// Create a pass to bufferize Arithmetic ops. std::unique_ptr createArithmeticBufferizePass(); +/// Create a pass to bufferize arith.constant ops. +std::unique_ptr createConstantBufferizePass(uint64_t alignment = 0); + /// Add patterns to expand Arithmetic ops for LLVM lowering. 
void populateArithmeticExpandOpsPatterns(RewritePatternSet &patterns); diff --git a/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.td b/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.td --- a/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Arithmetic/Transforms/Passes.td @@ -11,9 +11,21 @@ include "mlir/Pass/PassBase.td" -def ArithmeticBufferize : Pass<"arith-bufferize", "FuncOp"> { +def ArithmeticBufferize : Pass<"arith-bufferize", "ModuleOp"> { let summary = "Bufferize Arithmetic dialect ops."; + let description = [{ + This pass bufferizes arith dialect ops. + + This pass needs to be a module pass because it inserts memref.global + ops into the module, which cannot be done safely from a function pass due to + multi-threading. Most other bufferization passes can run in parallel at + function granularity. + }]; let constructor = "mlir::arith::createArithmeticBufferizePass()"; + let options = [ + Option<"alignment", "alignment", "unsigned", /*default=*/"0", + "Create global memrefs with a specified alignment">, + ]; } def ArithmeticExpandOps : Pass<"arith-expand", "FuncOp"> { diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h @@ -27,19 +27,21 @@ namespace bufferization { -// TODO: from some HW description. -static constexpr int64_t kBufferAlignments = 128; - class BufferizableOpInterface; struct BufferizationOptions; class BufferizationState; /// Options for ComprehensiveBufferize. struct BufferizationOptions { - using AllocationFn = std::function(OpBuilder &, Location, - MemRefType, ValueRange)>; + /// Allocator function: Generate a memref allocation with the given type, + /// dynamic extents and alignment. 
+ using AllocationFn = std::function( + OpBuilder &, Location, MemRefType, ValueRange, unsigned int)>; + /// Deallocator function: Deallocate a buffer that was allocated with + /// AllocationFn. using DeallocationFn = std::function; + /// Memcpy function: Generate a memcpy between two buffers. using MemCpyFn = std::function; @@ -50,14 +52,13 @@ /// Return `true` if the op is allowed to be bufferized. bool isOpAllowed(Operation *op) const { - if (!dialectFilter.hasValue()) + if (!hasFilter) return true; - return dialectFilter->contains(op->getDialect()->getNamespace()); + return dialectFilter.contains(op->getDialect()->getNamespace()) || + operationFilter.contains(op->getName().getStringRef()); } - /// Allow-list the given dialects in the dialect filter. Only ops from - /// allow-listed dialects will be bufferized. If no dialect is added, ops from - /// any dialect will be bufferized. + /// Allow the given dialects and activate the filter (`hasFilter`). template void addToDialectFilter() { // The following expands a call to addToDialectFilterImpl for each dialect @@ -68,6 +69,14 @@ 0, (addToDialectFilterImpl(), 0)...}; } + /// Allow the given ops and activate the filter (`hasFilter`). + template + void addToOperationFilter() { + // FIXME: In c++17 this can be simplified by using 'fold expressions'. + (void)std::initializer_list{0, + (addToOperationFilterImpl(), 0)...}; + } + /// Try to cast the given op to BufferizableOpInterface if the op is allow listed. BufferizableOpInterface dynCastBufferizableOp(Operation *op) const; @@ -110,23 +119,37 @@ /// For debugging only. Should be used together with `testAnalysisOnly`. bool printConflicts = false; - /// Only bufferize ops from dialects that are allowed-listed by the filter. - /// All other ops are ignored. This option controls the scope of partial - /// bufferization. + /// Buffer alignment for new memory allocations.
+ unsigned int bufferAlignment = 128; + + /// If set to `true`, only ops that belong to a filtered dialect + /// (`dialectFilter`) and filtered ops (`operationFilter`) are processed. All + /// other ops are ignored. If set to `false`, all ops are bufferized (as long + /// as they implement BufferizableOpInterface). /// - /// Note: If no filter is specified, all ops are bufferized (as long as they - /// implement BufferizableOpInterface). If a filter is specified, - /// `allowUnknownOps` should be enabled. Otherwise, bufferization would fail - /// when encountering an op that is forbidden by the filter. - Optional> dialectFilter; + /// If a filter is specified, `allowUnknownOps` should be enabled. Otherwise, + /// bufferization would fail when encountering a non-filtered op. + bool hasFilter = false; + + /// A set of allowed dialects. + DenseSet dialectFilter; + + /// A set of allowed ops. + DenseSet operationFilter; private: - /// Allow-list a dialect in the dialect filter. + /// Allow a dialect. template void addToDialectFilterImpl() { - if (!dialectFilter.hasValue()) - dialectFilter.emplace(); - dialectFilter->insert(DialectT::getDialectNamespace()); + hasFilter = true; + dialectFilter.insert(DialectT::getDialectNamespace()); + } + + /// Allow an op. + template + void addToOperationFilterImpl() { + hasFilter = true; + operationFilter.insert(OpTy::getOperationName()); } }; diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h @@ -121,22 +121,12 @@ Liveness liveness; }; -// Support class to create global ops for tensor-valued constants in the -// program. Globals are created lazily at the top of the `moduleOp` with pretty +// Create a global op for the given tensor-valued constant in the program. 
+// Globals are created lazily at the top of the enclosing ModuleOp with pretty // names. Duplicates are avoided. -class GlobalCreator { -public: - GlobalCreator(ModuleOp module, unsigned alignment = 0) - : moduleOp(module), alignment(alignment) {} - memref::GlobalOp getGlobalFor(arith::ConstantOp constantOp); +FailureOr getGlobalFor(arith::ConstantOp constantOp, + uint64_t alignment); -private: - ModuleOp moduleOp; - unsigned alignment; - // This could use memref::GlobalOp key but we avoid introducing a new - // dependence to the memref dialect for this. - DenseMap globals; -}; } // namespace bufferization } // namespace mlir diff --git a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h --- a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h +++ b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.h @@ -19,7 +19,6 @@ namespace mlir { namespace bufferization { class BufferizeTypeConverter; -class GlobalCreator; } // namespace bufferization class RewritePatternSet; @@ -34,16 +33,6 @@ /// Creates an instance of func bufferization pass. std::unique_ptr createFuncBufferizePass(); -/// Add patterns to bufferize tensor constants into global memrefs to the given -/// pattern list. -void populateTensorConstantBufferizePatterns( - bufferization::GlobalCreator &globalCreator, - bufferization::BufferizeTypeConverter &typeConverter, - RewritePatternSet &patterns); - -/// Creates an instance of tensor constant bufferization pass. 
-std::unique_ptr createTensorConstantBufferizePass(unsigned alignment = 0); - //===----------------------------------------------------------------------===// // Registration //===----------------------------------------------------------------------===// diff --git a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td --- a/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/StandardOps/Transforms/Passes.td @@ -47,23 +47,4 @@ "memref::MemRefDialect"]; } -def TensorConstantBufferize : Pass<"tensor-constant-bufferize", "ModuleOp"> { - let summary = "Bufferize tensor constants."; - let description = [{ - This pass bufferizes tensor constants. - - This pass needs to be a module pass because it inserts memref.global - ops into the module, which cannot be done safely from a function pass due to - multi-threading. Most other bufferization passes can run in parallel at - function granularity. - }]; - let constructor = "mlir::createTensorConstantBufferizePass()"; - let dependentDialects = ["bufferization::BufferizationDialect", - "memref::MemRefDialect"]; - let options = [ - Option<"alignment", "alignment", "unsigned", /*default=*/"0", - "Create global memrefs with a specified alignment">, - ]; -} - #endif // MLIR_DIALECT_STANDARD_TRANSFORMS_PASSES diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp --- a/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.cpp @@ -39,8 +39,11 @@ // Create global memory segment and replace tensor with memref pointing to // that memory segment. 
- GlobalCreator globalCreator(moduleOp); - auto globalMemref = globalCreator.getGlobalFor(constantOp); + FailureOr globalOp = + getGlobalFor(constantOp, state.getOptions().bufferAlignment); + if (failed(globalOp)) + return failure(); + memref::GlobalOp globalMemref = globalOp.getValue(); replaceOpWithNewBufferizedOp( rewriter, op, globalMemref.type(), globalMemref.getName()); diff --git a/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp --- a/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Arithmetic/Transforms/Bufferize.cpp @@ -22,10 +22,21 @@ /// Pass to bufferize Arithmetic ops. struct ArithmeticBufferizePass : public ArithmeticBufferizeBase { + ArithmeticBufferizePass(uint64_t alignment = 0, bool constantOpOnly = false) + : ArithmeticBufferizeBase(), + constantOpOnly(constantOpOnly) { + this->alignment = alignment; + } + void runOnOperation() override { std::unique_ptr options = getPartialBufferizationOptions(); - options->addToDialectFilter(); + if (constantOpOnly) { + options->addToOperationFilter(); + } else { + options->addToDialectFilter(); + } + options->bufferAlignment = alignment; if (failed(bufferizeOp(getOperation(), *options))) signalPassFailure(); @@ -36,9 +47,18 @@ arith::ArithmeticDialect>(); arith::registerBufferizableOpInterfaceExternalModels(registry); } + +private: + bool constantOpOnly; }; } // namespace std::unique_ptr mlir::arith::createArithmeticBufferizePass() { return std::make_unique(); } + +std::unique_ptr +mlir::arith::createConstantBufferizePass(uint64_t alignment) { + return std::make_unique(alignment, + /*constantOpOnly=*/true); +} diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -464,11 +464,12 @@ ValueRange dynShape, 
const BufferizationOptions &options) { if (options.allocationFn) - return (*options.allocationFn)(b, loc, type, dynShape); + return (*options.allocationFn)(b, loc, type, dynShape, + options.bufferAlignment); // Default bufferallocation via AllocOp. Value allocated = b.create( - loc, type, dynShape, b.getI64IntegerAttr(kBufferAlignments)); + loc, type, dynShape, b.getI64IntegerAttr(options.bufferAlignment)); return allocated; } diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp @@ -144,16 +144,27 @@ // BufferPlacementTransformationBase //===----------------------------------------------------------------------===// -memref::GlobalOp GlobalCreator::getGlobalFor(arith::ConstantOp constantOp) { +FailureOr +bufferization::getGlobalFor(arith::ConstantOp constantOp, uint64_t alignment) { auto type = constantOp.getType().cast(); - - BufferizeTypeConverter typeConverter; + auto moduleOp = constantOp->getParentOfType(); + if (!moduleOp) + return failure(); // If we already have a global for this constant value, no need to do // anything else. - auto it = globals.find(constantOp.getValue()); - if (it != globals.end()) - return cast(it->second); + for (Operation &op : moduleOp.getRegion().getOps()) { + auto globalOp = dyn_cast(&op); + if (!globalOp) + continue; + if (!globalOp.initial_value().hasValue()) + continue; + uint64_t opAlignment = + globalOp.alignment().hasValue() ? globalOp.alignment().getValue() : 0; + Attribute initialValue = globalOp.initial_value().getValue(); + if (opAlignment == alignment && initialValue == constantOp.getValue()) + return globalOp; + } // Create a builder without an insertion point. We will insert using the // symbol table to guarantee unique names. @@ -171,6 +182,7 @@ alignment > 0 ? 
IntegerAttr::get(globalBuilder.getI64Type(), alignment) : IntegerAttr(); + BufferizeTypeConverter typeConverter; auto global = globalBuilder.create( constantOp.getLoc(), (Twine("__constant_") + os.str()).str(), /*sym_visibility=*/globalBuilder.getStringAttr("private"), @@ -182,6 +194,5 @@ // The symbol table inserts at the end of the module, but globals are a bit // nicer if they are at the beginning. global->moveBefore(&moduleOp.front()); - globals[constantOp.getValue()] = global; return global; } diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp @@ -71,9 +71,10 @@ static FailureOr allocationFnUsingAlloca(OpBuilder &b, Location loc, MemRefType type, - ValueRange dynShape) { + ValueRange dynShape, + unsigned int bufferAlignment) { Value allocated = b.create( - loc, type, dynShape, b.getI64IntegerAttr(kBufferAlignments)); + loc, type, dynShape, b.getI64IntegerAttr(bufferAlignment)); return allocated; } diff --git a/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt b/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt --- a/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/StandardOps/Transforms/CMakeLists.txt @@ -3,7 +3,6 @@ DecomposeCallGraphTypes.cpp FuncBufferize.cpp FuncConversions.cpp - TensorConstantBufferize.cpp ADDITIONAL_HEADER_DIRS ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/StandardOps/Transforms diff --git a/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp b/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp deleted file mode 100644 --- a/mlir/lib/Dialect/StandardOps/Transforms/TensorConstantBufferize.cpp +++ /dev/null @@ -1,92 +0,0 @@ -//===- Bufferize.cpp - Bufferization for std ops --------------------------===// -// -// Part of the LLVM Project, 
under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file implements bufferization of tensor-valued arith.constant ops. -// -//===----------------------------------------------------------------------===// - -#include "PassDetail.h" -#include "mlir/Dialect/Bufferization/IR/Bufferization.h" -#include "mlir/Dialect/Bufferization/Transforms/BufferUtils.h" -#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h" -#include "mlir/Dialect/MemRef/IR/MemRef.h" -#include "mlir/Dialect/StandardOps/IR/Ops.h" -#include "mlir/Dialect/StandardOps/Transforms/Passes.h" -#include "mlir/IR/BlockAndValueMapping.h" -#include "mlir/Transforms/DialectConversion.h" - -using namespace mlir; -using namespace mlir::bufferization; - -namespace { -class BufferizeTensorConstantOp - : public OpConversionPattern { -public: - BufferizeTensorConstantOp(GlobalCreator &globals, - TypeConverter &typeConverter, MLIRContext *context) - : OpConversionPattern(typeConverter, context, - /*benefit=*/1), - globals(globals) {} - - LogicalResult - matchAndRewrite(arith::ConstantOp op, OpAdaptor adaptor, - ConversionPatternRewriter &rewriter) const override { - auto type = op.getType().dyn_cast(); - if (!type) - return failure(); - - auto globalMemref = globals.getGlobalFor(op); - rewriter.replaceOpWithNewOp(op, globalMemref.type(), - globalMemref.getName()); - return success(); - } - GlobalCreator &globals; -}; -} // namespace - -void mlir::populateTensorConstantBufferizePatterns( - GlobalCreator &globalCreator, - bufferization::BufferizeTypeConverter &typeConverter, - RewritePatternSet &patterns) { - patterns.add(globalCreator, typeConverter, - patterns.getContext()); -} - -namespace { -class TensorConstantBufferizePass - : public TensorConstantBufferizeBase { -public: - explicit 
TensorConstantBufferizePass(unsigned alignment) { - if (alignment) - this->alignment = alignment; - } - - void runOnOperation() override { - auto module = getOperation(); - GlobalCreator globals(module, alignment); - - auto *context = &getContext(); - bufferization::BufferizeTypeConverter typeConverter; - RewritePatternSet patterns(context); - ConversionTarget target(*context); - - target.addLegalDialect(); - populateTensorConstantBufferizePatterns(globals, typeConverter, patterns); - target.addDynamicallyLegalOp([&](arith::ConstantOp op) { - return typeConverter.isLegal(op.getType()); - }); - if (failed(applyPartialConversion(module, target, std::move(patterns)))) - signalPassFailure(); - } -}; -} // namespace - -std::unique_ptr -mlir::createTensorConstantBufferizePass(unsigned alignment) { - return std::make_unique(alignment); -} diff --git a/mlir/test/Dialect/Arithmetic/bufferize.mlir b/mlir/test/Dialect/Arithmetic/bufferize.mlir --- a/mlir/test/Dialect/Arithmetic/bufferize.mlir +++ b/mlir/test/Dialect/Arithmetic/bufferize.mlir @@ -1,4 +1,5 @@ -// RUN: mlir-opt %s -arith-bufferize | FileCheck %s +// RUN: mlir-opt %s -arith-bufferize -split-input-file | FileCheck %s +// RUN: mlir-opt %s -arith-bufferize=alignment=64 -split-input-file | FileCheck --check-prefix=ALIGNED %s // CHECK-LABEL: func @index_cast( // CHECK-SAME: %[[TENSOR:.*]]: tensor, %[[SCALAR:.*]]: i32 @@ -12,3 +13,70 @@ // CHECK-SAME: memref to memref // CHECK-NEXT: %[[INDEX_TENSOR:.*]] = bufferization.to_tensor %[[INDEX_MEMREF]] // CHECK: return %[[INDEX_TENSOR]] + +// ----- + +// CHECK-LABEL: module { + +// We check the debug name too since we put some effort into making that readable. +// The name isn't load-bearing though. 
+ +// CHECK: memref.global "private" constant @__constant_3x4xf32 : memref<3x4xf32> = dense<7.000000e+00> +// CHECK-NOT: alignment + +// ALIGNED: memref.global "private" constant @__constant_3x4xf32 : memref<3x4xf32> = dense<7.000000e+00> +// ALIGNED-SAME: {alignment = 64 : i64} + +// CHECK: @basic +func @basic() -> tensor<3x4xf32> { + // CHECK: %[[MEMREF:.*]] = memref.get_global @__constant_3x4xf32 : memref<3x4xf32> + // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF]] + %0 = arith.constant dense<7.0> : tensor<3x4xf32> + // CHECK: return %[[TENSOR]] + return %0 : tensor<3x4xf32> +} + +// CHECK: } + +// ----- + +// CHECK-LABEL: module { + +// Only one global is created. +// CHECK: memref.global +// CHECK-NOT: memref.global +func @duplicate_constants() -> (tensor<3x4xf32>, tensor<3x4xf32>) { + %0 = arith.constant dense<7.0> : tensor<3x4xf32> + %1 = arith.constant dense<7.0> : tensor<3x4xf32> + return %0, %1 : tensor<3x4xf32>, tensor<3x4xf32> +} + +// CHECK: } + +// ----- + +// CHECK-LABEL: module { + +// Two globals are created. +// CHECK: memref.global +// CHECK: memref.global +// CHECK-NOT: memref.global +func @multiple_constants() -> (tensor<3x4xf32>, tensor<3x4xf32>) { + %0 = arith.constant dense<7.0> : tensor<3x4xf32> + %1 = arith.constant dense<8.0> : tensor<3x4xf32> + return %0, %1 : tensor<3x4xf32>, tensor<3x4xf32> +} + +// CHECK: } + +// ----- + +// CHECK-LABEL: module { +// We don't convert non-tensor globals. 
+// CHECK-NOT: memref.global +func @non_tensor() { + %0 = arith.constant 7 : i32 + return +} + +// CHECK: } diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir @@ -4,7 +4,7 @@ // RUN: FileCheck %s --check-prefix=CHECK-MIR // // RUN: mlir-opt %s -sparsification --sparse-tensor-conversion \ -// RUN: --func-bufferize --tensor-constant-bufferize \ +// RUN: --func-bufferize --arith-bufferize \ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-LIR diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir @@ -4,7 +4,7 @@ // RUN: FileCheck %s --check-prefix=CHECK-MIR // // RUN: mlir-opt %s -sparsification --sparse-tensor-conversion \ -// RUN: --func-bufferize --tensor-constant-bufferize \ +// RUN: --func-bufferize --arith-bufferize \ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-LIR diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir --- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir @@ -4,7 +4,7 @@ // RUN: FileCheck %s --check-prefix=CHECK-MIR // // RUN: mlir-opt %s -sparsification --sparse-tensor-conversion \ -// RUN: --func-bufferize --tensor-constant-bufferize \ +// RUN: --func-bufferize --arith-bufferize \ // RUN: --tensor-bufferize --finalizing-bufferize | \ // RUN: FileCheck %s --check-prefix=CHECK-LIR diff --git a/mlir/test/Dialect/Standard/tensor-constant-bufferize.mlir b/mlir/test/Dialect/Standard/tensor-constant-bufferize.mlir deleted file mode 100644 --- 
a/mlir/test/Dialect/Standard/tensor-constant-bufferize.mlir +++ /dev/null @@ -1,67 +0,0 @@ -// RUN: mlir-opt %s -tensor-constant-bufferize -split-input-file | FileCheck %s -// RUN: mlir-opt %s -tensor-constant-bufferize=alignment=64 -split-input-file | FileCheck --check-prefix=ALIGNED %s - -// CHECK-LABEL: module { - -// We check the debug name too since we put some effort into making that readable. -// The name isn't load-bearing though. - -// CHECK: memref.global "private" constant @__constant_3x4xf32 : memref<3x4xf32> = dense<7.000000e+00> -// CHECK-NOT: alignment - -// ALIGNED: memref.global "private" constant @__constant_3x4xf32 : memref<3x4xf32> = dense<7.000000e+00> -// ALIGNED-SAME: {alignment = 64 : i64} - -// CHECK: @basic -func @basic() -> tensor<3x4xf32> { - // CHECK: %[[MEMREF:.*]] = memref.get_global @__constant_3x4xf32 : memref<3x4xf32> - // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF]] - %0 = arith.constant dense<7.0> : tensor<3x4xf32> - // CHECK: return %[[TENSOR]] - return %0 : tensor<3x4xf32> -} - -// CHECK: } - -// ----- - -// CHECK-LABEL: module { - -// Only one global is created. -// CHECK: memref.global -// CHECK-NOT: memref.global -func @duplicate_constants() -> (tensor<3x4xf32>, tensor<3x4xf32>) { - %0 = arith.constant dense<7.0> : tensor<3x4xf32> - %1 = arith.constant dense<7.0> : tensor<3x4xf32> - return %0, %1 : tensor<3x4xf32>, tensor<3x4xf32> -} - -// CHECK: } - -// ----- - -// CHECK-LABEL: module { - -// Two globals are created. -// CHECK: memref.global -// CHECK: memref.global -// CHECK-NOT: memref.global -func @multiple_constants() -> (tensor<3x4xf32>, tensor<3x4xf32>) { - %0 = arith.constant dense<7.0> : tensor<3x4xf32> - %1 = arith.constant dense<8.0> : tensor<3x4xf32> - return %0, %1 : tensor<3x4xf32>, tensor<3x4xf32> -} - -// CHECK: } - -// ----- - -// CHECK-LABEL: module { -// We don't convert non-tensor globals. 
-// CHECK-NOT: memref.global -func @non_tensor() { - %0 = arith.constant 7 : i32 - return -} - -// CHECK: } diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-collapse-tensor.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -linalg-bufferize -std-bufferize \ -// RUN: -tensor-constant-bufferize -tensor-bufferize -func-bufferize \ +// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation -convert-linalg-to-llvm \ // RUN: -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-elementwise.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -convert-elementwise-to-linalg -std-bufferize \ -// RUN: -tensor-constant-bufferize -linalg-bufferize -tensor-bufferize \ +// RUN: -arith-bufferize -linalg-bufferize -tensor-bufferize \ // RUN: -func-bufferize -buffer-deallocation -convert-linalg-to-loops \ // RUN: -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm \ // RUN: -reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-expand-tensor.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -linalg-bufferize -std-bufferize \ -// RUN: -tensor-constant-bufferize -tensor-bufferize -func-bufferize \ +// RUN: -arith-bufferize -tensor-bufferize 
-func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation -convert-linalg-to-llvm \ // RUN: -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-padtensor.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -linalg-bufferize -std-bufferize \ -// RUN: -tensor-constant-bufferize -tensor-bufferize -func-bufferize \ +// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation \ // RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert-multiple-uses.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -linalg-bufferize -std-bufferize \ -// RUN: -tensor-constant-bufferize -tensor-bufferize -func-bufferize \ +// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation \ // RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir --- 
a/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-subtensor-insert.mlir @@ -1,5 +1,5 @@ // RUN: mlir-opt %s -linalg-bufferize -std-bufferize \ -// RUN: -tensor-constant-bufferize -tensor-bufferize -func-bufferize \ +// RUN: -arith-bufferize -tensor-bufferize -func-bufferize \ // RUN: -finalizing-bufferize -buffer-deallocation \ // RUN: -convert-linalg-to-loops -convert-scf-to-std -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-e2e.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -tensor-constant-bufferize -std-bufferize -linalg-bufferize \ +// RUN: mlir-opt %s -arith-bufferize -std-bufferize -linalg-bufferize \ // RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops \ // RUN: -convert-linalg-to-llvm --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-tensor-matmul.mlir @@ -1,5 +1,5 @@ // UNSUPPORTED: asan -// RUN: mlir-opt %s -linalg-bufferize -std-bufferize -tensor-constant-bufferize \ +// RUN: mlir-opt %s -linalg-bufferize -std-bufferize -arith-bufferize \ // RUN: -tensor-bufferize -func-bufferize -finalizing-bufferize -buffer-deallocation -convert-linalg-to-loops -convert-scf-to-std \ // RUN: -convert-linalg-to-llvm 
-lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ @@ -7,7 +7,7 @@ // RUN: | FileCheck %s // RUN: mlir-opt %s -linalg-tile="tile-sizes=1,2,3" -linalg-bufferize \ -// RUN: -scf-bufferize -std-bufferize -tensor-constant-bufferize -tensor-bufferize \ +// RUN: -scf-bufferize -std-bufferize -arith-bufferize -tensor-bufferize \ // RUN: -func-bufferize \ // RUN: -finalizing-bufferize -convert-linalg-to-loops -convert-scf-to-std -convert-scf-to-std \ // RUN: -convert-linalg-to-llvm -lower-affine -convert-scf-to-std --convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_cast.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize 
--tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_constant_to_sparse_tensor.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification 
--sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_dyn.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_ptr.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: 
--convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_conversion_sparse2dense.mlir @@ -2,7 +2,7 @@ // RUN: -sparsification -sparse-tensor-conversion \ // RUN: -linalg-bufferize -convert-linalg-to-loops \ // RUN: -convert-vector-to-scf -convert-scf-to-std \ -// RUN: -func-bufferize -tensor-constant-bufferize -tensor-bufferize \ +// RUN: -func-bufferize -arith-bufferize -tensor-bufferize \ // RUN: -std-bufferize -finalizing-bufferize \ // RUN: -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm \ // RUN: -reconcile-unrealized-casts \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_filter_conv2d.mlir @@ -2,7 +2,7 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ @@ -17,7 +17,7 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \ // RUN: 
--convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.tns" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir +++ 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matmul.mlir @@ -3,7 +3,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matrix_ops.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: 
--std-bufferize --finalizing-bufferize \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=16 enable-simd-index32" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ 
// RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/mttkrp_b.tns" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_mult_elt.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_reduction.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir 
b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir @@ -2,7 +2,7 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize 
--finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ @@ -17,7 +17,7 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions.mlir @@ -2,7 +2,7 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ @@ -17,7 +17,7 @@ // RUN: --linalg-generalize-named-ops --linalg-fuse-elementwise-ops \ // RUN: --sparsification="vectorization-strategy=2 vl=8" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: 
--convert-vector-to-llvm --convert-memref-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=4 enable-simd-index32" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_mm_fusion.mlir @@ -3,7 +3,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: 
--convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ @@ -19,7 +19,7 @@ // RUN: --sparsification="vectorization-strategy=2 vl=8" --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_scale.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner \ @@ -14,7 +14,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=4" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// 
RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/wide.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification 
--sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: mlir-cpu-runner \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir @@ -1,7 +1,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \ @@ -15,7 +15,7 @@ // RUN: mlir-opt %s \ // RUN: --sparsification="vectorization-strategy=2 vl=2" --sparse-tensor-conversion \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-std-to-llvm --reconcile-unrealized-casts | \ // RUN: TENSOR0="%mlir_integration_test_dir/data/test_symmetric.mtx" \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir --- 
a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_tensor_ops.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir --- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_vector_ops.mlir @@ -2,7 +2,7 @@ // RUN: --sparsification --sparse-tensor-conversion \ // RUN: --linalg-bufferize --convert-linalg-to-loops \ // RUN: --convert-vector-to-scf --convert-scf-to-std \ -// RUN: --func-bufferize --tensor-constant-bufferize --tensor-bufferize \ +// RUN: --func-bufferize --arith-bufferize --tensor-bufferize \ // RUN: --std-bufferize --finalizing-bufferize --lower-affine \ // RUN: --convert-vector-to-llvm --convert-memref-to-llvm --convert-math-to-llvm \ // RUN: --convert-std-to-llvm --reconcile-unrealized-casts | \ diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SDDMM.py @@ -129,7 +129,7 @@ f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' f'convert-scf-to-std,' f'func-bufferize,' - f'tensor-constant-bufferize,' + f'arith-bufferize,' 
f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),' f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},' f'lower-affine,' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_SpMM.py @@ -119,7 +119,7 @@ f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' f'convert-scf-to-std,' f'func-bufferize,' - f'tensor-constant-bufferize,' + f'arith-bufferize,' f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),' f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},' f'lower-affine,' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_elementwise_add_sparse_output.py @@ -71,7 +71,7 @@ f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' f'convert-scf-to-std,' f'func-bufferize,' - f'tensor-constant-bufferize,' + f'arith-bufferize,' f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),' f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},' f'lower-affine,' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py @@ -79,7 +79,7 @@ f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' f'convert-scf-to-std,' f'func-bufferize,' 
- f'tensor-constant-bufferize,' + f'arith-bufferize,' f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),' f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},' f'lower-affine,' diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py --- a/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py +++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_stress.py @@ -177,7 +177,7 @@ f'builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),' f'convert-scf-to-std,' f'func-bufferize,' - f'tensor-constant-bufferize,' + f'arith-bufferize,' f'builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize),' f'convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}},' f'lower-affine,' diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py --- a/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py +++ b/mlir/test/Integration/Dialect/SparseTensor/taco/tools/mlir_pytaco.py @@ -136,7 +136,7 @@ def _compile_mlir(module: ir.Module) -> ir.Module: """Compiles an MLIR module and returns the compiled module.""" - # TODO: Replace this with a pipeline implemented for + # TODO: Replace this with a pipeline implemented for # https://github.com/llvm/llvm-project/issues/51751. 
pipeline = ( f"sparsification," @@ -144,7 +144,7 @@ f"builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf)," f"convert-scf-to-std," f"func-bufferize," - f"tensor-constant-bufferize," + f"arith-bufferize," f"builtin.func(tensor-bufferize,std-bufferize,finalizing-bufferize)," f"convert-vector-to-llvm{{reassociate-fp-reductions=1 enable-index-optimizations=1}}," f"lower-affine," diff --git a/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp b/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp --- a/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp +++ b/mlir/test/lib/Dialect/Linalg/TestComprehensiveBufferize.cpp @@ -116,9 +116,9 @@ options->createDeallocs = createDeallocs; if (dialectFilter.hasValue()) { - options->dialectFilter.emplace(); + options->hasFilter = true; for (const std::string &dialectNamespace : dialectFilter) - options->dialectFilter->insert(dialectNamespace); + options->dialectFilter.insert(dialectNamespace); } Operation *op = getOperation();