diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
@@ -26,10 +26,6 @@
   /// Specifies whether returning newly allocated memrefs should be allowed.
   /// Otherwise, a pass failure is triggered.
   bool allowReturnAllocs = false;
-
-  /// Specifies whether buffer return values that are equivalent to a FuncOp
-  /// bbArg should be dropped.
-  bool dropEquivalentFuncResults = true;
 };
 
 /// The BufferizationAliasInfo class maintains a list of buffer aliases and
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
@@ -39,6 +39,13 @@
 /// Also update all call sites.
 LogicalResult promoteBufferResultsToOutParams(ModuleOp module);
 
+/// Creates a pass that drops memref function results that are equivalent to a
+/// function argument.
+std::unique_ptr<Pass> createDropEquivalentBufferResultsPass();
+
+/// Drop all memref function results that are equivalent to a function argument.
+LogicalResult dropEquivalentBufferResults(ModuleOp module);
+
 /// Creates a pass that finalizes a partial bufferization by removing remaining
 /// bufferization.to_tensor and bufferization.to_memref operations.
 std::unique_ptr<OperationPass<func::FuncOp>> createFinalizingBufferizePass();
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -154,6 +154,20 @@
   let constructor = "mlir::bufferization::createBufferizationBufferizePass()";
 }
 
+def DropEquivalentBufferResults : Pass<"drop-equivalent-buffer-results", "ModuleOp"> {
+  let summary = "Remove MemRef return values that are equivalent to a bbArg";
+  let description = [{
+    This pass removes MemRef return values from functions if they are
+    equivalent to a function bbArg. In that case, the return value is
+    redundant and the respective CallOp operand can be used at the call site.
+
+    Note: If a bbArg buffer is not returned directly but a cast of it is, the
+    buffer is still considered equivalent.
+  }];
+  let constructor = "mlir::bufferization::createDropEquivalentBufferResultsPass()";
+  let dependentDialects = ["memref::MemRefDialect"];
+}
+
 def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
   let summary = "One-Shot Bufferize";
   let description = [{
@@ -221,11 +235,6 @@
     and supports only simple cases at the moment. In particular:
 
    * Recursive or circular function call graphs are not supported.
-   * When a returned tensor can be proven to be equivalent to a tensor function
-     argument, the return value disappears. Instead, the buffer of the tensor
-     argument is modified in-place.
-   * Returning non-equivalent tensors is forbidden by default and must be
-     explicitly activated with `allow-return-allocs`.
    * External functions (without bodies) that return a tensor are not supported.
    * Function with multiple blocks or multiple ReturnOps are not supported.
@@ -255,9 +264,6 @@
     `test-analysis-only`.
  }];
  let options = [
-    Option<"dropEquivalentFuncResults", "drop-equivalent-func-results", "bool",
-           /*default=*/"true",
-           "Drop buffer return values that are equivalent to a FuncOp arg.">,
     Option<"allowReturnAllocs", "allow-return-allocs", "bool",
            /*default=*/"false",
            "Allows returning/yielding new allocations from a block.">,
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -180,7 +180,6 @@
     if (!options) {
       // Make new bufferization options if none were provided when creating the
       // pass.
-      opt.dropEquivalentFuncResults = dropEquivalentFuncResults;
       opt.allowReturnAllocs = allowReturnAllocs;
       opt.allowUnknownOps = allowUnknownOps;
       opt.alwaysAliasingWithDest = alwaysAliasingWithDest;
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
--- a/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
@@ -5,6 +5,7 @@
   BufferOptimizations.cpp
   BufferResultsToOutParams.cpp
   BufferUtils.cpp
+  DropEquivalentBufferResults.cpp
   FuncBufferizableOpInterfaceImpl.cpp
   OneShotAnalysis.cpp
   OneShotModuleBufferize.cpp
@@ -26,3 +27,4 @@
   MLIRTensor
   MLIRTransforms
 )
+
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
new file
--- /dev/null
+++ b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
@@ -0,0 +1,151 @@
+//===- DropEquivalentBufferResults.cpp - Calling convention conversion ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass drops return values from functions if they are equivalent to one of
+// their arguments. E.g.:
+//
+// ```
+// func.func @foo(%m : memref<?xf32>) -> (memref<?xf32>) {
+//   return %m : memref<?xf32>
+// }
+// ```
+//
+// This function is rewritten to:
+//
+// ```
+// func.func @foo(%m : memref<?xf32>) {
+//   return
+// }
+// ```
+//
+// All call sites are updated accordingly. If a function returns a cast of a
+// function argument, it is also considered equivalent. A cast is inserted at
+// the call site in that case.
+
+#include "PassDetail.h"
+#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/Pass/Pass.h"
+
+using namespace mlir;
+
+/// Return the unique ReturnOp that terminates `funcOp`.
+/// Return nullptr if there is no such unique ReturnOp.
+static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
+  func::ReturnOp returnOp;
+  for (Block &b : funcOp.getBody()) {
+    if (auto candidateOp = dyn_cast<func::ReturnOp>(b.getTerminator())) {
+      if (returnOp)
+        return nullptr;
+      returnOp = candidateOp;
+    }
+  }
+  return returnOp;
+}
+
+/// Return the func::FuncOp called by `callOp`.
+static func::FuncOp getCalledFunction(CallOpInterface callOp) {
+  SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
+  if (!sym)
+    return nullptr;
+  return dyn_cast_or_null<func::FuncOp>(
+      SymbolTable::lookupNearestSymbolFrom(callOp, sym));
+}
+
+LogicalResult
+mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
+  IRRewriter rewriter(module.getContext());
+
+  for (auto funcOp : module.getOps<func::FuncOp>()) {
+    if (funcOp.isExternal())
+      continue;
+    func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
+    // TODO: Support functions with multiple blocks.
+    if (!returnOp)
+      continue;
+
+    // Compute erased results.
+    SmallVector<Value> newReturnValues;
+    BitVector erasedResultIndices(funcOp.getFunctionType().getNumResults());
+    DenseMap<int64_t, int64_t> resultToArgs;
+    for (const auto &it : llvm::enumerate(returnOp.operands())) {
+      bool erased = false;
+      for (BlockArgument bbArg : funcOp.getArguments()) {
+        Value val = it.value();
+        while (auto castOp = val.getDefiningOp<memref::CastOp>())
+          val = castOp.source();
+
+        if (val == bbArg) {
+          resultToArgs[it.index()] = bbArg.getArgNumber();
+          erased = true;
+          break;
+        }
+      }
+
+      if (erased) {
+        erasedResultIndices.set(it.index());
+      } else {
+        newReturnValues.push_back(it.value());
+      }
+    }
+
+    // Update function.
+    funcOp.eraseResults(erasedResultIndices);
+    returnOp.operandsMutable().assign(newReturnValues);
+
+    // Update function calls.
+    module.walk([&](func::CallOp callOp) {
+      if (getCalledFunction(callOp) != funcOp)
+        return WalkResult::skip();
+
+      rewriter.setInsertionPoint(callOp);
+      auto newCallOp = rewriter.create<func::CallOp>(callOp.getLoc(), funcOp,
+                                                     callOp.operands());
+      SmallVector<Value> newResults;
+      int64_t nextResult = 0;
+      for (int64_t i = 0; i < callOp.getNumResults(); ++i) {
+        if (!resultToArgs.count(i)) {
+          // This result was not erased.
+          newResults.push_back(newCallOp.getResult(nextResult++));
+          continue;
+        }
+
+        // This result was erased.
+        Value replacement = callOp.getOperand(resultToArgs[i]);
+        Type expectedType = callOp.getResult(i).getType();
+        if (replacement.getType() != expectedType) {
+          // A cast must be inserted at the call site.
+          replacement = rewriter.create<memref::CastOp>(
+              callOp.getLoc(), expectedType, replacement);
+        }
+        newResults.push_back(replacement);
+      }
+      rewriter.replaceOp(callOp, newResults);
+      return WalkResult::advance();
+    });
+  }
+
+  return success();
+}
+
+namespace {
+struct DropEquivalentBufferResultsPass
+    : DropEquivalentBufferResultsBase<DropEquivalentBufferResultsPass> {
+  void runOnOperation() override {
+    if (failed(bufferization::dropEquivalentBufferResults(getOperation())))
+      return signalPassFailure();
+  }
+};
+} // namespace
+
+std::unique_ptr<Pass>
+mlir::bufferization::createDropEquivalentBufferResultsPass() {
+  return std::make_unique<DropEquivalentBufferResultsPass>();
+}
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -223,7 +223,31 @@
   BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                 const AnalysisState &state) const {
-    return BufferRelation::Equivalent;
+    func::CallOp callOp = cast<func::CallOp>(op);
+    FuncOp funcOp = getCalledFunction(callOp);
+    assert(funcOp && "expected CallOp to a FuncOp");
+    const FuncAnalysisState &funcState = getFuncAnalysisState(state);
+    if (getFuncOpAnalysisState(state, funcOp) !=
+        FuncOpAnalysisState::Analyzed) {
+      // Function not analyzed yet. The conservative answer is "None".
+ return BufferRelation::None; + } + + Optional maybeEquiv = + getEquivalentFuncArgIdx(funcOp, funcState, opResult.getResultNumber()); + if (maybeEquiv.hasValue()) { +#ifndef NDEBUG + SmallVector aliasingOpOperands = + getAliasingOpOperand(op, opResult, state); + assert(aliasingOpOperands.size() == 1 && + "expected exactly 1 aliasing OpOperand"); + assert(aliasingOpOperands.front()->getOperandNumber() == + maybeEquiv.getValue() && + "inconsistent analysis state"); +#endif + return BufferRelation::Equivalent; + } + return BufferRelation::None; } /// All function arguments are writable. It is the responsibility of the @@ -236,10 +260,6 @@ FuncOp funcOp = getCalledFunction(callOp); assert(funcOp && "expected CallOp to a FuncOp"); FunctionType funcType = funcOp.getFunctionType(); - const FuncAnalysisState &funcState = - getFuncAnalysisState(state.getAnalysisState()); - const OneShotBufferizationOptions &options = - static_cast(state.getOptions()); // Result types of the bufferized CallOp. SmallVector resultTypes; @@ -252,22 +272,7 @@ // Operands of the bufferized CallOp. SmallVector newOperands(numOperands, Value()); - // Based on previously gathered equivalence information, we know if a - // tensor result folds onto an operand. These are the only tensor value - // results that are supported at the moment. - // - // For tensors return values that do not fold onto an operand, additional - // work is needed (TODO) to either: - // * hoist a result into an inplaceable operand or - // * devise a better representation to truly return a buffer. - // - // Note: If a function has no body, no equivalence information is - // available. Consequently, a tensor return value cannot be proven to fold - // onto a FuncOp bbArg, so calls to such functions are not bufferizable at - // the moment. - - // 1. Compute the result types of the new CallOp. Tensor results that are - // equivalent to a FuncOp bbArg are no longer returned. + // 1. Compute the result types of the new CallOp. for (const auto &it : llvm::enumerate(callOp.getResultTypes())) { unsigned returnValIdx = it.index(); Type returnType = it.value(); @@ -278,28 +283,7 @@ continue; } - if (options.dropEquivalentFuncResults) { - if (Optional bbArgIdx = - getEquivalentFuncArgIdx(funcOp, funcState, returnValIdx)) { - // Return operands that are equivalent to some bbArg, are not - // returned. - FailureOr bufferOrFailure = - state.getBuffer(rewriter, callOp->getOpOperand(*bbArgIdx)); - if (failed(bufferOrFailure)) - return failure(); - replacementValues[returnValIdx] = *bufferOrFailure; - newOperands[*bbArgIdx] = *bufferOrFailure; - continue; - } - } - - if (!options.allowReturnAllocs) - return callOp->emitError( - "call to FuncOp that returns non-equivalent tensors not supported"); - - // Returning a memref. This memref is not equivalent to any bbArg. It is - // likely a newly allocated buffer. We may want to hoist such allocations - // to the call site in the future. + // Returning a memref. retValMapping[returnValIdx] = resultTypes.size(); resultTypes.push_back(funcType.getResult(resultTypes.size())); } @@ -315,9 +299,7 @@ continue; } - // Retrieve buffers for tensor operands. Tensor operand buffers, who's - // corresponding FuncOp bbArgs are equivalent to a returned tensor, were - // already stored in `newOperands` during Step 1. + // Retrieve buffers for tensor operands. 
Value buffer = newOperands[idx]; if (!buffer) { FailureOr bufferOrFailure = state.getBuffer(rewriter, opOperand); @@ -348,7 +330,7 @@ Operation *newCallOp = rewriter.create( callOp.getLoc(), funcOp.getSymName(), resultTypes, newOperands); newCallOp->setAttrs(callOp->getAttrs()); - // Get replacement values for non-tensor / non-equivalent results. + // Get replacement values. for (unsigned i = 0; i < replacementValues.size(); ++i) { if (replacementValues[i]) continue; @@ -400,20 +382,12 @@ /// function body has been bufferized, function return types can be switched /// to more concise memref types as part of `foldMemRefCasts`. /// - /// When a tensor function argument is known to be equivalent to a tensor - /// result, it is dropped from the return values. - /// /// All function bbArgs are writable unless they are explicitly marked as /// read-only. Callers must insert copies when needed. - /// - /// Note: Returning a memref is possible, but corresponding CallOp - /// bufferizations fail unless `allowReturnAllocs`. LogicalResult bufferize(Operation *op, RewriterBase &rewriter, BufferizationState &state) const { auto funcOp = cast(op); FunctionType funcType = funcOp.getFunctionType(); - const FuncAnalysisState &funcState = - getFuncAnalysisState(state.getAnalysisState()); const OneShotBufferizationOptions &options = static_cast(state.getOptions()); @@ -492,24 +466,6 @@ continue; } - // If return operand is equivalent to some bbArg, no need to return it. - if (options.dropEquivalentFuncResults) { - if (Optional equivBbArgIdx = getEquivalentFuncArgIdx( - funcOp, funcState, returnOperand.getOperandNumber())) { - // TODO: Use memref type with fully dynamic layout map and add folder - // for memref.cast + memref.copy. - Value toMemrefOp = rewriter.create( - loc, getMemRefType(tensorType, options), returnVal); - BlockArgument equivBbArg = funcOp.getArgument(*equivBbArgIdx); - // Note: This copy will fold away. It must be inserted here to ensure - // that `returnVal` still has at least one use and does not fold away. - if (failed( - options.createMemCpy(rewriter, loc, toMemrefOp, equivBbArg))) - return funcOp->emitError("could not generate copy for bbArg"); - continue; - } - } - BaseMemRefType resultType; if (options.functionBoundaryTypeConversion == BufferizationOptions::LayoutMapOption::IdentityLayoutMap) { diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp --- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp @@ -24,19 +24,6 @@ // * `funcOpBbArgReadWriteAnalysis` determines whether or not a tensor bbArg is // read/written. // -// Only tensors that are equivalent to some FuncOp bbArg may be returned. -// Bufferization currently fails if other tensors (in particular tensors that -// bufferize out-of-place and result in a new buffer allocation) are returned. -// In the future, such allocations could be hoisted to the caller. -// -// Example: `foo` fails bufferization because %0 is not equivalent to any bbArg. -// ``` -// func @foo() -> tensor { -// %0 = bufferization.alloc_tensor(...) : tensor -// return %0 : tensor -// } -// ``` -// // Module Bufferization implements the following calling convention. // // * In the absence of conflicts within a FuncOp, the FuncOp's bbArgs may always @@ -464,17 +451,6 @@ foldMemRefCasts(funcOp); } - // Check result. 
- for (func::FuncOp funcOp : orderedFuncOps) { - if (!options.allowReturnAllocs && - llvm::any_of(funcOp.getFunctionType().getResults(), [](Type t) { - return t.isa(); - })) { - funcOp->emitError("memref return type is unsupported"); - return failure(); - } - } - // Post-pass cleanup of function argument attributes. moduleOp.walk([&](func::FuncOp op) { for (BlockArgument bbArg : op.getArguments()) diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -split-input-file | FileCheck %s -// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs drop-equivalent-func-results=false" -split-input-file | FileCheck %s --check-prefix=EQUIV +// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -drop-equivalent-buffer-results -split-input-file | FileCheck %s +// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -split-input-file | FileCheck %s --check-prefix=NO-DROP // Run fuzzer with different seeds. // RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null @@ -73,6 +73,6 @@ // CHECK-SAME: %[[A:.*]]: memref + // CHECK: return %[[call]], %[[casted]] : f32, memref } @@ -261,7 +261,7 @@ // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] // CHECK-NOT: copy // CHECK: call @does_not_read(%[[casted]]) -// CHECK: %[[r:.*]] = memref.load %[[alloc]] +// CHECK: %[[r:.*]] = memref.load %[[casted]] // CHECK: memref.dealloc %[[alloc]] func.func @main(%t: tensor {bufferization.writable = false}) -> f32 { %0 = call @does_not_read(%t) : (tensor) -> (tensor) @@ -472,7 +472,7 @@ %res = call @init_and_dot(%AA, %BB, %CC) : (tensor<64xf32>, tensor<64xf32>, tensor) -> tensor - // CHECK-NEXT: %[[dC:.*]] = memref.cast %[[C]] : memref to memref<*xf32> + // CHECK-NEXT: %[[dC:.*]] = memref.cast %[[cC]] : memref to memref<*xf32> %res2 = tensor.cast %res: tensor to tensor<*xf32> // CHECK-NEXT: call @printMemrefF32(%[[dC]]) : (memref<*xf32>) -> () @@ -562,9 +562,11 @@ %c0: index, %c10: index, %c1: index) -> tensor { // CHECK-NOT: alloc // CHECK-NOT: copy + // CHECK: scf.for {{.*}} iter_args(%[[t1:.*]] = %[[arg0]]) %1 = scf.for %iv = %c0 to %c10 step %c1 iter_args(%t1 = %t0) -> (tensor) { - // CHECK: call @inner_func(%[[arg0]]) + // CHECK: call @inner_func(%[[t1]]) %3 = func.call @inner_func(%t1) : (tensor) -> tensor + // CHECK: scf.yield %[[t1]] scf.yield %3 : tensor } return %1: tensor diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir --- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -buffer-loop-hoisting -split-input-file | FileCheck %s +// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -buffer-loop-hoisting -drop-equivalent-buffer-results -split-input-file | FileCheck %s // Run fuzzer 
with different seeds. // RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null @@ -6,7 +6,7 @@ // RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null // Test bufferization using memref types that have no layout map. -// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP +// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP // TODO: Some test cases from this file should be moved to other dialects. diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir --- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -buffer-deallocation -split-input-file | FileCheck %s +// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s // Run fuzzer with different seeds. // RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir --- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -split-input-file | FileCheck %s +// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s // Run fuzzer with different seeds. // RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
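
For illustration only (not part of the patch): a minimal sketch of the cast case that the new pass handles, using hypothetical functions `@callee` and `@caller` and a made-up `memref<5xf32>` buffer. With `-drop-equivalent-buffer-results`, the callee's result is dropped because it is a cast of a bbArg, and a `memref.cast` is re-created at the call site; exact SSA names, op order, and dead-op cleanup may differ.

```
// Before -drop-equivalent-buffer-results.
func.func @callee(%m: memref<5xf32>) -> memref<?xf32> {
  // The returned value is a cast of the bbArg, so it is considered equivalent.
  %0 = memref.cast %m : memref<5xf32> to memref<?xf32>
  return %0 : memref<?xf32>
}

func.func @caller(%m: memref<5xf32>) -> f32 {
  %c0 = arith.constant 0 : index
  %r = call @callee(%m) : (memref<5xf32>) -> memref<?xf32>
  %v = memref.load %r[%c0] : memref<?xf32>
  return %v : f32
}

// After the pass (dead ops cleaned up for brevity): the result is dropped and
// the cast is materialized at the call site instead.
func.func @callee(%m: memref<5xf32>) {
  return
}

func.func @caller(%m: memref<5xf32>) -> f32 {
  %c0 = arith.constant 0 : index
  call @callee(%m) : (memref<5xf32>) -> ()
  %cast = memref.cast %m : memref<5xf32> to memref<?xf32>
  %v = memref.load %cast[%c0] : memref<?xf32>
  return %v : f32
}
```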