diff --git a/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
--- a/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h
@@ -41,7 +41,7 @@
 // TODO: Could be replaced with a "bufferization strategy" object with virtual
 // functions in the future.
 struct AllocationCallbacks {
-  using AllocationFn = std::function<Optional<Value>(
+  using AllocationFn = std::function<FailureOr<Value>(
       OpBuilder &, Location, MemRefType, ArrayRef<Value>)>;
   using DeallocationFn = std::function<void(OpBuilder &, Location, Value)>;
   using MemCpyFn = std::function<void(OpBuilder &, Location, Value, Value)>;
@@ -361,15 +361,15 @@
   Value findLastPrecedingWrite(Value value);

   /// Creates a memref allocation.
-  Optional<Value> createAlloc(OpBuilder &b, Location loc, MemRefType type,
-                              ArrayRef<Value> dynShape);
+  FailureOr<Value> createAlloc(OpBuilder &b, Location loc, MemRefType type,
+                               ArrayRef<Value> dynShape);

   /// Creates a memref allocation for the given shaped value. This function may
   /// perform additional optimizations such as buffer allocation hoisting. If
   /// `createDealloc`, a deallocation op is inserted at the point where the
   /// allocation goes out of scope.
-  Value createAlloc(OpBuilder &b, Location loc, Value shapedValue,
-                    bool deallocMemref = true);
+  FailureOr<Value> createAlloc(OpBuilder &b, Location loc, Value shapedValue,
+                               bool deallocMemref = true);

   /// Creates a memref deallocation. The given memref buffer must have been
   /// allocated using `createAlloc`.
diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp
@@ -41,9 +41,9 @@

 /// Default allocation function that is used by the comprehensive bufferization
 /// pass. The default currently creates a ranked memref using `memref.alloc`.
-static Optional<Value> defaultAllocationFn(OpBuilder &b, Location loc,
-                                           MemRefType type,
-                                           ArrayRef<Value> dynShape) {
+static FailureOr<Value> defaultAllocationFn(OpBuilder &b, Location loc,
+                                            MemRefType type,
+                                            ArrayRef<Value> dynShape) {
   Value allocated = b.create<memref::AllocOp>(
       loc, type, dynShape, b.getI64IntegerAttr(kBufferAlignments));
   return allocated;
@@ -392,8 +392,10 @@
   // allocation should be inserted (in the absence of allocation hoisting).
   setInsertionPointAfter(rewriter, operandBuffer);
   // Allocate the result buffer.
-  Value resultBuffer =
+  FailureOr<Value> resultBuffer =
       createAlloc(rewriter, loc, operandBuffer, options.createDeallocs);
+  if (failed(resultBuffer))
+    return failure();
   bool skipCopy = false;
   // Do not copy if the last preceding write of `operand` is an op that does
   // not write (skipping ops that merely create aliases). E.g., InitTensorOp.
@@ -414,7 +416,7 @@
   if (!skipCopy) {
     // The copy happens right before the op that is bufferized.
     rewriter.setInsertionPoint(op);
-    createMemCpy(rewriter, loc, operandBuffer, resultBuffer);
+    createMemCpy(rewriter, loc, operandBuffer, *resultBuffer);
   }
   return resultBuffer;
 }
@@ -539,7 +541,8 @@
 /// Create an AllocOp/DeallocOp pair, where the AllocOp is after
 /// `shapedValue.getDefiningOp` (or at the top of the block in case of a
 /// bbArg) and the DeallocOp is at the end of the block.
-Value mlir::linalg::comprehensive_bufferize::BufferizationState::createAlloc(
+FailureOr<Value>
+mlir::linalg::comprehensive_bufferize::BufferizationState::createAlloc(
     OpBuilder &b, Location loc, Value shapedValue, bool deallocMemref) {
   // Take a guard before anything else.
   OpBuilder::InsertionGuard g(b);
@@ -551,10 +554,9 @@
   // Note: getAllocationTypeAndShape also sets the insertion point.
   MemRefType allocMemRefType =
       getAllocationTypeAndShape(b, loc, shapedValue, dynShape);
-  Optional<Value> allocated = createAlloc(b, loc, allocMemRefType, dynShape);
-  // TODO: For now just assert the value is returned. Eventually need to
-  // error-propagate.
-  assert(allocated && "allocation failed");
+  FailureOr<Value> allocated = createAlloc(b, loc, allocMemRefType, dynShape);
+  if (failed(allocated))
+    return failure();
   Value casted = allocated.getValue();
   if (memRefType && memRefType != allocMemRefType) {
     casted = b.create<memref::CastOp>(loc, memRefType, allocated.getValue());
@@ -570,7 +572,7 @@
 }

 /// Create a memref allocation.
-Optional<Value>
+FailureOr<Value>
 mlir::linalg::comprehensive_bufferize::BufferizationState::createAlloc(
     OpBuilder &b, Location loc, MemRefType type, ArrayRef<Value> dynShape) {
   return options.allocationFns->allocationFn(b, loc, type, dynShape);
diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/LinalgInterfaceImpl.cpp
@@ -55,6 +55,8 @@
     OpResult opResult = op.getTiedOpResult(opOperand);
     assert(opResult && "could not find correspond OpResult");
     FailureOr<Value> resultBuffer = state.getResultBuffer(rewriter, opResult);
+    if (failed(resultBuffer))
+      return failure();
     newOutputBuffers.push_back(*resultBuffer);
   }

@@ -210,10 +212,12 @@
     if (initTensorOp->getUses().empty())
       return success();

-    Value alloc = state.createAlloc(rewriter, initTensorOp->getLoc(),
-                                    initTensorOp.result(),
-                                    state.getOptions().createDeallocs);
-    state.replaceOpWithBufferizedValues(rewriter, op, alloc);
+    FailureOr<Value> alloc = state.createAlloc(
+        rewriter, initTensorOp->getLoc(), initTensorOp.result(),
+        state.getOptions().createDeallocs);
+    if (failed(alloc))
+      return failure();
+    state.replaceOpWithBufferizedValues(rewriter, op, *alloc);
     return success();
   }
 };
@@ -286,6 +290,8 @@
       if (value.getType().isa<TensorType>()) {
         FailureOr<Value> buffer = state.getResultBuffer(
             rewriter, tiledLoopOp->getResult(nextResultNum++));
+        if (failed(buffer))
+          return failure();
         newOutputs.push_back(*buffer);
         newResults.push_back(*buffer);
       } else {
diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/SCFInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/SCFInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/SCFInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/SCFInterfaceImpl.cpp
@@ -295,10 +295,19 @@
     };

     // Construct a new scf.for op with memref instead of tensor values.
+    bool resultBufferFailure = false;
     SmallVector<Value> initArgs =
         convert(forOp.getInitArgs(), [&](Value val, int64_t index) {
-          return *state.getResultBuffer(rewriter, forOp->getOpResult(index));
+          FailureOr<Value> resultBuffer =
+              state.getResultBuffer(rewriter, forOp->getOpResult(index));
+          if (failed(resultBuffer)) {
+            resultBufferFailure = true;
+            return Value();
+          }
+          return *resultBuffer;
         });
+    if (resultBufferFailure)
+      return failure();
     auto newForOp = rewriter.create<scf::ForOp>(
         forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(),
         forOp.getStep(), initArgs);
diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp
@@ -70,6 +70,8 @@
     // The result buffer still has the old (pre-cast) type.
     FailureOr<Value> resultBuffer =
        state.getResultBuffer(rewriter, castOp->getResult(0));
+    if (failed(resultBuffer))
+      return failure();
     auto sourceMemRefType = resultBuffer->getType().cast<BaseMemRefType>();
     Attribute memorySpace = sourceMemRefType.getMemorySpace();
     TensorType resultTensorType =
@@ -166,9 +168,14 @@
     // If not inplaceable, alloc.
     bool inplace = state.isInPlace(extractSliceOp->getResult(0));
     Value alloc;
-    if (!inplace)
-      alloc = state.createAlloc(rewriter, loc, extractSliceOp.result(),
-                                state.getOptions().createDeallocs);
+    if (!inplace) {
+      FailureOr<Value> allocOrFailure =
+          state.createAlloc(rewriter, loc, extractSliceOp.result(),
+                            state.getOptions().createDeallocs);
+      if (failed(allocOrFailure))
+        return failure();
+      alloc = *allocOrFailure;
+    }

     // Bufferize to subview.
     auto subviewMemRefType =
@@ -255,6 +262,8 @@
     auto insertOp = cast<tensor::InsertOp>(op);
     FailureOr<Value> destMemref =
         state.getResultBuffer(rewriter, insertOp->getOpResult(0));
+    if (failed(destMemref))
+      return failure();
     rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.scalar(),
                                      *destMemref, insertOp.indices());
     state.replaceOpWithBufferizedValues(rewriter, op, *destMemref);
@@ -438,6 +447,8 @@
     // When bufferizing out-of-place, `getResultBuffer` allocates.
     FailureOr<Value> dstMemref =
         state.getResultBuffer(rewriter, insertSliceOp->getResult(0));
+    if (failed(dstMemref))
+      return failure();

     bool needCopy =
         !tensorState.insertSliceOpsWithoutCopy.contains(insertSliceOp);
diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/VectorInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/VectorInterfaceImpl.cpp
--- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/VectorInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/VectorInterfaceImpl.cpp
@@ -100,6 +100,8 @@
     // this point.
     FailureOr<Value> resultBuffer =
         state.getResultBuffer(rewriter, op->getResult(0));
+    if (failed(resultBuffer))
+      return failure();
     rewriter.create<vector::TransferWriteOp>(
         writeOp.getLoc(), writeOp.vector(), *resultBuffer, writeOp.indices(),
         writeOp.permutation_mapAttr(), writeOp.in_boundsAttr());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferizePass.cpp
@@ -64,9 +64,9 @@
   (void)applyPatternsAndFoldGreedily(moduleOp, std::move(patterns));
 }

-static Optional<Value> allocationFnUsingAlloca(OpBuilder &b, Location loc,
-                                               MemRefType type,
-                                               ArrayRef<Value> dynShape) {
+static FailureOr<Value> allocationFnUsingAlloca(OpBuilder &b, Location loc,
+                                                MemRefType type,
+                                                ArrayRef<Value> dynShape) {
   Value allocated = b.create<memref::AllocaOp>(
       loc, type, dynShape, b.getI64IntegerAttr(kBufferAlignments));
   return allocated;
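
Note for downstream users of AllocationCallbacks: after this patch, a custom allocation callback returns FailureOr<Value> instead of Optional<Value>, and callers propagate errors with failed()/failure() rather than asserting. The following is a minimal sketch of a callback with the new signature; the function name, the failure condition, and the fixed alignment constant are illustrative only and are not part of this patch (the pass itself uses its own kBufferAlignments constant and default callbacks).

#include "mlir/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Support/LogicalResult.h"

using namespace mlir;

// Hypothetical allocation callback matching the new AllocationFn signature.
static FailureOr<Value> exampleAllocationFn(OpBuilder &b, Location loc,
                                            MemRefType type,
                                            ArrayRef<Value> dynShape) {
  // Illustrative failure condition: signal failure (instead of asserting) if
  // the number of provided dynamic sizes does not match the memref type.
  if (type.getNumDynamicDims() != static_cast<int64_t>(dynShape.size()))
    return failure();
  // Same memref.alloc as the default callback, with a fixed alignment as a
  // stand-in for the pass's kBufferAlignments constant.
  Value allocated = b.create<memref::AllocOp>(loc, type, dynShape,
                                              b.getI64IntegerAttr(128));
  return allocated;
}

A callback of this shape would then be installed through the AllocationCallbacks struct shown in the header hunk above; the diff only shows the callback being invoked via options.allocationFns->allocationFn, so the exact registration code is an assumption.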