diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp --- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizableOpInterface.cpp @@ -549,6 +549,9 @@ return failure(); Value casted = allocated.getValue(); if (memRefType && memRefType != allocMemRefType) { + assert(memref::CastOp::areCastCompatible(allocated.getValue().getType(), + memRefType) && + "createAlloc: cast incompatible"); casted = b.create(loc, memRefType, allocated.getValue()); } diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizationInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizationInterfaceImpl.cpp --- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizationInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/BufferizationInterfaceImpl.cpp @@ -77,9 +77,13 @@ // Insert cast in case to_memref(to_tensor(x))'s type is different from // x's type. - if (toTensorOp.memref().getType() != toMemrefOp.getType()) + if (toTensorOp.memref().getType() != toMemrefOp.getType()) { + assert(memref::CastOp::areCastCompatible(buffer.getType(), + toMemrefOp.getType()) && + "ToMemrefOp::bufferize : cast incompatible"); buffer = rewriter.create(toMemrefOp.getLoc(), buffer, toMemrefOp.getType()); + } replaceOpWithBufferizedValues(rewriter, toMemrefOp, buffer); return success(); } diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp --- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp +++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp @@ -386,7 +386,10 @@ // Replace all uses of bbArg through a ToMemRefOp by a memref::CastOp. 
for (auto &use : llvm::make_early_inc_range(bbArg.getUses())) { if (auto toMemrefOp = - dyn_cast(use.getOwner())) { + dyn_cast(use.getOwner())) { + assert(memref::CastOp::areCastCompatible( + memref.getType(), toMemrefOp.memref().getType()) && + "bufferizeFuncOpBoundary: cast incompatible"); auto castOp = b.create( funcOp.getLoc(), toMemrefOp.memref().getType(), memref); toMemrefOp.memref().replaceAllUsesWith(castOp); @@ -525,6 +528,8 @@ bbArg.setType(desiredMemrefType); OpBuilder b(bbArg.getContext()); b.setInsertionPointToStart(bbArg.getOwner()); + assert(memref::CastOp::areCastCompatible(bbArg.getType(), memrefType) && + "layoutPostProcessing: cast incompatible"); // Cast back to the original memrefType and let it canonicalize. Value cast = b.create(funcOp.getLoc(), memrefType, bbArg); @@ -537,6 +542,10 @@ // such cases. auto castArg = [&](Operation *caller) { OpBuilder b(caller); + assert( + memref::CastOp::areCastCompatible( + caller->getOperand(argNumber).getType(), desiredMemrefType) && + "layoutPostProcessing.2: cast incompatible"); Value newOperand = b.create( funcOp.getLoc(), desiredMemrefType, caller->getOperand(argNumber)); operandsPerCaller.find(caller)->getSecond().push_back(newOperand); @@ -703,6 +712,9 @@ // that will either canonicalize away or fail compilation until we can do // something better. if (buffer.getType() != memRefType) { + assert( + memref::CastOp::areCastCompatible(buffer.getType(), memRefType) && + "CallOp::bufferize: cast incompatible"); Value castBuffer = rewriter.create(callOp.getLoc(), memRefType, buffer); buffer = castBuffer; diff --git a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp --- a/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Linalg/ComprehensiveBufferize/TensorInterfaceImpl.cpp @@ -77,6 +77,9 @@ } // Replace the op with a memref.cast. 
+ assert(memref::CastOp::areCastCompatible(resultBuffer->getType(), + resultMemRefType) && + "CastOp::bufferize: cast incompatible"); replaceOpWithNewBufferizedOp(rewriter, op, resultMemRefType, *resultBuffer);