diff --git a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ComprehensiveBufferize.cpp @@ -10,7 +10,7 @@ // This is a specialized pass that supports inplace analysis for a fixed subset // of ops that have well-defined inplace semantics. // This pass caters to high-performance codegen where buffer reuse is deemed -// necessary: the pass should fail if the bufferized form of the function needs +// critical: the pass should fail if the bufferized form of the function needs // to return any buffer. // Generic control-flow and branching are unsupported. // Composability with extensible set of ops is not a first-class concern. @@ -24,53 +24,81 @@ // rewrites. New allocations are introduced during this step. // TODO: Allocation + depending op hoisting to outermost enclosing // sequential scope. -// c. at the end of this bufferization, 2 cases may occur: -// * inplaceable function arguments may be reused in place after the -// function itself has been bufferized. This is encoded by IR resembling: +// c. at the end of this bufferization, 3 cases may occur: +// i. inplaceable function arguments may be reused in place after the +// function itself has been bufferized. This is encoded by IR resembling: // -// ``` -// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> -// func @foo(%A: tensor {linalg.inplaceable = true}) -> tensor { -// %0 = memref.buffer_cast %A : memref -// // ... uses of %0 -// %res = memref.tensor_load %0 : memref -// return %res : tensor -// } -// ``` +// ``` +// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> +// func @foo(%A: tensor {linalg.inplaceable = true}) +// -> tensor { +// %0 = memref.buffer_cast %A : memref +// // ... uses of %0 +// %res = memref.tensor_load %0 : memref +// return %res : tensor +// } +// ``` // -// this is the cue for the bufferization of the function foo (and calls to -// it) may bufferize to `func @foo(%A: memref)`. -// To fully achieve bufferization, an additional analysis is needed to -// determine whether function argument/operand pairs bufferize to a single -// inplace buffer argument (i.e. functions may return tensors in arbitrary -// order that may not match argument numbers). -// * results that don't map to an inplaceable function argument must be -// allocated. Since memref semantics wrt ownership of the underlying -// memory region are not well-defined, comprehensive bufferization chooses -// to perform allocations in a scoped fashion: returning memrefs is always -// considered illegal. Such scenarios are encoded by IR resembling: +// this is the cue for the bufferization of the function foo (and calls +// to it) may bufferize to `func @foo(%A: memref)`. +// To fully achieve bufferization, an additional analysis is needed to +// determine whether function argument/operand pairs bufferize to a +// single inplace buffer argument (i.e. functions may return tensors in +// arbitrary order that may not match argument numbers). // -// ``` -// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> -// func @foo(%A: tensor {linalg.inplaceable = true}) -> tensor { -// %0 = memref.buffer_cast %A : memref -// %1 = memref.dim %0, %c0 : memref -// %2 = memref.alloc(%1) : memref -// %3 = memref.cast %2 : memref to memref -// // ... uses of %3 -// memref.dealloc %2 : memref -// %res = memref.tensor_load %3 : memref -// return %res : tensor -// } -// ``` +// ii. 
results that don't map to an inplaceable function argument are +// generally allocated. Since memref semantics wrt ownership of the +// underlying memory region are not well-defined, comprehensive +// bufferization chooses to perform allocations in a scoped fashion: +// returning memrefs is always considered illegal. +// Such scenarios are encoded by IR resembling: // -// this is the cue for the bufferization of the function foo (and calls to -// it) that it must bufferize to -// `func @foo(%A: memref, -// %B: memref)` (i.e. make a cloned -// allocation of the result tensor) -// To fully achieve bufferization, the alloc/dealloc pair must be lifted -// out of the function at each call site. +// ``` +// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> +// func @foo(%A: tensor {linalg.inplaceable = true}) +// -> tensor { +// %0 = memref.buffer_cast %A : memref +// %1 = memref.dim %0, %c0 : memref +// %2 = memref.alloc(%1) : memref +// %3 = memref.cast %2 : memref to memref +// // ... uses of %3 +// memref.dealloc %2 : memref +// %res = memref.tensor_load %3 : memref +// return %res : tensor +// } +// ``` +// +// this is the cue for the bufferization of the function foo (and calls +// to it) that it must bufferize to `func @foo(%A: memref, +// %B: memref)` (i.e. make a cloned +// allocation of the result tensor) +// To fully achieve bufferization, the alloc/dealloc pair must be lifted +// out of the function at each call site. +// +// iii. as an optimization over ii., it may be possible to reuse an argument +// and only want to return a subtensor. +// This may forego allocation by letting *all* callers decide whether to +// pass a new *aliasing* memref function argument (i.e. a subview). +// Without loss of generality, callers may agree to allocate a new buffer +// to avoid this aliasing. Such scenarios are encoded by IR resembling: +// +// ``` +// #map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> +// func @foo(%arg0: tensor {linalg.inplaceable = true}) +// -> tensor<4xf32> { +// %0 = memref.buffer_cast %arg0 : memref +// %1 = memref.subview %0[0] [4] [1] : memref to +// memref<4xf32, #map> +// // ... inplace computes into %1 +// %3 = memref.tensor_load %1 : memref<4xf32, #map> +// return %3 : tensor<4xf32> +// } +// ``` +// +// Note: In the future, it may be worthwhile to design special bufferization +// ops to encode the desired semantics at function boundaries for i., ii. and +// iii. // // Lastly, note that layout map chosen to bufferize is the most dynamic // canonical strided layout of the proper rank. This ensures compatibility with @@ -78,7 +106,6 @@ // canonicalization are responsible for clean ups. #include "PassDetail.h" -#include "mlir/Analysis/SliceAnalysis.h" #include "mlir/Dialect/Linalg/IR/LinalgOps.h" #include "mlir/Dialect/Linalg/Passes.h" #include "mlir/Dialect/MemRef/IR/MemRef.h" @@ -87,7 +114,10 @@ #include "mlir/Pass/Pass.h" #include "mlir/Transforms/BufferUtils.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/EquivalenceClasses.h" #include "llvm/ADT/ScopeExit.h" +#include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/TypeSwitch.h" @@ -98,77 +128,53 @@ using namespace tensor; #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ") +#define LDBG(X) LLVM_DEBUG(DBGS() << X) + +inline const char *endl(Value v) { return v.isa() ? "" : "\n"; } //===----------------------------------------------------------------------===// -// Op-specific semantics helper to retrieve matching inplaceable result. 
+// Bufferization-specific BlockAndValueMapping support with debugging. //===----------------------------------------------------------------------===// -/// Return the OpResult that matches an operand. -/// Return null if no such result exists. -OpResult getMatchingOpResult(LinalgOp linalgOp, OpOperand &opOperand) { - if (!opOperand.get().getType().isa()) - return OpResult(); - // For now assume inputs are never inplaceable. - // TODO: refine this. - if (opOperand.getOperandNumber() < linalgOp.getNumInputs()) - return OpResult(); - // For now assume if the operand appears twice, it is not inplaceable. - // TODO: refine this. - for (auto &opOperand2 : linalgOp->getOpOperands()) { - if (opOperand.getOperandNumber() == opOperand2.getOperandNumber()) - continue; - if (opOperand.get() == opOperand2.get()) - return OpResult(); - } - int64_t outputOperandIndex = - opOperand.getOperandNumber() - linalgOp.getNumInputs(); - int64_t numOutputBuffers = 0; - for (unsigned idx = 0; idx < outputOperandIndex; ++idx) - if (!linalgOp.getOutputOperand(idx)->get().getType().isa()) - ++numOutputBuffers; - return linalgOp->getResult(outputOperandIndex - numOutputBuffers); -} - -/// Return the OpResult that matches an operand. -/// Return null if no such result exists. -OpResult getMatchingOpResult(VectorTransferOpInterface op, - OpOperand &opOperand) { - if (opOperand.get() != op.source() || - !op.source().getType().isa()) - return OpResult(); - return op->getResult(0); +/// Wrapper for better debugging. +static void map(BlockAndValueMapping &bvm, ValueRange keys, ValueRange values) { + assert(!keys.empty() && "Unexpected empty keys"); + LDBG("Map: " << keys.front() << " to " << values.front() << "\n"); + return bvm.map(keys, values); } -/// Return the OpResult that matches an operand. -/// Return null if no such result exists. -OpResult getMatchingOpResult(SubTensorInsertOp op, OpOperand &opOperand) { - if (opOperand.get() != op.dest()) - return OpResult(); - return op->getResult(0); +/// Wrapper for better debugging. +static void map(BlockAndValueMapping &bvm, Value key, Value value) { + LDBG("Map: " << key << " to " << value << "\n"); + return bvm.map(key, value); } -/// Determine which results may be reused inplace by the bufferization -/// patterns of `bufferizeFuncOpInternals`. -/// The inplace analysis uses this information along with interfering read -/// analysis to determine which op results reuse the same buffer as some -/// operand. -OpResult getMatchingOpResult(OpOperand &opOperand) { - return llvm::TypeSwitch(opOperand.getOwner()) - // clang-format off - // Ops that perform destructive updates on operand(s) to produce - // result(s). - .Case( - [&](auto op) { return getMatchingOpResult(op, opOperand); }) - // Other ops. - .Case([&](auto op) { return OpResult(); }) - .Default([&](Operation *op) { return OpResult(); }); - // clang-format on +/// Wrapper for better debugging. +static Value lookup(BlockAndValueMapping &bvm, Value key) { + // TODO: if key comes from bbArg, forward. 
+ assert(key.getType().isa()); + if (!bvm.lookupOrNull(key)) { + if (auto bbArg = key.dyn_cast()) { + if (isa(key.getParentBlock()->getParentOp())) + key.getParentBlock()->getParentOp()->dump(); + else + key.getParentBlock()->getParentOp()->getParentOfType()->dump(); + bbArg.getOwner()->getParentOp()->dump(); + } else { + key.getDefiningOp()->getParentOfType()->dump(); + } + llvm::errs() << "NO VALUE FOR KEY: " << key << "\n"; + return Value(); + } + return bvm.lookup(key); } //===----------------------------------------------------------------------===// // Bufferization-specific attribute manipulation. +// These could be simplified with helper structs on the side; for now, attributes +// allow simple embedding in the IR, which simplifies testing. +// This could also be folded in BufferizationAliasInfo or a Bufferizer class +// that uses BufferizationAliasInfo. //===----------------------------------------------------------------------===// /// Attribute marker to specify op results that can be bufferized inPlace. @@ -218,8 +224,8 @@ llvm::to_vector<4>(attr.getAsValueRange())) : SmallVector(op->getNumResults(), stringify(InPlaceSpec::None)); - LLVM_DEBUG(DBGS() << "Set inPlace=" << stringify(inPlace) << ": " << *op - << " @idx=" << opResult.getResultNumber() << "\n"); + LDBG("->set inPlace=" << stringify(inPlace) << ": " << *op + << " @idx=" << opResult.getResultNumber() << "\n"); inPlaceVector[opResult.getResultNumber()] = stringify(inPlace); op->setAttr(kInPlaceResultsAttrName, OpBuilder(op).getStrArrayAttr(inPlaceVector)); @@ -259,42 +265,603 @@ return attr.getValue() ? InPlaceSpec::True : InPlaceSpec::False; } +static InPlaceSpec getInPlace(Value v) { + if (auto bbArg = v.dyn_cast()) + return getInPlace(bbArg); + return getInPlace(v.cast()); +} + //===----------------------------------------------------------------------===// -// Bufferization-specific BlockAndValueMapping support with debugging. +// Op-specific semantics helper to retrieve matching inplaceable result. +// These should become proper interfaces when the time is right. +// Modulo better naming, these helpers / interfaces comprise information on: +// 1. Whether an op has a known bufferization behavior (i.e. an instance of +// BufferizableOpInterface). +// 2. Whether an op, when bufferized inplace, can guarantee an +// (OpOperand, OpResult) pair bufferizes to equivalent (i.e. the same) +// buffers in memory. +// 3. Whether an op operand, when bufferized inplace, aliases a return value. +// 4. Whether an op return value, when bufferized inplace, aliases an operand. +// 5. Whether an op bufferizes to a memory read. +// 6. Whether an op bufferizes to a memory write. +// These interfaces are necessary to distinguish between various cases and allow +// special inplace behavior for (SubTensorOp, SubTensorInsertOp) pairs. //===----------------------------------------------------------------------===// -/// Wrapper for better debugging. -static void map(BlockAndValueMapping &bvm, ValueRange keys, ValueRange values) { - assert(!keys.empty() && "Unexpected empty keys"); - LLVM_DEBUG(DBGS() << "Map: " << keys.front() << " to " << values.front() - << "\n"); - return bvm.map(keys, values); +/// Return `true` if the op is explicitly supported by bufferization or if it +/// has no result tensors. +/// Other cases must be conservative.
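+/// Ops without such known behavior are later treated as if they had to
+/// allocate all of their tensor results (see `getAliasingOpResult` and
+/// `getAliasingOpOperand` below).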
+static bool hasKnownBufferizationAliasingBehavior(Operation *op) { + return + // clang-format off + isa(op) + // clang-format on + || llvm::none_of(op->getResultTypes(), + [](Type t) { return t.isa(); }); } -/// Wrapper for better debugging. -static void map(BlockAndValueMapping &bvm, Value key, Value value) { - LLVM_DEBUG(DBGS() << "Map: " << key << " to " << value << "\n"); - return bvm.map(key, value); +/// Return the OpResult that may bufferize into the same buffer as `opOperand` +/// when the op is bufferized inplace. +/// Return null if no such result exists. +static OpResult getInplaceableOpResult(LinalgOp linalgOp, + OpOperand &opOperand) { + if (!opOperand.get().getType().isa()) + return OpResult(); + // For now assume inputs are never inplaceable. + // TODO: refine this. + if (opOperand.getOperandNumber() < linalgOp.getNumInputs()) + return OpResult(); + int64_t outputOperandIndex = + opOperand.getOperandNumber() - linalgOp.getNumInputs(); + int64_t numOutputBuffers = 0; + for (unsigned idx = 0; idx < outputOperandIndex; ++idx) + if (!linalgOp.getOutputOperand(idx)->get().getType().isa()) + ++numOutputBuffers; + return linalgOp->getResult(outputOperandIndex - numOutputBuffers); } -/// Wrapper for better debugging. -static Value lookup(BlockAndValueMapping &bvm, Value key) { - // TODO: if key comes from bbArg, forward. - assert(key.getType().isa()); - if (!bvm.lookupOrNull(key)) { - if (auto bbArg = key.dyn_cast()) { - if (isa(key.getParentBlock()->getParentOp())) - key.getParentBlock()->getParentOp()->dump(); - else - key.getParentBlock()->getParentOp()->getParentOfType()->dump(); - bbArg.getOwner()->getParentOp()->dump(); - } else { - key.getDefiningOp()->getParentOfType()->dump(); +/// Return the OpResult that may bufferize into the same buffer as `opOperand` +/// when the op is bufferized inplace. +/// Return null if no such result exists. +static OpResult getInplaceableOpResult(VectorTransferOpInterface op, + OpOperand &opOperand) { + if (opOperand.get() != op.source() || + !op.source().getType().isa()) + return OpResult(); + return op->getResult(0); +} + +/// Return the OpResult that may bufferize into the same buffer as `opOperand` +/// when the op is bufferized inplace. +/// Return null if no such result exists. +static OpResult getInplaceableOpResult(SubTensorInsertOp op, + OpOperand &opOperand) { + if (opOperand.get() != op.dest()) + return OpResult(); + return op->getResult(0); +} + +/// Return the OpResult that may bufferize into the same buffer as `opOperand` +/// when the op is bufferized inplace. +/// The inplace analysis uses this information along with interfering read +/// analysis to determine which op results reuse the same buffer as some +/// operand. +static OpResult getInplaceableOpResult(OpOperand &opOperand) { + return llvm::TypeSwitch(opOperand.getOwner()) + // clang-format off + // Ops that perform destructive updates on operand(s) to produce + // result(s). + .Case( + [&](auto op) { return getInplaceableOpResult(op, opOperand); }) + // ReturnOp has no result. + .Case([&](auto op) { return OpResult(); }) + // SubTensorOp is special, when bufferized inplace it just returns an + // alias to its operand. Its result is never inplaceable on its operand. + .Case([&](auto op) { return OpResult(); }) + // Other ops. + .Default([&](Operation *op) { return OpResult(); }); + // clang-format on +} + +/// Determine which OpResult will alias with `opOperand` if the op is bufferized +/// in place. This is a superset of `getInplaceableOpResult`. 
+/// Return llvm::None if the owner of `opOperand` does not have known +/// bufferization aliasing behavior, which indicates that the op must allocate +/// all of its tensor results. +/// TODO: in the future this may need to evolve towards a list of OpResult. +static llvm::Optional getAliasingOpResult(OpOperand &opOperand) { + if (!hasKnownBufferizationAliasingBehavior(opOperand.getOwner())) + return llvm::None; + return llvm::TypeSwitch(opOperand.getOwner()) + // ReturnOp has no result. + .Case([&](auto op) { return OpResult(); }) + // SubTensorOp is different: its result is not inplaceable on op.source + // but when bufferized inplace, the result is an aliasing subregion of + // op.source. + .Case([&](SubTensorOp op) { return op->getResult(0); }) + .Default( + [&](Operation *op) { return getInplaceableOpResult(opOperand); }); +} + +/// Determine which OpOperand* will alias with `result` if the op is bufferized +/// in place. +/// Return llvm::None if the owner of `opOperand` does not have known +/// bufferization aliasing behavior, which indicates that the op must allocate +/// all of its tensor results. +/// TODO: in the future this may need to evolve towards a list of OpOperand*. +static llvm::Optional getAliasingOpOperand(OpResult result) { + if (!hasKnownBufferizationAliasingBehavior(result.getDefiningOp())) + return llvm::None; + return llvm::TypeSwitch(result.getDefiningOp()) + .Case([&](LinalgOp op) { + return op.getOutputTensorOperands()[result.getResultNumber()]; + }) + .Case([&](SubTensorOp op) { return &op->getOpOperand(0); }) + .Case([&](SubTensorInsertOp op) { return &op->getOpOperand(1); }) + .Case([&](vector::TransferWriteOp op) { return &op->getOpOperand(1); }) + .Default([&](Operation *op) { + llvm_unreachable("unexpected defining op"); + return nullptr; + }); +} + +/// Return true if `opOperand` bufferizes to a memory read. +static bool bufferizesToMemoryRead(OpOperand &opOperand) { + auto maybeOpResult = getAliasingOpResult(opOperand); + // Unknown op that returns a tensor. The inplace analysis does not support + // it. Conservatively return true. + if (!maybeOpResult) + return true; + // A ReturnOp is neither a read nor a write. + if (isa(opOperand.getOwner())) + return false; + // Supported op without a matching result for opOperand. + // This does not bufferize to a write so it bufferizes to an unused operand or + // to a read. Conservatively assume a read. + if (!*maybeOpResult) + return true; + // A SubTensorOp by itself does not bufferize to a read, some use may. + if (isa(*maybeOpResult->getDefiningOp())) + return false; + // If we have a matching OpResult, this may or may not be a read. + if (auto linalgOp = dyn_cast(opOperand.getOwner())) + return linalgOp.isInitTensor(&opOperand); + return true; +} + +/// Return true if `opOperand` bufferizes to a memory write. +/// If inPlace is passedas true, additionally requires the write to be inPlace. +static bool bufferizesToMemoryWrite(OpOperand &opOperand, + bool inPlace = false) { + auto maybeOpResult = getAliasingOpResult(opOperand); + // Unknown op that returns a tensor. The inplace analysis does not support + // it. Conservatively return true. + if (!maybeOpResult) + return true; + // A ReturnOp is neither a read nor a write. + if (isa(opOperand.getOwner())) + return false; + // Supported op without a matching result for opOperand. + // This does not bufferize to a write. + if (!*maybeOpResult) + return false; + // A SubTensorOp by itself does not bufferize to a write, some use may. 
+ if (isa(*maybeOpResult->getDefiningOp())) + return false; + // If we have a matching OpResult, this is a write. + return !inPlace || getInPlace(*maybeOpResult) == InPlaceSpec::True; +} + +//===----------------------------------------------------------------------===// +// Bufferization-specific alias analysis. +//===----------------------------------------------------------------------===// + +/// Iteratively apply `callback` while walking def-use chains. +/// The forward walk follows a 1-1 operand-to-result matching based on which +/// operand actually bufferizes to a buffer that aliases the result. +static void +walkForwardWithCallback(Value root, + llvm::function_ref callback) { + LDBG("----Start walkForwardWithCallback:\n"); + llvm::SetVector worklist; + worklist.insert(root); + int start = 0, size = worklist.size(); + while (start != size) { + for (Value current : worklist.getArrayRef().drop_front(start)) { + LDBG("--------current value: " << current << endl(current)); + + // Propagate through all uses. + for (auto &use : current.getUses()) { + // Update current based on the op type. + auto maybeAliasingResult = getAliasingOpResult(use); + + // Op with unknown behavior. + // TODO: handle conservatively. + if (!maybeAliasingResult) + llvm_unreachable("unexpected unsupported op"); + + // Legitimately no aliasing result. + if (!*maybeAliasingResult) + continue; + + WalkResult walkResult = callback(*maybeAliasingResult); + if (walkResult.wasSkipped()) + continue; + if (walkResult.wasInterrupted()) + return; + + worklist.insert(*maybeAliasingResult); + } } - llvm::errs() << "NO VALUE FOR KEY: " << key << "\n"; - return Value(); + start = size; + size = worklist.size(); } - return bvm.lookup(key); +} + +namespace { + +/// The BufferizationAliasInfo class maintains a list of buffer aliases and +/// equivalence classes to support bufferization. +/// SubTensorOps have special behavior, they act as a level of indirection for +/// bufferization. They don't create reads or writes themselves and analysis +/// needs to look through their uses. +/// SubTensorOp + SubTensorInsertOp have special joint behavior: they may +/// bufferize to the same buffer (i.e. subview), which is what introduces the +/// need for bufferization classes. +/// Some of these functionalities could be refactored in a Bufferizer class that +/// uses BufferizationAliasInfo. +class BufferizationAliasInfo { +public: + BufferizationAliasInfo(FuncOp funcOp); + + /// Return true if the buffer to which `operand` would bufferize is equivalent + /// to some use that would bufferize to a write to a buffer. + bool aliasesNonWriteableBuffer(OpOperand &operand); + + /// Return true if the buffer to which `operand` would bufferize is equivalent + /// to some use that would bufferize to a write to a buffer. + bool aliasesInPlaceWrite(SubTensorOp subTensorOp); + + // Merge result's and operand's aliasing sets and iterates to a fixed point. + // Additionally sets them to be equivalent if `setEquivalent` is true. + void bufferizeInPlace(OpResult result, OpOperand &operand, + bool setEquivalent = false); + + // Find an inplace write W among the uses of aliasInfo[rootWrite], that + // interferes with a read R among the uses of aliasInfo[rootRead]. + // Such a (W, R) pair is an interference to the inplace bufferization of + // rootWrite when R does not properly dominate W. 
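+  // In other words, rootWrite can only bufferize inplace if every use that
+  // reads an alias of rootRead properly dominates every use that writes an
+  // alias of rootWrite inplace; otherwise the write could clobber data the
+  // read still needs. For illustration (hypothetical IR), with %A inplaceable:
+  //   %r = subtensor %A[0][4][1] ...          // aliases %A when inplace
+  //   %w = subtensor_insert %x into %A[...]   // inplace write into %A
+  //   "some_use"(%r)                          // reads %r after the write
+  // The read of %r does not properly dominate the write, so the (write, read)
+  // pair interferes and one of the two ops must bufferize out of place.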
+ bool existsReadWriteInterference(Value rootWrite, Value rootRead, + const DominanceInfo &domInfo); + + // Return true if we find any read to opOperand.get() or any of its aliases, + // that does not dominate opOperand.getOwner(). + bool existsNonDominatingRead(OpOperand &opOperand, + const DominanceInfo &domInfo); + + // Print to `os`. + void print(llvm::raw_ostream &os); + + // Print to `llvm::errs()`. + inline void dump() { print(llvm::errs()); } + +private: + // Check aliasInfo for `v` exists and return a reference to it. + llvm::DenseSet &getAliasInfoRef(Value v); + + // Iteratively merge alias sets until a fixed-point. + void iterateToFixedPoint(); + + // Union all the aliasing sets of all aliases of v1 and v2. + bool merge(Value v1, Value v2); + + // This is one particular type of relationship between ops on tensors that + // reduce to an equivalence on buffers. This should be generalized and exposed + // as interfaces on the proper types. + bool equivalentSubTensorOps(SubTensorOp st, SubTensorInsertOp sti); + + // Helper function to traverse + void buildAliasInfo(Value root); + + // Auxiliary structure to store all the values agiven value aliases with. + // TODO: improve alias computation by + DenseMap> aliasInfo; + + // EquivalenceClasses wants comparable elements because it uses std::set. + // ValueWrapper wraps Value and uses pointer comparison on the defining op. + // This is a poor man's comparison but it's not like UnionFind needs ordering + // anyway .. + struct ValueWrapper { + ValueWrapper(Value val) : v(val) {} + operator Value() const { return v; } + bool operator<(const ValueWrapper &wrap) const { + return v.getImpl() < wrap.v.getImpl(); + } + bool operator==(const ValueWrapper &wrap) const { return v == wrap.v; } + Value v; + }; + // Auxiliary structure to store all the equivalent buffer classes. + llvm::EquivalenceClasses equivalentBuffers; +}; +} // namespace + +BufferizationAliasInfo::BufferizationAliasInfo(FuncOp funcOp) { + for (auto bbArg : funcOp.getArguments()) { + if (!bbArg.getType().isa()) + continue; + buildAliasInfo(bbArg); + LDBG("insert: " << bbArg << endl(bbArg)); + equivalentBuffers.insert(bbArg); + } + funcOp.walk([&](Operation *op) { + for (Value v : op->getResults()) { + if (!v.getType().isa()) + continue; + buildAliasInfo(v); + LDBG("insert: " << v << endl(v)); + equivalentBuffers.insert(v); + } + }); + iterateToFixedPoint(); + LLVM_DEBUG(dump()); +} + +/// Return true if the buffer to which `operand` would bufferize is equivalent +/// to some use that would bufferize to a write to a buffer. +bool BufferizationAliasInfo::aliasesNonWriteableBuffer(OpOperand &operand) { + LDBG("----Start aliasesNonWriteableBuffer\n"); + LDBG("-------for operand #" << operand.getOperandNumber() << ": " + << *(operand.getOwner()) << "\n"); + for (Value v : getAliasInfoRef(operand.get())) { + LDBG("-----------examine: " << v << endl(v)); + if (auto bbArg = v.dyn_cast()) { + // Uses of function arguments that may be written to can be skipped. + if (isa(bbArg.getOwner()->getParentOp()) && + getInPlace(bbArg) == InPlaceSpec::True) { + LDBG("-----------bbArg is writeable -> skip: " << bbArg); + continue; + } + // Conservatively dump any other block argument for now. 
+ LDBG("-----------notWriteable: " << v << endl(v)); + return true; + } + + if (Operation *op = v.getDefiningOp()) { + if (isa(op) || !hasKnownBufferizationAliasingBehavior(op)) { + LDBG("-----------notWriteable: " << v << endl(v)); + return true; + } + } + } + LDBG("---->operand is writeable\n"); + return false; +} + +/// Return true if the buffer to which `subTensorOp` would bufferize has a use +/// that bufferizes to an inplace write. +bool BufferizationAliasInfo::aliasesInPlaceWrite(SubTensorOp subTensorOp) { + LDBG("----Start aliasesInPlaceWrite\n"); + LDBG("-------for op: " << *subTensorOp.getOperation() << "\n"); + for (Value v : getAliasInfoRef(subTensorOp.result())) { + for (auto &use : v.getUses()) { + if (bufferizesToMemoryWrite(use, /*inPlace=*/true)) { + LDBG("-----------wants to bufferize to inPlace write: " + << *use.getOwner() << "\n"); + return true; + } + } + } + LDBG("----------->subtensor does not alias an inplace write"); + return false; +} + +// Merge result's and operand's aliasing sets and iterates to a fixed point. +// Additionally sets them to be equivalent if `setEquivalent` is true. +void BufferizationAliasInfo::bufferizeInPlace(OpResult result, + OpOperand &operand, + bool setEquivalent) { + if (merge(result, operand.get())) + iterateToFixedPoint(); + if (setEquivalent) + equivalentBuffers.unionSets(result, operand.get()); + LLVM_DEBUG(dump()); } + +// Find an inplace write W among the uses of aliasInfo[rootWrite], that +// interferes with a read R among the uses of aliasInfo[rootRead]. +// Such a (W, R) pair is an interference to the inplace bufferization of +// rootWrite when R does not properly dominate W. +bool BufferizationAliasInfo::existsReadWriteInterference( + Value rootWrite, Value rootRead, const DominanceInfo &domInfo) { + LDBG("----Start existsReadWriteInterference\n"); + llvm::DenseSet usesWrite; + auto &aliasListWrite = getAliasInfoRef(rootWrite); + for (Value vWrite : aliasListWrite) + for (auto &uWrite : vWrite.getUses()) + usesWrite.insert(&uWrite); + + llvm::DenseSet usesRead; + auto &aliasListRead = getAliasInfoRef(rootRead); + for (Value vRead : aliasListRead) + for (auto &uRead : vRead.getUses()) + usesRead.insert(&uRead); + + for (OpOperand *uWrite : usesWrite) { + // If the use of rootWrite does not bufferize to a write, skip. + if (!bufferizesToMemoryWrite(*uWrite, /*inPlace=*/true)) + continue; + LDBG("----++++aliasWrite #" << uWrite->getOperandNumber() + << " in: " << *uWrite->getOwner() << "\n"); + for (OpOperand *uRead : usesRead) { + // Don't consider self-use. + if (uWrite == uRead) + continue; + // If the use of rootRead does not bufferize to a read, skip. + if (!bufferizesToMemoryRead(*uRead)) + continue; + LDBG("---- aliasRead #" << uRead->getOperandNumber() + << " in: " << *uRead->getOwner() << "\n"); + Operation *opWrite = uWrite->getOwner(); + Operation *opRead = uRead->getOwner(); + // Self W interference between different operands must be treated + // carefully. For now remain conservative.
+ if (opRead == opWrite) { + if (equivalentSubTensorOps(rootRead.getDefiningOp(), + dyn_cast(opWrite))) { + LDBG("---->matching subtensors: " << *opWrite << "\n"); + continue; + } + LDBG("---->found interfering inplace self-write: " << *opWrite << "\n"); + LDBG(" Root value to write: " << rootWrite << endl(rootWrite)); + LDBG(" Root value to read: " << rootRead << endl(rootRead)); + return true; + } + // If opRead properly dominates opWrite, the read cannot be affected + // by the write: there is no interference. + if (domInfo.properlyDominates(opRead, opWrite)) + continue; + // At this point, opWrite properly dominates opRead or there is no + // clear dominance and we need to be conservative. + LDBG("---->found RW interference on : " << *opWrite << "\n"); + LDBG(" Root value to write: " << rootWrite << endl(rootWrite)); + LDBG(" with interfering write: " << *opWrite << "\n"); + LDBG(" Root value to read: " << rootRead << endl(rootRead)); + LDBG(" with interfering read: " << *opRead << "\n"); + return true; + } + } + LDBG("----No interference found\n"); + return false; +} + +// Return true if we find any read to opOperand.get() or any of its aliases, +// that does not dominate opOperand.getOwner(). +bool BufferizationAliasInfo::existsNonDominatingRead( + OpOperand &opOperand, const DominanceInfo &domInfo) { + LDBG("----Start existsNonDominatingRead\n"); + Operation *op = opOperand.getOwner(); + for (Value alias : getAliasInfoRef(opOperand.get())) { + for (OpOperand &wantReadUse : alias.getUses()) { + LDBG("--------current operand #" << wantReadUse.getOperandNumber() << ": " + << *(wantReadUse.getOwner()) << "\n"); + if (!bufferizesToMemoryRead(wantReadUse)) { + LDBG("------------not a read -> skip\n"); + continue; + } + if (&wantReadUse == &opOperand) { + LDBG("------------self-read is not an interference -> skip\n"); + continue; + } + if (domInfo.properlyDominates(wantReadUse.getOwner(), op)) { + LDBG("------------read properly dominates -> skip\n"); + continue; + } + LDBG("------------found interfering read -> stop\n"); + return true; + } + } + return false; +} + +void BufferizationAliasInfo::print(llvm::raw_ostream &os) { + os << "\n/========================== AliasInfo " + "==========================\n"; + for (auto it : aliasInfo) { + os << "|\n| -- source: " << it.getFirst() << endl(it.getFirst()); + for (auto v : it.getSecond()) + os << "| ---- target: " << v << endl(v); + } + os << "|\n\\====================== End AliasInfo " + "======================\n\n"; + os << "\n/********************* Equivalent Buffers *********************\n"; + for (auto it = equivalentBuffers.begin(), eit = equivalentBuffers.end(); + it != eit; ++it) { + if (!it->isLeader()) + continue; + Value leader = it->getData(); + os << "|\n| -- leader: " << leader << endl(leader); + for (auto mit = equivalentBuffers.member_begin(it), + meit = equivalentBuffers.member_end(); + mit != meit; ++mit) { + Value v = static_cast(*mit); + os << "| ---- equivalent member: " << v << endl(v); + } + } + os << "|\n\\***************** End Equivalent Buffers *****************\n\n"; +} + +llvm::DenseSet &BufferizationAliasInfo::getAliasInfoRef(Value v) { + auto it = aliasInfo.find(v); + if (it == aliasInfo.end()) { + llvm::errs() << "No aliasInfo for " << v << "\n"; + llvm_unreachable("Missing alias"); + } + return it->getSecond(); +} + +// Iteratively merge alias sets until a fixed-point. 
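+// Each round unions every value's alias set with the alias sets of all its
+// current aliases; the loop terminates once no alias set grows anymore.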
+void BufferizationAliasInfo::iterateToFixedPoint() { + while (true) { + bool changed = false; + for (auto it : aliasInfo) + for (auto v : it.getSecond()) + changed |= merge(it.getFirst(), v); + if (!changed) + break; + } +} + +// Union all the aliasing sets of all aliases of v1 and v2. +bool BufferizationAliasInfo::merge(Value v1, Value v2) { + bool changed = false; + for (auto v : getAliasInfoRef(v1)) + changed |= llvm::set_union(getAliasInfoRef(v), getAliasInfoRef(v2)); + for (auto v : getAliasInfoRef(v2)) + changed |= llvm::set_union(getAliasInfoRef(v), getAliasInfoRef(v1)); + return changed; +} + +// This is one particular type of relationship between ops on tensors that +// reduce to an equivalence on buffers. This should be generalized and exposed +// as interfaces on the proper types. +bool BufferizationAliasInfo::equivalentSubTensorOps(SubTensorOp st, + SubTensorInsertOp sti) { + if (!st || !sti) + return false; + if (!equivalentBuffers.isEquivalent(st.source(), sti.dest())) + return false; + if (!sameOffsetsSizesAndStrides(st, sti, isEqualConstantIntOrValue)) + return false; + if (!equivalentBuffers.isEquivalent(st.result(), sti.source())) + return false; + return true; +} + +void BufferizationAliasInfo::buildAliasInfo(Value root) { + if (!root.getType().isa()) + return; + + LDBG("----Start buildAliasInfo for " << root << endl(root)); + Value terminalValue; + llvm::DenseSet rootAliasInfo; + rootAliasInfo.insert(root); + auto callback = [&](OpResult opResult) { + if (getInPlace(opResult) != InPlaceSpec::True) { + LDBG("--------only inplace creates alias; skip: " << opResult << "\n"); + return WalkResult::skip(); + } + rootAliasInfo.insert(opResult); + return WalkResult::advance(); + }; + + walkForwardWithCallback(root, callback); + aliasInfo.try_emplace(root, rootAliasInfo); } //===----------------------------------------------------------------------===// @@ -425,7 +992,7 @@ // If output tensor is marked inPlace, just use the buffer. // The following uses internal knowledge of the position of tied operand / // results. - OpResult tiedResult = getMatchingOpResult(op, *opOperand); + OpResult tiedResult = getInplaceableOpResult(*opOperand); if (getInPlace(tiedResult) == InPlaceSpec::True) { Value v = lookup(bvm, output); if (!v) @@ -480,7 +1047,7 @@ if (op.hasBufferSemantics()) return failure(); - LLVM_DEBUG(DBGS() << "bufferize: " << *op << "\n"); + LDBG("bufferize: " << *op << "\n"); b.setInsertionPoint(op); Location loc = op.getLoc(); @@ -558,11 +1125,11 @@ /// Bufferize SubTensorOp to subview with optional alloc + copy depending on /// whether or not it is marked inplaceable. -/// Note that `getMatchingOpResult` on a SubTensorOp always returns null. +/// Note that `getInplaceableOpResult` on a SubTensorOp always returns null. /// As consequence a SubTensorOp always alloc + copy when taken in isolation. static LogicalResult bufferize(OpBuilder &b, SubTensorOp subTensorOp, BlockAndValueMapping &bvm) { - LLVM_DEBUG(DBGS() << "bufferize: " << *subTensorOp << "\n"); + LDBG("bufferize: " << *subTensorOp << "\n"); // Take a guard before anything else. OpBuilder::InsertionGuard g(b); @@ -608,7 +1175,7 @@ static LogicalResult bufferize(OpBuilder &b, SubTensorInsertOp subTensorInsertOp, BlockAndValueMapping &bvm) { - LLVM_DEBUG(DBGS() << "bufferize: " << *subTensorInsertOp << "\n"); + LDBG("bufferize: " << *subTensorInsertOp << "\n"); // Take a guard before anything else. 
OpBuilder::InsertionGuard g(b); @@ -656,8 +1223,8 @@ else inPlaceProducer = getInPlace(source.cast()); if (inPlaceProducer != InPlaceSpec::True) { - LLVM_DEBUG(DBGS() << "subtensor_insert needs extra source copy: " << source - << " -> copy\n"); + LDBG("subtensor_insert needs extra source copy: " << source + << " -> copy\n"); // Take a subview of the dst. Value subView = b.create( loc, subviewMemRefType, dstMemref, subTensorInsertOp.getMixedOffsets(), @@ -680,7 +1247,7 @@ if (op.getShapedType().isa()) return failure(); - LLVM_DEBUG(DBGS() << "bufferize: " << *op << "\n"); + LDBG("bufferize: " << *op << "\n"); /// transfer_read from buffer always reads from the bufferized op.source(). if (auto readOp = dyn_cast(op.getOperation())) { @@ -723,169 +1290,172 @@ } //===----------------------------------------------------------------------===// -// Functions and calls bufferization support. +// Bufferization analyses. //===----------------------------------------------------------------------===// -/// Determine whether any subsequent read of the tensor `opOperand` may occur. -/// For now, this assumes any use is a read. If any use of the tensor does not -/// properly dominate `opOperand.getOwner()`, then the tensor cannot be -/// bufferized inPlace. -// TODO: For now, this assumes any use is a read. Refine this. -bool hasInterferingTensorRead(OpOperand &opOperand, - const DominanceInfo &domInfo) { - if (!opOperand.get().getType().isa()) - return false; - for (auto &use : opOperand.get().getUses()) { - Operation *user = use.getOwner(); - // If properly dominate, there is a clear sequence point and we can dismiss - // read. - if (domInfo.properlyDominates(user, opOperand.getOwner())) - continue; - // Otherwise, we need to analyze self-dependencies, for now just let it go. - // TODO: proper self-dependence analysis. - if (domInfo.dominates(user, opOperand.getOwner())) - continue; - if (user == opOperand.getOwner() && - use.getOperandNumber() == opOperand.getOperandNumber()) - continue; - LLVM_DEBUG(DBGS() << "found interfering read operand #" - << opOperand.getOperandNumber() - << " in op: " << *opOperand.getOwner() << "\n"); - return true; - } - LLVM_DEBUG(DBGS() << "no interfering read\n"); - return false; -} - -/// Return false if either: -/// 1. `opOperand` is produced by a constant op. For now this is assumed to be -/// bufferized to a GlobalMemrefOp that cannot be written. Generalize in the -/// future. -/// 2.`opOperand` is a BlockArgument of a FuncOp that is not known to be -/// bufferizable inplace. -/// Return true otherwise. -static bool bufferizeToWriteable(OpOperand &opOperand) { - // Constant tensors are deemed not bufferizable for now. - if (auto constantOp = - dyn_cast_or_null(opOperand.get().getDefiningOp())) - return !constantOp.getResult().getType().isa(); - if (auto bbArg = opOperand.get().dyn_cast()) { - // Uses of function arguments that may not be written-to need to be copied. - // If the function argument itself is not inplaceable, early return false. - // If is is inplaceable, interfering tensor read need to be checked. - // - // TODO: better propagate the fact that we want a single clone inside the - // function. Atm every user that wants to write inplace will create its own - // alloc, irrespective of whether or not interfering reads occur. - if (isa(bbArg.getOwner()->getParentOp())) { - if (getInPlace(bbArg) != InPlaceSpec::True) - return false; - } else { - // Conservatively dump any other block argument for now. 
- return false; - } - } - return true; -} - -/// Return false if either: -/// 1. `opOperand` is produced by a constant op. For now this is assumed to be -/// bufferized to a GlobalMemrefOp that cannot be written. Generalize in the -/// future. -/// 2.`opOperand` is a BlockArgument of a FuncOp that is not known to be -/// bufferizable inplace. -/// 3.`opOperand` has an interfering tensor read. -/// Return true otherwise. -static bool isBufferizableInPlace(OpOperand &opOperand, - const DominanceInfo &domInfo) { - return bufferizeToWriteable(opOperand) && - !hasInterferingTensorRead(opOperand, domInfo); -} - -/// Return true if `operand` bufferizes to a buffer that is known to never be -/// written. -static bool bufferizeToReadOnly(OpOperand &operand) { - return llvm::TypeSwitch(operand.getOwner()) - .Case([&](LinalgOp linalgOp) { return linalgOp.isInputTensor(&operand); }) - .Default([&](Operation *op) { return false; }); -} - -/// Assume operand is a use of a `subTensorOp`. -/// Return true if this use bufferizes to a buffer that is known to never be -/// written. -/// Note: This function takes into consideration uses of subTensorOp and whether -/// the owner of those uses is inplaceable. This needs to be run in postorder to -/// provide the most accurate analysis; otherwise it is conservative. -static bool subTensorUseBufferizesToReadOnly(OpOperand &operand) { - assert(operand.get().getDefiningOp() && "expected subtensor op"); - if (auto subTensorInsertOp = - dyn_cast(operand.getOwner())) { - return operand.getOperandNumber() == 0 /* source of the subTensorInsert*/ && - // If the subTensorInsertOp is not inplace, there is no possible - // internal aliasing with subTensorOp, which is inplaceable. - getInPlace(subTensorInsertOp->getResult(0)) != InPlaceSpec::True; +/// Assume all other ops that are not SubTensorOp have been analyzed already and +/// are marked with an inplace attribute. +/// A SubTensorOp-specific analysis is necessary because it has special +/// semantics from the point of view of inplace bufferization: +/// - when bufferized inplace, it just turns into a subview and aliases with +/// everything its source aliases. This can only be done if a number of +/// more stringent conditions are met on the resulting aliases. +/// - when bufferized out of place, it performs an alloc + copy. +/// This could be merged with the analysis for other ops but it is clearer as a +/// standalone function. +static void bufferizableInPlaceAnalysis(SubTensorOp subTensorOp, + BufferizationAliasInfo &aliasInfo, + const DominanceInfo &domInfo) { + LDBG("\n"); + LDBG("Try to bufferize subtensor inplace: " << *subTensorOp << "\n"); + + // If `subTensorOp` were to be bufferized inplace, it cannot end up aliasing + // a write into a non-writeable buffer. + bool createsAliasingWriteToNonWriteableBuffer = + aliasInfo.aliasesInPlaceWrite(subTensorOp) && + aliasInfo.aliasesNonWriteableBuffer(subTensorOp->getOpOperand(0)); + + if (createsAliasingWriteToNonWriteableBuffer) + LDBG("->the corresponding buffer is not writeable\n"); + LDBG("->bufferizes to writeable inplace buffer\n"); + + // In any of subTensorOp.result's aliases, can we find 2 such that we hit an + // interfering write? 
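+  // The four checks below cover the (source, source), (source, result),
+  // (result, source) and (result, result) pairs of alias sets.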
+ Value s = subTensorOp.source(), r = subTensorOp.result(); + bool foundInterference = + createsAliasingWriteToNonWriteableBuffer || + aliasInfo.existsReadWriteInterference(s, s, domInfo) || + aliasInfo.existsReadWriteInterference(s, r, domInfo) || + aliasInfo.existsReadWriteInterference(r, s, domInfo) || + aliasInfo.existsReadWriteInterference(r, r, domInfo); + if (foundInterference) { + setInPlaceOpResult(subTensorOp->getResult(0), InPlaceSpec::False); + } else { + setInPlaceOpResult(subTensorOp->getResult(0), InPlaceSpec::True); + // When SubTensor bufferizes inplace, the result is not an equivalent buffer + // (unless some trivial canonicalization that should have happend already). + // So we leave the `setEquivalent` argument to its default `false` value. + aliasInfo.bufferizeInPlace(subTensorOp->getResult(0), + subTensorOp->getOpOperand(0)); } - return bufferizeToReadOnly(operand); + LDBG("Done bufferizing subtensor\n"); } -/// Return true if `dominator.getOwner()` dominates all other uses of -/// `dominator.get()`. -static bool dominatesAllOtherUses(OpOperand &dominator, - const DominanceInfo &domInfo) { - for (OpOperand &use : dominator.get().getUses()) { - // Same use. - if (use.getOwner() == dominator.getOwner() && - use.getOperandNumber() == dominator.getOperandNumber()) - continue; - if (!domInfo.properlyDominates(dominator.getOwner(), use.getOwner())) - return false; +/// Analyze the (opOperand, result) pair to determine whether the result can +/// be bufferized inPlace. If successful, InPlaceSpec::True is set for +/// `result`. Otherwise, InPlaceSpec::False is set for `result`. +static void bufferizableInPlaceAnalysis(OpOperand &opOperand, OpResult result, + BufferizationAliasInfo &aliasInfo, + const DominanceInfo &domInfo) { + assert(result && !isa(result.getDefiningOp()) && + "expected OpResult not coming from a SubTensorOp"); + + auto operandNumber = opOperand.getOperandNumber(); + auto resultNumber = result.getResultNumber(); + LDBG("\n"); + LDBG("Try to bufferize inplace result #" << resultNumber << " (operand #" + << operandNumber << ") in " << result + << "\n"); + + // `result` must bufferize to a writeable buffer to be a candidate. + // This means the use->def chain not backpropagate to a function that is not + // inplaceable or to a constant op to be considered. + bool createsAliasingWriteToNonWriteableBuffer = + aliasInfo.aliasesNonWriteableBuffer(opOperand); + if (createsAliasingWriteToNonWriteableBuffer) + LDBG("->the corresponding buffer is not writeable\n"); + LDBG("->bufferizes to writeable inplace buffer\n"); + + Value s = opOperand.get(), r = result; + bool foundInterference = + createsAliasingWriteToNonWriteableBuffer || + aliasInfo.existsNonDominatingRead(opOperand, domInfo) || + // if (!foundInterference) { + // // SubTensorInsertOp require additional checks. + // if (isa(opOperand.getOwner())) { + // foundInterference = + aliasInfo.existsReadWriteInterference(s, s, domInfo) || + aliasInfo.existsReadWriteInterference(s, r, domInfo) || + aliasInfo.existsReadWriteInterference(r, s, domInfo) || + aliasInfo.existsReadWriteInterference(r, r, domInfo); + // } + // } + + if (foundInterference) { + setInPlaceOpResult(result, InPlaceSpec::False); + } else { + setInPlaceOpResult(result, InPlaceSpec::True); + aliasInfo.bufferizeInPlace(result, opOperand, /*setEquivalent=*/true); } - return true; -} - -/// SubTensorOp introduces potential aliasing and a combination of things need -/// to occur to determine whether it is inplaceable. 
-static void analyzeInPlaceSubTensor(SubTensorOp subTensorOp, - const DominanceInfo &domInfo) { - // Case 1: - // a. All uses are known to bufferize to readonly buffers. - // b. The source has no use that is not dominated by subTensorOp. - // This can skip bufferizeToWriteable analysis / function boundary annotation. - if (llvm::all_of(subTensorOp.result().getUses(), - subTensorUseBufferizesToReadOnly) && - dominatesAllOtherUses(subTensorOp->getOpOperand(0), domInfo)) - return setInPlaceOpResult(subTensorOp->getResult(0), InPlaceSpec::True); - - // TODO: Implement more advanced use cases.There is a notion of transitivity - // and interference sets lurking. + LDBG("Done bufferizing result #" << resultNumber << "\n"); } /// Analyze the internals of a FuncOp to determine inplaceable ops. static void inPlaceAnalysisFuncOpInternals(FuncOp funcOp, + BufferizationAliasInfo &aliasInfo, const DominanceInfo &domInfo) { + LLVM_DEBUG(llvm::dbgs() << "\n\n"); + LDBG("Begin InPlaceAnalysisFuncOpInternals:\n" << funcOp << "\n"); assert(funcOp && funcOp->getNumRegions() > 0 && !funcOp.body().empty() && "expected a funcOp definition with a body"); - funcOp.walk([&](Operation *op) { - // Skip SubTensorOp in a first pass. - if (auto subTensorOp = dyn_cast(op)) - return analyzeInPlaceSubTensor(subTensorOp, domInfo); + // Bufferize SubTensorInsertOp greedily: we almost never want to bufferize + // the tensor "inserted into" to become out-of-place. This implementation + // does not distinguish between different SubTensorInsertOps. If we want + // finer-grained behavior, we could order the SubTensorInsertOps with some + // metric. + SmallVector subTensorInsertOps; + funcOp.walk([&](SubTensorInsertOp subTensorInsertOp) { + subTensorInsertOps.push_back(subTensorInsertOp); + }); + // Walk SubTensorInsertOps in reverse. + for (SubTensorInsertOp subTensorInsertOp : + llvm::reverse(subTensorInsertOps)) { + OpOperand &destOpOperand = subTensorInsertOp->getOpOperand(1); + bufferizableInPlaceAnalysis(destOpOperand, + getInplaceableOpResult(destOpOperand), + aliasInfo, domInfo); + } - // All other ops are checked for `isBufferizableInPlace`. + // Bufferize all ops except SubTensorOp and SubTensorInsertOp which are + // handled separately. + SmallVector ops; + funcOp.walk([&](Operation *op) { + if (isa(op)) + return; + auto isaTensor = [](Type t) { return t.isa(); }; + // No tensors => no buffers. + if (llvm::none_of(op->getOperandTypes(), isaTensor) && + llvm::none_of(op->getResultTypes(), isaTensor)) + return; + ops.push_back(op); + }); + // Walk other ops in reverse. + for (Operation *op : llvm::reverse(ops)) { for (OpOperand &opOperand : op->getOpOperands()) { - OpResult result = getMatchingOpResult(opOperand); - if (result && isBufferizableInPlace(opOperand, domInfo)) { - LLVM_DEBUG(DBGS() << "bufferizable inplace operand #" - << opOperand.getOperandNumber() << " in " << *op); - setInPlaceOpResult(result); - } + OpResult result = getInplaceableOpResult(opOperand); + if (!result) + continue; + bufferizableInPlaceAnalysis(opOperand, result, aliasInfo, domInfo); } + } + + // Finally, bufferize SubTensorOp top-down by walking the IR forward. 
+ funcOp.walk([&](SubTensorOp subTensorOp) { + bufferizableInPlaceAnalysis(subTensorOp, aliasInfo, domInfo); }); + + LDBG("End InPlaceAnalysisFuncOpInternals:\n" << funcOp << "\n"); } -static LogicalResult bufferizeFuncOpInternals( - FuncOp funcOp, BlockAndValueMapping &bvm, - const DenseMap> &tiedResultsMap) { +//===----------------------------------------------------------------------===// +// Bufferization entry-point. +//===----------------------------------------------------------------------===// + +static LogicalResult bufferizeFuncOpInternals(FuncOp funcOp, + BlockAndValueMapping &bvm) { + LLVM_DEBUG(llvm::dbgs() << "\n\n"); + LDBG("Begin BufferizeFuncOpInternals:\n" << funcOp << "\n"); OpBuilder b(funcOp->getContext()); /// Start by bufferizing `funcOp` arguments. if (failed(bufferize(b, funcOp, bvm))) @@ -919,6 +1489,8 @@ } return WalkResult::advance(); }); + LDBG("End BufferizeFuncOpInternals:\n" << funcOp << "\n"); + if (result.wasInterrupted()) return failure(); return success(); @@ -938,27 +1510,22 @@ void LinalgComprehensiveFuncBufferize::runOnFunction() { auto funcOp = getFunction(); + + // Analysis phase. DominanceInfo domInfo(funcOp); - BlockAndValueMapping bvm; - DenseMap> tiedResultsMap; - LLVM_DEBUG(llvm::dbgs() << "\n\n"); - LLVM_DEBUG(DBGS() << "Begin InPlaceAnalysisFuncOpInternals:\n" - << funcOp << "\n"); - inPlaceAnalysisFuncOpInternals(funcOp, domInfo); - LLVM_DEBUG(DBGS() << "End InPlaceAnalysisFuncOpInternals:\n" - << funcOp << "\n"); + BufferizationAliasInfo aliasInfo(funcOp); + inPlaceAnalysisFuncOpInternals(funcOp, aliasInfo, domInfo); if (testAnalysisOnly) return; - LLVM_DEBUG(llvm::dbgs() << "\n\n"); - LLVM_DEBUG(DBGS() << "Begin BufferizeFuncOpInternals:\n" << funcOp << "\n"); + // Bufferization phase. + BlockAndValueMapping bvm; auto guard = llvm::make_scope_exit([&] { funcOp.walk( [&](Operation *op) { op->removeAttr(kInPlaceResultsAttrName); }); - LLVM_DEBUG(DBGS() << "End BufferizeFuncOpInternals:\n" << funcOp << "\n"); }); - if (failed(bufferizeFuncOpInternals(funcOp, bvm, tiedResultsMap))) + if (failed(bufferizeFuncOpInternals(funcOp, bvm))) signalPassFailure(); } diff --git a/mlir/test/Dialect/Linalg/comprehensive-foo.mlir b/mlir/test/Dialect/Linalg/comprehensive-foo.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Dialect/Linalg/comprehensive-foo.mlir @@ -0,0 +1,24 @@ +// RUN: mlir-opt %s -linalg-comprehensive-func-bufferize=test-analysis-only -mlir-disable-threading -debug -split-input-file +//| FileCheck %s + +// CHECK-LABEL: func @nested_subtensor_and_insert +func @subtensor_to_linalg_write_use( + %A : tensor, + %B : tensor {linalg.inplaceable = true}, + %idx : index) + -> (tensor, tensor) +{ + // %sA = subtensor %A[0, 0][%idx, %idx][1, 1] : tensor to tensor + // %ssA = subtensor %sA[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> + // %rsA = subtensor_insert %ssA into %sA[0, 0][4, 4][1, 1] : tensor<4x4xf32> into tensor + // %rA = subtensor_insert %rsA into %A[0, 0][%idx, %idx][1, 1] : tensor into tensor + + %sB = subtensor %B[0, 0][%idx, %idx][1, 1] : tensor to tensor + %ssB = subtensor %sB[0, 0][4, %idx][1, 1] : tensor to tensor<4x?xf32> + %sssB = subtensor %ssB[0, 0][4, 4][1, 1] : tensor<4x?xf32> to tensor<4x4xf32> + %rssB = subtensor_insert %sssB into %ssB[0, 0][4, 4][1, 1] : tensor<4x4xf32> into tensor<4x?xf32> + %rsB = subtensor_insert %rssB into %sB[0, 0][4, %idx][1, 1] : tensor<4x?xf32> into tensor + %rB = subtensor_insert %rsB into %B[0, 0][%idx, %idx][1, 1] : tensor into tensor + + return %rB, %rB: tensor, tensor +} diff --git 
a/mlir/test/Dialect/Linalg/comprehensive-func-bufferize-analysis.mlir b/mlir/test/Dialect/Linalg/comprehensive-func-bufferize-analysis.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/comprehensive-func-bufferize-analysis.mlir
@@ -0,0 +1,321 @@
+// RUN: mlir-opt %s -linalg-comprehensive-func-bufferize=test-analysis-only -split-input-file | FileCheck %s
+
+//===----------------------------------------------------------------------===//
+// Simple cases
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: func @subtensor_fun
+func @subtensor_fun(%A : tensor<?xf32>, %B : tensor<?xf32> {linalg.inplaceable = true})
+  -> (tensor<4xf32>, tensor<8xf32>)
+{
+  // subtensor is not used in a write, so it is not compelled to bufferize out
+  // of place. Let callers decide whether they want to create aliasing subviews
+  // at all call sites or whether they allocate.
+  // This is true irrespective of whether the function argument is inplaceable.
+  // CHECK: subtensor
+  // CHECK-SAME: {__inplace_results_attr__ = ["true"]}
+  %r0 = subtensor %A[0][4][1] : tensor<?xf32> to tensor<4xf32>
+
+  // CHECK: subtensor
+  // CHECK-SAME: {__inplace_results_attr__ = ["true"]}
+  %r1 = subtensor %B[0][8][1] : tensor<?xf32> to tensor<8xf32>
+
+  return %r0, %r1: tensor<4xf32>, tensor<8xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @subtensor_insert_fun
+func @subtensor_insert_fun(
+    %A : tensor<?xf32>,
+    %B : tensor<?xf32> {linalg.inplaceable = true},
+    %C : tensor<4xf32>)
+  -> (tensor<?xf32>, tensor<?xf32>)
+{
+  // must bufferize out of place.
+  // CHECK: subtensor_insert
+  // CHECK-SAME: {__inplace_results_attr__ = ["false"]}
+  %r0 = subtensor_insert %C into %A[0][4][1] : tensor<4xf32> into tensor<?xf32>
+
+  // bufferizes inplace.
+  // CHECK: subtensor_insert
+  // CHECK-SAME: {__inplace_results_attr__ = ["true"]}
+  %r1 = subtensor_insert %C into %B[0][4][1] : tensor<4xf32> into tensor<?xf32>
+
+  return %r0, %r1: tensor<?xf32>, tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @conflict_on_B
+func @conflict_on_B(
+    %A : tensor<4x4xf32> {linalg.inplaceable = true},
+    %B : tensor<4x4xf32> {linalg.inplaceable = true})
+  -> (tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>)
+{
+  // matmul output operand interferes with input operand.
+  // CHECK: linalg.matmul
+  // CHECK-SAME: {__inplace_results_attr__ = ["false"]}
+  %C = linalg.matmul ins(%A, %B: tensor<4x4xf32>, tensor<4x4xf32>)
+                    outs(%B: tensor<4x4xf32>)
+    -> tensor<4x4xf32>
+
+  // matmul output operand interferes with input operand.
+  // CHECK: linalg.matmul
+  // CHECK-SAME: {__inplace_results_attr__ = ["false"]}
+  %D = linalg.matmul ins(%B, %A: tensor<4x4xf32>, tensor<4x4xf32>)
+                    outs(%B: tensor<4x4xf32>)
+    -> tensor<4x4xf32>
+
+  // matmul output operand does not interfere with input operand.
+  // CHECK: linalg.matmul
+  // CHECK-SAME: {__inplace_results_attr__ = ["true"]}
+  %E = linalg.matmul ins(%A, %A: tensor<4x4xf32>, tensor<4x4xf32>)
+                    outs(%B: tensor<4x4xf32>)
+    -> tensor<4x4xf32>
+
+  return %C, %D, %E: tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>
+}
+
+//===----------------------------------------------------------------------===//
+// Length-1 producer-consumer cases.
+//===----------------------------------------------------------------------===//
+
+// -----
+
+// CHECK-LABEL: func @subtensor_subtensor
+func @subtensor_subtensor(
+    %A : tensor<?xf32> {linalg.inplaceable = true}, %B : tensor<?xf32>)
+  -> (tensor<2xf32>, tensor<2xf32>)
+{
+  // subtensor is not used in a write, so it is not compelled to bufferize out
+  // of place. Let callers decide whether they want to create aliasing subviews
+  // at all call sites or whether they allocate.
+  // This is true irrespective of whether the function argument is inplaceable.
+  // CHECK: {__inplace_results_attr__ = ["true"]}
+  %r0 = subtensor %A[0][4][1] : tensor<?xf32> to tensor<4xf32>
+
+  // CHECK: {__inplace_results_attr__ = ["true"]}
+  %r1 = subtensor %r0[0][2][1] : tensor<4xf32> to tensor<2xf32>
+
+  // CHECK: {__inplace_results_attr__ = ["true"]}
+  %r2 = subtensor %B[0][4][1] : tensor<?xf32> to tensor<4xf32>
+
+  // CHECK: {__inplace_results_attr__ = ["true"]}
+  %r3 = subtensor %r2[0][2][1] : tensor<4xf32> to tensor<2xf32>
+
+  return %r1, %r3: tensor<2xf32>, tensor<2xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @subtensor_insert_subtensor_insert
+func @subtensor_insert_subtensor_insert(
+    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %A2 : tensor<4xf32> {linalg.inplaceable = true},
+    %A3 : tensor<2xf32> {linalg.inplaceable = true},
+    %B : tensor<?xf32>, %B2 : tensor<4xf32>, %B3 : tensor<2xf32>)
+  -> (tensor<?xf32>, tensor<?xf32>)
+{
+  // CHECK: {__inplace_results_attr__ = ["true"]}
+  %r0 = subtensor_insert %A3 into %A2[0][2][1] : tensor<2xf32> into tensor<4xf32>
+
+  // CHECK: {__inplace_results_attr__ = ["true"]}
+  %r1 = subtensor_insert %r0 into %A[0][4][1] : tensor<4xf32> into tensor<?xf32>
+
+  // CHECK: {__inplace_results_attr__ = ["false"]}
+  %r2 = subtensor_insert %B3 into %B2[0][2][1] : tensor<2xf32> into tensor<4xf32>
+
+  // CHECK: {__inplace_results_attr__ = ["false"]}
+  %r3 = subtensor_insert %r2 into %B[0][4][1] : tensor<4xf32> into tensor<?xf32>
+
+  return %r1, %r3: tensor<?xf32>, tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @subtensor_nonmatching_subtensor_insert
+func @subtensor_nonmatching_subtensor_insert(
+    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %B : tensor<?xf32>, %idx: index)
+  -> (tensor<?xf32>, tensor<?xf32>)
+{
+  // %r1 bufferizes inplace because %A is inplaceable.
+  // %r0 is an overlapping subtensor that does not match, so it must bufferize
+  // out of place.
+  // CHECK: subtensor
+  // CHECK-SAME: {__inplace_results_attr__ = ["false"]}
+  %r0 = subtensor %A[0][4][1] : tensor<?xf32> to tensor<4xf32>
+
+  // %r1 can bufferize inplace fine.
+  // CHECK: subtensor_insert
+  // CHECK-SAME: {__inplace_results_attr__ = ["true"]}
+  %r1 = subtensor_insert %r0 into %A[%idx][4][1] : tensor<4xf32> into tensor<?xf32>
+
+  // %r3 does not bufferize inplace because %B is not inplaceable.
+  // %r2 is an overlapping subtensor that does not match, but it does not alias
+  // with the buffer coming from %r3, so it can actually bufferize inplace.
+  // CHECK: subtensor
+  // CHECK-SAME: {__inplace_results_attr__ = ["true"]}
+  %r2 = subtensor %B[0][4][1] : tensor<?xf32> to tensor<4xf32>
+
+  // %r3 cannot bufferize inplace since %B is not inplaceable.
+  // CHECK: subtensor_insert
+  // CHECK-SAME: {__inplace_results_attr__ = ["false"]}
+  %r3 = subtensor_insert %r2 into %B[%idx][4][1] : tensor<4xf32> into tensor<?xf32>
+
+  return %r1, %r3: tensor<?xf32>, tensor<?xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @subtensor_matching_subtensor_insert
+func @subtensor_matching_subtensor_insert(
+    %A : tensor<?xf32> {linalg.inplaceable = true},
+    %B : tensor<?xf32>)
+  -> (tensor<?xf32>, tensor<?xf32>)
+{
+  // %r1 bufferizes inplace because %A is inplaceable.
+  // %r0 is a subtensor that matches, so it can also be bufferized inplace.
+ // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %r0 = subtensor %A[0][4][1] : tensor to tensor<4xf32> + + // CHECK: subtensor_insert + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %r1 = subtensor_insert %r0 into %A[0][4][1] : tensor<4xf32> into tensor + + // %r2 is a subtensor that matches %r3, it can be bufferized inplace. + // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %r2 = subtensor %B[0][4][1] : tensor to tensor<4xf32> + + // subtensor_insert cannot bufferize inplace. + // This should have been captured by a canonicalization pattern and it would + // be unproductive to have special logic in bufferization to encode matching + // subtensor_insert(subtensor(A), A). + // CHECK: subtensor_insert + // CHECK-SAME: {__inplace_results_attr__ = ["false"]} + %r3 = subtensor_insert %r2 into %B[0][4][1] : tensor<4xf32> into tensor + + return %r1, %r3: tensor, tensor +} + +// ----- + +// CHECK-LABEL: func @subtensor_linalg_readonly_use +func @subtensor_linalg_readonly_use( + %A : tensor, + %B : tensor<4x4xf32>, + %C : tensor<4x4xf32> {linalg.inplaceable = true}) + -> (tensor<4x4xf32>, tensor<4x4xf32>) +{ + // subtensor is only used as a read, no interference irrespective of user's + // inplace status. + // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %sA = subtensor %A[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> + + // matmul output operand is not inplaceable at the function boundary. + // CHECK: linalg.matmul + // CHECK-SAME: {__inplace_results_attr__ = ["false"]} + %D = linalg.matmul ins(%sA, %B: tensor<4x4xf32>, tensor<4x4xf32>) + outs(%B: tensor<4x4xf32>) + -> tensor<4x4xf32> + + // matmul output operand is inplaceable at the function boundary. + // CHECK: linalg.matmul + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %E = linalg.matmul ins(%sA, %B: tensor<4x4xf32>, tensor<4x4xf32>) + outs(%C: tensor<4x4xf32>) + -> tensor<4x4xf32> + + return %D, %E: tensor<4x4xf32>, tensor<4x4xf32> +} + +// ----- + +// CHECK-LABEL: func @subtensor_to_linalg_write_use +func @subtensor_to_linalg_write_use( + %A : tensor<4x4xf32>, + %B : tensor, + %C : tensor {linalg.inplaceable = true}) + -> (tensor<4x4xf32>, tensor<4x4xf32>) +{ + // Step 3. %sB forward propagates to a write in %D but it is not inplace. + // So this is only ever read and can bufferize inplace. + // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %sB = subtensor %B[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> + + // Step 2. %sB has a read interference in %E, it does not bufferize inplace. + // CHECK: linalg.matmul + // CHECK-SAME: {__inplace_results_attr__ = ["false"]} + %D = linalg.matmul ins(%B, %C: tensor, tensor) + outs(%sB: tensor<4x4xf32>) + -> tensor<4x4xf32> + + // Step 4. %sC forward propagates to an inplace write in %E. + // %sC backward propagates to %C which is inplaceable. + // As a consequence this is bufferized inplace. + // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %sC = subtensor %C[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> + + // Step 1. %sC backprops to the subtensor producer which is not considered an + // interference. This bufferizes inplace. 
+ // CHECK: linalg.matmul + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %E = linalg.matmul ins(%A, %sB: tensor<4x4xf32>, tensor<4x4xf32>) + outs(%sC: tensor<4x4xf32>) + -> tensor<4x4xf32> + + return %D, %E: tensor<4x4xf32>, tensor<4x4xf32> +} + +//===----------------------------------------------------------------------===// +// Transitive cases +//===----------------------------------------------------------------------===// + +// ----- + +// CHECK-LABEL: func @subtensor_to_linalg_write_use +func @subtensor_to_linalg_write_use( + %A : tensor<4x4xf32>, + %B : tensor, + %C : tensor {linalg.inplaceable = true}) + -> (tensor<4x4xf32>, tensor<4x4xf32>) +{ + // Step 4. %sB forward propagates to an inplace write in %D. + // %sB backward propagates to %B which is not inplaceable. + // As a consequence this is bufferized out of place. + // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["false"]} + %sB = subtensor %B[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> + + // Step 1. %sB backprops to the subtensor producer which is not considered an + // interference. This bufferizes inplace. + // CHECK: linalg.matmul + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %D = linalg.matmul ins(%B, %C: tensor, tensor) + outs(%sB: tensor<4x4xf32>) + -> tensor<4x4xf32> + + // Step 3. %sC forward propagates to an inplace write in %E. + // %sC backward propagates to %C which is inplaceable. + // As a consequence this is bufferized inplace. + // CHECK: subtensor + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %sC = subtensor %C[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> + + // Step 1. %sC backprops to the subtensor producer which is not considered an + // interference. This bufferizes inplace. + // CHECK: linalg.matmul + // CHECK-SAME: {__inplace_results_attr__ = ["true"]} + %E = linalg.matmul ins(%A, %A: tensor<4x4xf32>, tensor<4x4xf32>) + outs(%sC: tensor<4x4xf32>) + -> tensor<4x4xf32> + + return %D, %E: tensor<4x4xf32>, tensor<4x4xf32> +} diff --git a/mlir/test/Dialect/Linalg/comprehensive-func-bufferize.mlir b/mlir/test/Dialect/Linalg/comprehensive-func-bufferize.mlir --- a/mlir/test/Dialect/Linalg/comprehensive-func-bufferize.mlir +++ b/mlir/test/Dialect/Linalg/comprehensive-func-bufferize.mlir @@ -1,5 +1,4 @@ // RUN: mlir-opt %s -linalg-comprehensive-func-bufferize -split-input-file | FileCheck %s -// RUN: mlir-opt %s -linalg-comprehensive-func-bufferize=test-analysis-only -split-input-file | FileCheck %s --check-prefix=ANALYSIS // CHECK-DAG: #[[$map_2d_dyn:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)> @@ -226,133 +225,19 @@ func @subtensor_fun(%A : tensor {linalg.inplaceable = true}) -> tensor<4xf32> { - // CHECK: %[[BUFFER_CAST_A:.*]] = memref.buffer_cast {{.*}} : memref - // CHECK: %[[SV:.*]] = memref.subview %[[BUFFER_CAST_A]][0] [4] [1] - // CHECK: linalg.copy(%[[SV]], %[[ALLOC]]) - %r0 = subtensor %A[0][4][1] : tensor to tensor<4xf32> - return %r0: tensor<4xf32> -} - -// ----- - -// ANALYSIS-LABEL: func @subtensor_readonly_use -func @subtensor_readonly_use( - %A : tensor {linalg.inplaceable = true}, - %B : tensor<4x4xf32>, %C : tensor<4x4xf32>) -> tensor<4x4xf32> -{ - // subtensor is only used as a read. - // ANALYSIS: subtensor {{.*}} {__inplace_results_attr__ = ["true"]} - %sA = subtensor %A[0, 0][4, 4][1, 1] : tensor to tensor<4x4xf32> - // matmul output operand is not inplaceable at the function boundary. 
- // ANALYSIS: linalg.matmul {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %D = linalg.matmul ins(%sA, %B: tensor<4x4xf32>, tensor<4x4xf32>) - outs(%B: tensor<4x4xf32>) - -> tensor<4x4xf32> - return %D: tensor<4x4xf32> -} - -// ----- - -// ANALYSIS-LABEL: func @subtensor_nonmatching_subtensor_insert_inplace -func @subtensor_nonmatching_subtensor_insert_inplace( - %A : tensor {linalg.inplaceable = true}, %idx: index) - -> tensor -{ - // subtensor has no matching subtensor_insert and is not just used by known - // readonly ops. - // ANALYSIS: subtensor {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %r0 = subtensor %A[0][4][1] : tensor to tensor<4xf32> - // subtensor_insert can bufferize inplace fine. - // ANALYSIS: subtensor_insert {{.*}} {__inplace_results_attr__ = ["true"]} - %r1 = subtensor_insert %r0 into %A[%idx][4][1] : tensor<4xf32> into tensor - return %r1: tensor -} - -// ----- - -// ANALYSIS-LABEL: func @subtensor_nonmatching_subtensor_insert_non_inplace -func @subtensor_nonmatching_subtensor_insert_non_inplace( - %A : tensor {linalg.inplaceable = false}, %idx: index) - -> tensor -{ - // subtensor has no matching subtensor_insert and is not just used by known - // readonly ops. - // ANALYSIS: subtensor {{.*}} {__inplace_results_attr__ = ["true"]} - %r0 = subtensor %A[0][4][1] : tensor to tensor<4xf32> - // subtensor_insert cannot bufferize inplace. - // ANALYSIS: subtensor_insert {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %r1 = subtensor_insert %r0 into %A[%idx][4][1] : tensor<4xf32> into tensor - return %r1: tensor -} - -// ----- - -// ANALYSIS-LABEL: func @subtensor_matching_subtensor_insert -func @subtensor_matching_subtensor_insert(%A : tensor {linalg.inplaceable = true}) - -> tensor -{ - // subtensor has a matching subtensor_insert that bufferizes inplace. - // TODO: Atm subtensor is not inplaceable but can be. - // In the grander scheme, this will canonicalize away beforehand. - // ANALYSIS: subtensor {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %r0 = subtensor %A[0][4][1] : tensor to tensor<4xf32> - // subtensor_insert can bufferize inplace fine. - // ANALYSIS: subtensor_insert {{.*}} {__inplace_results_attr__ = ["true"]} - %r1 = subtensor_insert %r0 into %A[0][4][1] : tensor<4xf32> into tensor - return %r1: tensor -} - -// ----- - -// ANALYSIS-LABEL: func @subtensor_matching_and_nonmatching_1 -func @subtensor_matching_and_nonmatching_1(%A : tensor {linalg.inplaceable = true}, %idx: index) - -> (tensor, tensor) -{ - // %r1 is not inplaceable and %r2 is a matching subtensor_insert so %r0 could - // be inplaceable. - // In the grander scheme, %r2 will canonicalize away beforehand but %r0 will still - // not be inplaceable as the production of %r1 may involve a self-copy. 
- // ANALYSIS: subtensor {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %r0 = subtensor %A[0][4][1] : tensor to tensor<4xf32> - // ANALYSIS: subtensor_insert {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %r1 = subtensor_insert %r0 into %A[%idx][4][1] : tensor<4xf32> into tensor - // ANALYSIS: subtensor_insert {{.*}} {__inplace_results_attr__ = ["true"]} - %r2 = subtensor_insert %r0 into %A[0][4][1] : tensor<4xf32> into tensor - return %r1, %r2: tensor, tensor -} - -// ----- - -// ANALYSIS-LABEL: func @subtensor_matching_and_nonmatching_2 -func @subtensor_matching_and_nonmatching_2(%A : tensor {linalg.inplaceable = true}, %idx: index) - -> (tensor, tensor) -{ - // %r1 is not inplaceable and %r2 is a matching subtensor_insert so %r0 should - // be inplaceable. - // In the grander scheme, %r2 will canonicalize away beforehand and %r0 will become - // inplaceable by reducing to the `subtensor_nonmatching_subtensor_insert_non_inplace` - // case, - // ANALYSIS: subtensor {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} + // This bufferizes to a pattern that the cross-function boundary pass needs to + // convert into a new memref argument at all call site; this may be either: + // - an externally created aliasing subview (if we want to allow aliasing + // function arguments). + // - a new alloc + copy (more expensive but does not create new function + // argument aliasing). + // CHECK-NOT: alloc + // CHECK-NOT: copy + // CHECK: %[[BUFFER_CAST_A:.*]] = memref.buffer_cast {{.*}} : memref to tensor<4xf32> - // ANALYSIS: subtensor_insert {{.*}} - // ANALYSIS-NOT: {__inplace_results_attr__ = ["true"]} - %r2 = subtensor_insert %r0 into %A[0][4][1] : tensor<4xf32> into tensor - // ANALYSIS: subtensor_insert {{.*}} {__inplace_results_attr__ = ["true"]} - %r1 = subtensor_insert %r0 into %A[%idx][4][1] : tensor<4xf32> into tensor - return %r1, %r2: tensor, tensor + // CHECK: return %[[RES]] + return %r0: tensor<4xf32> } - -// ----- - -// TODO: unknown ops, linalg chain success, linalg chain failure. -
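
Note on the `__inplace_results_attr__` markers the analysis tests above FileCheck for: the analysis records one "true"/"false" string per op result, and the bufferization run strips the attribute again (the scope_exit removing kInPlaceResultsAttrName) unless test-analysis-only is set. Below is a minimal sketch, using only generic MLIR attribute APIs, of how such a marker can be attached; the helper name `setInPlaceResults` is hypothetical and only illustrates the encoding the tests match, not the pass's internal implementation.

```
// Hypothetical helper, for illustration only: record one "true"/"false"
// string per op result under the attribute name the analysis tests check.
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "llvm/ADT/SmallVector.h"

static void setInPlaceResults(mlir::Operation *op,
                              llvm::ArrayRef<bool> inPlacePerResult) {
  mlir::OpBuilder b(op->getContext());
  llvm::SmallVector<mlir::Attribute, 4> flags;
  for (bool inPlace : inPlacePerResult)
    flags.push_back(b.getStringAttr(inPlace ? "true" : "false"));
  // Prints e.g. {__inplace_results_attr__ = ["true"]} on a single-result op.
  op->setAttr("__inplace_results_attr__", b.getArrayAttr(flags));
}
```

With the attribute kept in the IR (test-analysis-only), FileCheck patterns such as `// CHECK-SAME: {__inplace_results_attr__ = ["true"]}` match this printed form directly.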