diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h
@@ -94,16 +94,16 @@
 /// tensor expression.
 struct LatPoint {
   LatPoint(unsigned n, unsigned e, unsigned b);
-  LatPoint(const llvm::BitVector &b, unsigned e);
+  LatPoint(const BitVector &b, unsigned e);

   /// Conjunction of tensor loop indices as bitvector. This represents
   /// all indices involved in the tensor expression.
-  llvm::BitVector bits;
+  BitVector bits;

   /// Simplified conjunction of tensor loop indices as bitvector. This
   /// represents a simplified condition under which this tensor expression
   /// must execute. Pre-computed during codegen to avoid repeated eval.
-  llvm::BitVector simple;
+  BitVector simple;

   /// Index of the tensor expression.
   unsigned exp;
@@ -163,7 +163,7 @@
   /// within the given set using just two basic rules:
   ///   (1) multiple dense conditions are reduced to single dense, and
   ///   (2) a *singleton* sparse/dense is reduced to sparse/random access.
-  llvm::BitVector simplifyCond(unsigned s0, unsigned p0);
+  BitVector simplifyCond(unsigned s0, unsigned p0);

   /// Returns true if Li > Lj.
   bool latGT(unsigned i, unsigned j) const;
@@ -190,7 +190,7 @@
   }

   /// Returns true if any set bit corresponds to queried dim.
-  bool hasAnyDimOf(const llvm::BitVector &bits, Dim d) const;
+  bool hasAnyDimOf(const BitVector &bits, Dim d) const;

   /// Returns true if given tensor iterates *only* in the given tensor
   /// expression. For the output tensor, this defines a "simply dynamic"
@@ -217,7 +217,7 @@
   void dumpExp(unsigned e) const;
   void dumpLat(unsigned p) const;
   void dumpSet(unsigned s) const;
-  void dumpBits(const llvm::BitVector &bits) const;
+  void dumpBits(const BitVector &bits) const;
 #endif

   /// Builds the iteration lattices in a bottom-up traversal given the remaining
diff --git a/mlir/include/mlir/IR/Block.h b/mlir/include/mlir/IR/Block.h
--- a/mlir/include/mlir/IR/Block.h
+++ b/mlir/include/mlir/IR/Block.h
@@ -111,7 +111,7 @@
   void eraseArguments(ArrayRef<unsigned> argIndices);
   /// Erases the arguments that have their corresponding bit set in
   /// `eraseIndices` and removes them from the argument list.
-  void eraseArguments(const llvm::BitVector &eraseIndices);
+  void eraseArguments(const BitVector &eraseIndices);
   /// Erases arguments using the given predicate. If the predicate returns true,
   /// that argument is erased.
   void eraseArguments(function_ref<bool(BlockArgument)> shouldEraseFn);
diff --git a/mlir/include/mlir/IR/BuiltinTypes.td b/mlir/include/mlir/IR/BuiltinTypes.td
--- a/mlir/include/mlir/IR/BuiltinTypes.td
+++ b/mlir/include/mlir/IR/BuiltinTypes.td
@@ -166,8 +166,8 @@
                                           TypeRange resultTypes);

     /// Returns a new function type without the specified arguments and results.
-    FunctionType getWithoutArgsAndResults(const llvm::BitVector &argIndices,
-                                          const llvm::BitVector &resultIndices);
+    FunctionType getWithoutArgsAndResults(const BitVector &argIndices,
+                                          const BitVector &resultIndices);
   }];
 }
diff --git a/mlir/include/mlir/IR/FunctionInterfaces.h b/mlir/include/mlir/IR/FunctionInterfaces.h
--- a/mlir/include/mlir/IR/FunctionInterfaces.h
+++ b/mlir/include/mlir/IR/FunctionInterfaces.h
@@ -83,11 +83,11 @@
                              unsigned originalNumResults, Type newType);

 /// Erase the specified arguments and update the function type attribute.
-void eraseFunctionArguments(Operation *op, const llvm::BitVector &argIndices,
+void eraseFunctionArguments(Operation *op, const BitVector &argIndices,
                             Type newType);

 /// Erase the specified results and update the function type attribute.
-void eraseFunctionResults(Operation *op, const llvm::BitVector &resultIndices,
+void eraseFunctionResults(Operation *op, const BitVector &resultIndices,
                           Type newType);

 /// Set a FunctionOpInterface operation's type signature.
@@ -101,7 +101,7 @@

 /// Filters out any elements referenced by `indices`. If any types are removed,
 /// `storage` is used to hold the new type list. Returns the new type list.
-TypeRange filterTypesOut(TypeRange types, const llvm::BitVector &indices,
+TypeRange filterTypesOut(TypeRange types, const BitVector &indices,
                          SmallVectorImpl<Type> &storage);

 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/IR/FunctionInterfaces.td b/mlir/include/mlir/IR/FunctionInterfaces.td
--- a/mlir/include/mlir/IR/FunctionInterfaces.td
+++ b/mlir/include/mlir/IR/FunctionInterfaces.td
@@ -281,13 +281,13 @@
     /// Erase a single argument at `argIndex`.
    void eraseArgument(unsigned argIndex) {
-      llvm::BitVector argsToErase($_op.getNumArguments());
+      BitVector argsToErase($_op.getNumArguments());
       argsToErase.set(argIndex);
       eraseArguments(argsToErase);
     }

     /// Erases the arguments listed in `argIndices`.
-    void eraseArguments(const llvm::BitVector &argIndices) {
+    void eraseArguments(const BitVector &argIndices) {
       Type newType = $_op.getTypeWithoutArgs(argIndices);
       function_interface_impl::eraseFunctionArguments(
         this->getOperation(), argIndices, newType);
@@ -295,13 +295,13 @@

     /// Erase a single result at `resultIndex`.
     void eraseResult(unsigned resultIndex) {
-      llvm::BitVector resultsToErase($_op.getNumResults());
+      BitVector resultsToErase($_op.getNumResults());
       resultsToErase.set(resultIndex);
       eraseResults(resultsToErase);
     }

     /// Erases the results listed in `resultIndices`.
-    void eraseResults(const llvm::BitVector &resultIndices) {
+    void eraseResults(const BitVector &resultIndices) {
       Type newType = $_op.getTypeWithoutResults(resultIndices);
       function_interface_impl::eraseFunctionResults(
         this->getOperation(), resultIndices, newType);
@@ -326,7 +326,7 @@
     /// results. This is used to update the function's signature in the
     /// `eraseArguments` and `eraseResults` methods.
     Type getTypeWithoutArgsAndResults(
-        const llvm::BitVector &argIndices, const llvm::BitVector &resultIndices) {
+        const BitVector &argIndices, const BitVector &resultIndices) {
       SmallVector<Type> argStorage, resultStorage;
       TypeRange newArgTypes = function_interface_impl::filterTypesOut(
           $_op.getArgumentTypes(), argIndices, argStorage);
@@ -334,13 +334,13 @@
           $_op.getResultTypes(), resultIndices, resultStorage);
       return $_op.cloneTypeWith(newArgTypes, newResultTypes);
     }
-    Type getTypeWithoutArgs(const llvm::BitVector &argIndices) {
+    Type getTypeWithoutArgs(const BitVector &argIndices) {
       SmallVector<Type> argStorage;
       TypeRange newArgTypes = function_interface_impl::filterTypesOut(
           $_op.getArgumentTypes(), argIndices, argStorage);
       return $_op.cloneTypeWith(newArgTypes, $_op.getResultTypes());
     }
-    Type getTypeWithoutResults(const llvm::BitVector &resultIndices) {
+    Type getTypeWithoutResults(const BitVector &resultIndices) {
       SmallVector<Type> resultStorage;
       TypeRange newResultTypes = function_interface_impl::filterTypesOut(
           $_op.getResultTypes(), resultIndices, resultStorage);
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -232,7 +232,7 @@
   /// Erases the operands that have their corresponding bit set in
   /// `eraseIndices` and removes them from the operand list.
-  void eraseOperands(const llvm::BitVector &eraseIndices) {
+  void eraseOperands(const BitVector &eraseIndices) {
     getOperandStorage().eraseOperands(eraseIndices);
   }
diff --git a/mlir/include/mlir/IR/OperationSupport.h b/mlir/include/mlir/IR/OperationSupport.h
--- a/mlir/include/mlir/IR/OperationSupport.h
+++ b/mlir/include/mlir/IR/OperationSupport.h
@@ -666,7 +666,7 @@
   /// Erase the operands held by the storage that have their corresponding bit
   /// set in `eraseIndices`.
-  void eraseOperands(const llvm::BitVector &eraseIndices);
+  void eraseOperands(const BitVector &eraseIndices);

   /// Get the operation operands held by the storage.
   MutableArrayRef<OpOperand> getOperands() { return {operandStorage, size()}; }
diff --git a/mlir/include/mlir/Support/LLVM.h b/mlir/include/mlir/Support/LLVM.h
--- a/mlir/include/mlir/Support/LLVM.h
+++ b/mlir/include/mlir/Support/LLVM.h
@@ -42,6 +42,7 @@
 // Containers.
 template <typename T> class ArrayRef;
+class BitVector;
 namespace detail {
 template <typename KeyT, typename ValueT> struct DenseMapPair;
 } // namespace detail
@@ -91,6 +92,7 @@
 //
 // Containers.
 using llvm::ArrayRef;
+using llvm::BitVector;
 template <typename T, typename Enable = void>
 using DenseMapInfo = llvm::DenseMapInfo<T, Enable>;
...
   SmallVector<Type> erasedResultTypes;
-  llvm::BitVector erasedResultIndices(functionType.getNumResults());
+  BitVector erasedResultIndices(functionType.getNumResults());
   for (const auto &resultType : llvm::enumerate(functionType.getResults())) {
     if (resultType.value().isa()) {
       erasedResultIndices.set(resultType.index());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
--- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp
@@ -98,7 +98,7 @@
   // case due to the output operand. For reductions, we need to check that after
   // the fusion, each loop dimension has at least one input that defines it.
   if ((consumer.getNumReductionLoops())) {
-    llvm::BitVector coveredDims(consumer.getNumLoops(), false);
+    BitVector coveredDims(consumer.getNumLoops(), false);

     auto addToCoveredDims = [&](AffineMap map) {
       for (auto result : map.getResults())
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -619,7 +619,7 @@
                                 SmallVectorImpl<int> &segments) {

   // Check done[clause] to see if it has been parsed already
-  llvm::BitVector done(ClauseType::COUNT, false);
+  BitVector done(ClauseType::COUNT, false);

   // See pos[clause] to get position of clause in operand segments
   SmallVector<int> pos(ClauseType::COUNT, -1);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -1004,7 +1004,7 @@
 /// maintain the universal index.
 static bool genInit(Merger &merger, CodeGen &codegen, PatternRewriter &rewriter,
                     linalg::GenericOp op, std::vector<unsigned> &topSort,
-                    unsigned at, llvm::BitVector &inits) {
+                    unsigned at, BitVector &inits) {
   bool needsUniv = false;
   Location loc = op.getLoc();
   unsigned idx = topSort[at];
@@ -1104,7 +1104,7 @@
 static Operation *genFor(Merger &merger, CodeGen &codegen,
                          PatternRewriter &rewriter, linalg::GenericOp op,
                          bool isOuter, bool isInner, unsigned idx,
-                         llvm::BitVector &indices) {
+                         BitVector &indices) {
   unsigned fb = indices.find_first();
   unsigned tensor = merger.tensor(fb);
   assert(idx == merger.index(fb));
@@ -1175,7 +1175,7 @@
 static Operation *genWhile(Merger &merger, CodeGen &codegen,
                            PatternRewriter &rewriter, linalg::GenericOp op,
                            unsigned idx, bool needsUniv,
-                           llvm::BitVector &indices) {
+                           BitVector &indices) {
   SmallVector<Type, 4> types;
   SmallVector<Value, 4> operands;
   // Construct the while-loop with a parameter for each index.
@@ -1242,7 +1242,7 @@
 static Operation *genLoop(Merger &merger, CodeGen &codegen,
                           PatternRewriter &rewriter, linalg::GenericOp op,
                           std::vector<unsigned> &topSort, unsigned at,
-                          bool needsUniv, llvm::BitVector &indices) {
+                          bool needsUniv, BitVector &indices) {
   unsigned idx = topSort[at];
   if (indices.count() == 1) {
     bool isOuter = at == 0;
@@ -1258,7 +1258,7 @@
 static void genLocals(Merger &merger, CodeGen &codegen,
                       PatternRewriter &rewriter, linalg::GenericOp op,
                       std::vector<unsigned> &topSort, unsigned at,
-                      bool needsUniv, llvm::BitVector &locals) {
+                      bool needsUniv, BitVector &locals) {
   Location loc = op.getLoc();
   unsigned idx = topSort[at];
@@ -1322,7 +1322,7 @@
 static void genWhileInduction(Merger &merger, CodeGen &codegen,
                               PatternRewriter &rewriter, linalg::GenericOp op,
                               unsigned idx, bool needsUniv,
-                              llvm::BitVector &induction,
+                              BitVector &induction,
                               scf::WhileOp whileOp) {
   Location loc = op.getLoc();
   // Finalize each else branch of all if statements.
@@ -1409,7 +1409,7 @@
 /// Generates a single if-statement within a while-loop.
 static scf::IfOp genIf(Merger &merger, CodeGen &codegen,
                        PatternRewriter &rewriter, linalg::GenericOp op,
-                       unsigned idx, llvm::BitVector &conditions) {
+                       unsigned idx, BitVector &conditions) {
   Location loc = op.getLoc();
   SmallVector<Type, 4> types;
   Value cond;
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -65,7 +65,7 @@
   bits.set(b);
 }

-LatPoint::LatPoint(const llvm::BitVector &b, unsigned e)
+LatPoint::LatPoint(const BitVector &b, unsigned e)
     : bits(b), simple(), exp(e) {}

 //===----------------------------------------------------------------------===//
@@ -93,7 +93,7 @@
 unsigned Merger::conjLatPoint(Kind kind, unsigned p0, unsigned p1) {
   unsigned p = latPoints.size();
-  llvm::BitVector nb = llvm::BitVector(latPoints[p0].bits);
+  BitVector nb = BitVector(latPoints[p0].bits);
   nb |= latPoints[p1].bits;
   unsigned e = addExp(kind, latPoints[p0].exp, latPoints[p1].exp);
   latPoints.push_back(LatPoint(nb, e));
@@ -164,7 +164,7 @@
   return s;
 }

-llvm::BitVector Merger::simplifyCond(unsigned s0, unsigned p0) {
+BitVector Merger::simplifyCond(unsigned s0, unsigned p0) {
   // First determine if this lattice point is a *singleton*, i.e.,
   // the last point in a lattice, no other is less than this one.
   bool isSingleton = true;
@@ -175,7 +175,7 @@
     }
   }
   // Now apply the two basic rules.
-  llvm::BitVector simple = latPoints[p0].bits;
+  BitVector simple = latPoints[p0].bits;
   bool reset = isSingleton && hasAnyDimOf(simple, kSparse);
   for (unsigned b = 0, be = simple.size(); b < be; b++) {
     if (simple[b] && !isDim(b, kSparse)) {
@@ -188,8 +188,8 @@
 }

 bool Merger::latGT(unsigned i, unsigned j) const {
-  const llvm::BitVector &bitsi = latPoints[i].bits;
-  const llvm::BitVector &bitsj = latPoints[j].bits;
+  const BitVector &bitsi = latPoints[i].bits;
+  const BitVector &bitsj = latPoints[j].bits;
   assert(bitsi.size() == bitsj.size());
   if (bitsi.count() > bitsj.count()) {
     for (unsigned b = 0, be = bitsj.size(); b < be; b++)
@@ -201,12 +201,12 @@
 }

 bool Merger::onlyDenseDiff(unsigned i, unsigned j) {
-  llvm::BitVector tmp = latPoints[j].bits;
+  BitVector tmp = latPoints[j].bits;
   tmp ^= latPoints[i].bits;
   return !hasAnyDimOf(tmp, kSparse);
 }

-bool Merger::hasAnyDimOf(const llvm::BitVector &bits, Dim d) const {
+bool Merger::hasAnyDimOf(const BitVector &bits, Dim d) const {
   for (unsigned b = 0, be = bits.size(); b < be; b++)
     if (bits[b] && isDim(b, d))
       return true;
@@ -386,7 +386,7 @@
   llvm::dbgs() << "}\n";
 }

-void Merger::dumpBits(const llvm::BitVector &bits) const {
+void Merger::dumpBits(const BitVector &bits) const {
   for (unsigned b = 0, be = bits.size(); b < be; b++) {
     if (bits[b]) {
       unsigned t = tensor(b);
diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -187,13 +187,13 @@
 }

 void Block::eraseArguments(ArrayRef<unsigned> argIndices) {
-  llvm::BitVector eraseIndices(getNumArguments());
+  BitVector eraseIndices(getNumArguments());
   for (unsigned i : argIndices)
     eraseIndices.set(i);
   eraseArguments(eraseIndices);
 }

-void Block::eraseArguments(const llvm::BitVector &eraseIndices) {
+void Block::eraseArguments(const BitVector &eraseIndices) {
   eraseArguments(
       [&](BlockArgument arg) { return eraseIndices.test(arg.getArgNumber()); });
 }
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -172,8 +172,8 @@
 /// Returns a new function type without the specified arguments and results.
 FunctionType
-FunctionType::getWithoutArgsAndResults(const llvm::BitVector &argIndices,
-                                       const llvm::BitVector &resultIndices) {
+FunctionType::getWithoutArgsAndResults(const BitVector &argIndices,
+                                       const BitVector &resultIndices) {
   SmallVector<Type> argStorage, resultStorage;
   TypeRange newArgTypes = function_interface_impl::filterTypesOut(
       getInputs(), argIndices, argStorage);
diff --git a/mlir/lib/IR/FunctionInterfaces.cpp b/mlir/lib/IR/FunctionInterfaces.cpp
--- a/mlir/lib/IR/FunctionInterfaces.cpp
+++ b/mlir/lib/IR/FunctionInterfaces.cpp
@@ -200,7 +200,7 @@
 }

 void mlir::function_interface_impl::eraseFunctionArguments(
-    Operation *op, const llvm::BitVector &argIndices, Type newType) {
+    Operation *op, const BitVector &argIndices, Type newType) {
   // There are 3 things that need to be updated:
   // - Function type.
   // - Arg attrs.
@@ -223,7 +223,7 @@
 }

 void mlir::function_interface_impl::eraseFunctionResults(
-    Operation *op, const llvm::BitVector &resultIndices, Type newType) {
+    Operation *op, const BitVector &resultIndices, Type newType) {
   // There are 2 things that need to be updated:
   // - Function type.
   // - Result attrs.
@@ -263,7 +263,7 @@

 TypeRange
 mlir::function_interface_impl::filterTypesOut(TypeRange types,
-                                              const llvm::BitVector &indices,
+                                              const BitVector &indices,
                                               SmallVectorImpl<Type> &storage) {
   if (indices.none())
     return types;
diff --git a/mlir/lib/IR/OperationSupport.cpp b/mlir/lib/IR/OperationSupport.cpp
--- a/mlir/lib/IR/OperationSupport.cpp
+++ b/mlir/lib/IR/OperationSupport.cpp
@@ -293,7 +293,7 @@
 }

 void detail::OperandStorage::eraseOperands(
-    const llvm::BitVector &eraseIndices) {
+    const BitVector &eraseIndices) {
   MutableArrayRef<OpOperand> operands = getOperands();
   assert(eraseIndices.size() == operands.size());
diff --git a/mlir/test/lib/IR/TestFunc.cpp b/mlir/test/lib/IR/TestFunc.cpp
--- a/mlir/test/lib/IR/TestFunc.cpp
+++ b/mlir/test/lib/IR/TestFunc.cpp
@@ -87,7 +87,7 @@
     auto module = getOperation();

     for (FuncOp func : module.getOps<FuncOp>()) {
-      llvm::BitVector indicesToErase(func.getNumArguments());
+      BitVector indicesToErase(func.getNumArguments());
       for (auto argIndex : llvm::seq<int>(0, func.getNumArguments()))
         if (func.getArgAttr(argIndex, "test.erase_this_arg"))
           indicesToErase.set(argIndex);
@@ -107,7 +107,7 @@
     auto module = getOperation();

     for (FuncOp func : module.getOps<FuncOp>()) {
-      llvm::BitVector indicesToErase(func.getNumResults());
+      BitVector indicesToErase(func.getNumResults());
       for (auto resultIndex : llvm::seq<int>(0, func.getNumResults()))
         if (func.getResultAttr(resultIndex, "test.erase_this_result"))
           indicesToErase.set(resultIndex);
diff --git a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp
--- a/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/AttrOrTypeFormatGen.cpp
@@ -577,7 +577,7 @@
   const AttrOrTypeDef &def;

   /// Seen attribute or type parameters.
-  llvm::BitVector seenParams;
+  BitVector seenParams;
 };
 } // namespace
diff --git a/mlir/tools/mlir-tblgen/OpFormatGen.cpp b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
--- a/mlir/tools/mlir-tblgen/OpFormatGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpFormatGen.cpp
@@ -1880,7 +1880,7 @@
   // Get a string containing all of the cases that can't be represented with a
   // keyword.
-  llvm::BitVector nonKeywordCases(cases.size());
+  BitVector nonKeywordCases(cases.size());
   bool hasStrCase = false;
   for (auto &it : llvm::enumerate(cases)) {
     hasStrCase = it.value().isStrCase();
diff --git a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
--- a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
+++ b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
@@ -88,7 +88,7 @@
   /// but there is no required ordering within groups.
   bool latPointWithinRange(unsigned s, unsigned p, unsigned n,
                            const std::shared_ptr<Pattern> &pattern,
-                           const llvm::BitVector &bits) {
+                           const BitVector &bits) {
     for (unsigned i = p; i < p + n; ++i) {
       if (compareExpression(merger.lat(merger.set(s)[i]).exp, pattern) &&
           compareBits(s, i, bits))
@@ -100,22 +100,22 @@
   /// Wrapper over latPointWithinRange for readability of tests.
   void expectLatPointWithinRange(unsigned s, unsigned p, unsigned n,
                                  const std::shared_ptr<Pattern> &pattern,
-                                 const llvm::BitVector &bits) {
+                                 const BitVector &bits) {
     EXPECT_TRUE(latPointWithinRange(s, p, n, pattern, bits));
   }

   /// Wrapper over expectLatPointWithinRange for a single lat point.
   void expectLatPoint(unsigned s, unsigned p,
                       const std::shared_ptr<Pattern> &pattern,
-                      const llvm::BitVector &bits) {
+                      const BitVector &bits) {
     EXPECT_TRUE(latPointWithinRange(s, p, 1, pattern, bits));
   }

   /// Converts a vector of (loop, tensor) pairs to a bitvector with the
   /// corresponding bits set.
-  llvm::BitVector
+  BitVector
   loopsToBits(const std::vector<std::pair<unsigned, unsigned>> &loops) {
-    llvm::BitVector testBits = llvm::BitVector(numTensors + 1, false);
+    BitVector testBits = BitVector(numTensors + 1, false);
     for (auto l : loops) {
       auto loop = std::get<0>(l);
       auto tensor = std::get<1>(l);
@@ -125,7 +125,7 @@
   }

   /// Returns true if the bits of lattice point p in set s match the given bits.
-  bool compareBits(unsigned s, unsigned p, const llvm::BitVector &bits) {
+  bool compareBits(unsigned s, unsigned p, const BitVector &bits) {
     return merger.lat(merger.set(s)[p]).bits == bits;
   }
diff --git a/mlir/unittests/IR/OperationSupportTest.cpp b/mlir/unittests/IR/OperationSupportTest.cpp
--- a/mlir/unittests/IR/OperationSupportTest.cpp
+++ b/mlir/unittests/IR/OperationSupportTest.cpp
@@ -164,7 +164,7 @@
   // Create an operation with operands to erase.
   Operation *user =
       createOp(&context, {operand2, operand1, operand2, operand1});
-  llvm::BitVector eraseIndices(user->getNumOperands());
+  BitVector eraseIndices(user->getNumOperands());

   // Check erasing no operands.
   user->eraseOperands(eraseIndices);
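
Illustrative sketch (not part of the patch): the change forward-declares `llvm::BitVector` in mlir/Support/LLVM.h and re-exports it with `using llvm::BitVector;`, so MLIR code can spell the type unqualified. The erase-by-mask idiom behind `eraseArguments`, `eraseResults`, and `eraseOperands` above boils down to the following standalone example, which depends only on llvm/ADT/BitVector.h; the element count of 4 and the printed labels are invented for the demonstration.

    // Standalone demonstration of the BitVector erase-mask idiom.
    #include "llvm/ADT/BitVector.h"
    #include <cstdio>

    int main() {
      // One bit per element; a set bit marks an element to erase.
      llvm::BitVector eraseIndices(4); // four elements, all bits initially false
      eraseIndices.set(1);             // mark element 1 for erasure
      eraseIndices.set(3);             // mark element 3 for erasure
      // An eraseArguments/eraseOperands-style API walks the mask and drops
      // exactly the elements whose bit is set.
      for (unsigned i = 0, e = eraseIndices.size(); i < e; ++i)
        std::printf("index %u: %s\n", i, eraseIndices.test(i) ? "erase" : "keep");
      return 0;
    }

Inside MLIR itself the same code would simply name the type `BitVector`, which is the point of the `using llvm::BitVector;` line added above.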