diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h --- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h +++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h @@ -265,6 +265,10 @@ return isOutputTensor(tid) && isSparseOut; } + bool isValidLevel(TensorId tid, Level lvl) const { + return tid < lvlTypes.size() && lvl < lvlTypes[tid].size(); + } + /// Prepares loop for iterating over `tensor[lvl]`, under the assumption /// that `tensor[0...lvl-1]` loops have already been set up. void prepareLoopOverTensorAtLvl(OpBuilder &builder, Location loc, diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp @@ -456,7 +456,7 @@ for (auto [t, l] : llvm::zip(tids, lvls)) { // TODO: this check for validity of the (t,l) pairs should be // checked/enforced at the callsites, if possible. - assert(t < lvlTypes.size() && l < lvlTypes[t].size()); + assert(isValidLevel(t, l)); assert(!coords[t][l]); // We cannot re-enter the same level const auto lvlTp = lvlTypes[t][l]; const bool isSparse = isCompressedDLT(lvlTp) || isSingletonDLT(lvlTp); @@ -572,7 +572,7 @@ Operation *LoopEmitter::enterFilterLoopOverTensorAtLvl( OpBuilder &builder, Location loc, TensorId tid, Level lvl, AffineExpr affine, MutableArrayRef<Value> reduc) { - assert(tid < lvlTypes.size() && lvl < lvlTypes[tid].size()); + assert(isValidLevel(tid, lvl)); assert(!affine.isa<AffineDimExpr>() && !isDenseDLT(lvlTypes[tid][lvl])); // We can not re-enter the same level. 
assert(!coords[tid][lvl]); @@ -862,7 +862,7 @@ void LoopEmitter::prepareLoopOverTensorAtLvl(OpBuilder &builder, Location loc, TensorId tid, Level dstLvl) { - assert(tid < lvlTypes.size() && dstLvl < lvlTypes[tid].size()); + assert(isValidLevel(tid, dstLvl)); const auto lvlTp = lvlTypes[tid][dstLvl]; if (isDenseDLT(lvlTp))