diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -45,7 +45,7 @@
 /// Returns internal type encoding for primary storage. Keep these
 /// values consistent with the sparse runtime support library.
-static unsigned getPrimaryTypeEncoding(Type tp) {
+static uint32_t getPrimaryTypeEncoding(Type tp) {
   if (tp.isF64())
     return 1;
   if (tp.isF32())
@@ -63,7 +63,7 @@
 /// Returns internal type encoding for overhead storage. Keep these
 /// values consistent with the sparse runtime support library.
-static unsigned getOverheadTypeEncoding(unsigned width) {
+static uint32_t getOverheadTypeEncoding(unsigned width) {
   switch (width) {
   default:
     return 1;
@@ -78,7 +78,7 @@
 /// Returns internal dimension level type encoding. Keep these
 /// values consistent with the sparse runtime support library.
-static unsigned
+static uint32_t
 getDimLevelTypeEncoding(SparseTensorEncodingAttr::DimLevelType dlt) {
   switch (dlt) {
   case SparseTensorEncodingAttr::DimLevelType::Dense:
@@ -103,12 +103,6 @@
   return rewriter.create<arith::ConstantIndexOp>(loc, i);
 }
 
-/// Generates a constant of `i64` type.
-inline static Value constantI64(ConversionPatternRewriter &rewriter,
-                                Location loc, int64_t i) {
-  return rewriter.create<arith::ConstantIntOp>(loc, i, 64);
-}
-
 /// Generates a constant of `i32` type.
 inline static Value constantI32(ConversionPatternRewriter &rewriter,
                                 Location loc, int32_t i) {
@@ -246,11 +240,9 @@
   params.push_back(genBuffer(rewriter, loc, attrs));
   // Dimension sizes array of the enveloping tensor. Useful for either
   // verification of external data, or for construction of internal data.
-  // The index type is casted to I64 for API consistency.
-  Type iTp = rewriter.getI64Type();
   SmallVector<Value> sizes;
   for (Value s : szs)
-    sizes.push_back(rewriter.create<arith::IndexCastOp>(loc, s, iTp));
+    sizes.push_back(s);
   params.push_back(genBuffer(rewriter, loc, sizes));
   // Dimension order permutation array. This is the "identity" permutation by
   // default, or otherwise the "reverse" permutation of a given ordering, so
@@ -258,21 +250,21 @@
   SmallVector<Value> rev(sz);
   if (AffineMap p = enc.getDimOrdering()) {
     for (unsigned i = 0; i < sz; i++)
-      rev[p.getDimPosition(i)] = constantI64(rewriter, loc, i);
+      rev[p.getDimPosition(i)] = constantIndex(rewriter, loc, i);
   } else {
     for (unsigned i = 0; i < sz; i++)
-      rev[i] = constantI64(rewriter, loc, i);
+      rev[i] = constantIndex(rewriter, loc, i);
   }
   params.push_back(genBuffer(rewriter, loc, rev));
   // Secondary and primary types encoding.
   ShapedType resType = op->getResult(0).getType().cast<ShapedType>();
-  unsigned secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
-  unsigned secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
-  unsigned primary = getPrimaryTypeEncoding(resType.getElementType());
+  uint32_t secPtr = getOverheadTypeEncoding(enc.getPointerBitWidth());
+  uint32_t secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
+  uint32_t primary = getPrimaryTypeEncoding(resType.getElementType());
   assert(primary);
-  params.push_back(constantI64(rewriter, loc, secPtr));
-  params.push_back(constantI64(rewriter, loc, secInd));
-  params.push_back(constantI64(rewriter, loc, primary));
+  params.push_back(constantI32(rewriter, loc, secPtr));
+  params.push_back(constantI32(rewriter, loc, secInd));
+  params.push_back(constantI32(rewriter, loc, primary));
   // User action and pointer.
   Type pTp = LLVM::LLVMPointerType::get(rewriter.getI8Type());
   if (!ptr)
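The three encoder helpers above only work because the runtime support library decodes the exact same raw values; the matching OverheadTypeEnum/PrimaryTypeEnum declarations appear in the SparseUtils.cpp half of this patch. A minimal standalone sketch of that contract, using only the values visible in this diff (further enumerators are elided here):

```cpp
#include <cassert>
#include <cstdint>

// Runtime-side view of the contract, copied from this patch.
enum OverheadTypeEnum : uint32_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
enum PrimaryTypeEnum : uint32_t { kF64 = 1, kF32 = 2, kI64 = 3 };

// Compiler-side view: mirrors getOverheadTypeEncoding(), where a pointer or
// index bitwidth of 0 (the attribute default) maps to 64-bit storage.
static uint32_t encodeOverhead(unsigned width) {
  switch (width) {
  default:
    return kU64;
  case 32:
    return kU32;
  case 16:
    return kU16;
  case 8:
    return kU8;
  }
}

int main() {
  // If the two sides ever drift apart, calls into the runtime dispatch to
  // the wrong instantiation with no diagnostic, hence the "keep consistent"
  // comments on both sides.
  assert(encodeOverhead(0) == kU64 && encodeOverhead(32) == kU32);
  return 0;
}
```

Moving both sides to uint32_t, as this patch does, makes the width of these codes an explicit part of the ABI rather than an accident of `unsigned`.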
@@ -608,7 +600,7 @@
   Type eltType = resType.cast<ShapedType>().getElementType();
   StringRef name;
   if (eltType.isIndex())
-    name = "sparsePointers"; // 64-bit, but its own name for unique signature
+    name = "sparsePointers";
   else if (eltType.isInteger(64))
     name = "sparsePointers64";
   else if (eltType.isInteger(32))
@@ -637,7 +629,7 @@
   Type eltType = resType.cast<ShapedType>().getElementType();
   StringRef name;
   if (eltType.isIndex())
-    name = "sparseIndices"; // 64-bit, but its own name for unique signature
+    name = "sparseIndices";
   else if (eltType.isInteger(64))
     name = "sparseIndices64";
   else if (eltType.isInteger(32))
diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -273,7 +273,7 @@
     if (tensor) {
       assert(tensor->getRank() == rank);
       for (uint64_t r = 0; r < rank; r++)
-        assert(tensor->getSizes()[perm[r]] == sizes[r] || sizes[r] == 0);
+        assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
       tensor->sort(); // sort lexicographically
       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
                                            tensor);
@@ -306,8 +306,8 @@
     while (lo < hi) {
       assert(lo < elements.size() && hi <= elements.size());
       // Find segment in interval with same index elements in this dimension.
-      unsigned idx = elements[lo].indices[d];
-      unsigned seg = lo + 1;
+      uint64_t idx = elements[lo].indices[d];
+      uint64_t seg = lo + 1;
       while (seg < hi && elements[seg].indices[d] == idx)
         seg++;
       // Handle segment in interval for sparse or dense dimension.
@@ -505,14 +505,12 @@
 
 extern "C" {
 
-/// Helper method to read a sparse tensor filename from the environment,
-/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
-char *getTensorFilename(uint64_t id) {
-  char var[80];
-  sprintf(var, "TENSOR%" PRIu64, id);
-  char *env = getenv(var);
-  return env;
-}
+/// This type is used in the public API at all places where MLIR expects
+/// values with the built-in type "index". For now, we simply assume that
+/// type is 64-bit, but targets with different "index" bit widths should link
+/// with an alternatively built runtime support library.
+// TODO: support such targets?
+typedef uint64_t index_t;
 
 //===----------------------------------------------------------------------===//
 //
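The TODO above leaves open how a target whose `index` type is narrower than 64 bits would obtain a matching library. One hypothetical shape for such an "alternatively built runtime support library" is a compile-time switch; note that `MLIR_SPARSE_INDEX_BITWIDTH` is an invented macro name for this sketch, not an existing build flag:

```cpp
#include <cstdint>

// Hypothetical build-time knob (assumption, not part of this patch):
// pick the width of index_t when compiling the support library, so the
// library's ABI matches the target's "index" lowering.
#ifndef MLIR_SPARSE_INDEX_BITWIDTH
#define MLIR_SPARSE_INDEX_BITWIDTH 64
#endif

#if MLIR_SPARSE_INDEX_BITWIDTH == 64
typedef uint64_t index_t;
#elif MLIR_SPARSE_INDEX_BITWIDTH == 32
typedef uint32_t index_t;
#else
#error "unsupported index bitwidth"
#endif

static_assert(sizeof(index_t) * 8 == MLIR_SPARSE_INDEX_BITWIDTH,
              "index_t width must match the configured bitwidth");
```

Anything formatted against the fixed width would have to track the knob as well, e.g. the `"%" PRIu64` format in getTensorFilename (re-added later in this patch).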
@@ -525,9 +523,9 @@
 //
 //===----------------------------------------------------------------------===//
 
-enum OverheadTypeEnum : uint64_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
+enum OverheadTypeEnum : uint32_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
 
-enum PrimaryTypeEnum : uint64_t {
+enum PrimaryTypeEnum : uint32_t {
   kF64 = 1,
   kF32 = 2,
   kI64 = 3,
@@ -576,7 +574,7 @@
 #define IMPL2(NAME, TYPE, LIB)                                                \
   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,    \
-                           uint64_t d) {                                     \
+                           index_t d) {                                      \
     assert(ref);                                                             \
     assert(tensor);                                                          \
     std::vector<TYPE> *v;                                                    \
@@ -589,17 +587,17 @@
 #define IMPL3(NAME, TYPE)                                                    \
   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                        \
-                            StridedMemRefType<uint64_t, 1> *iref,            \
-                            StridedMemRefType<uint64_t, 1> *pref) {          \
+                            StridedMemRefType<index_t, 1> *iref,             \
+                            StridedMemRefType<index_t, 1> *pref) {           \
     assert(tensor);                                                          \
     assert(iref);                                                            \
     assert(pref);                                                            \
     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                  \
     assert(iref->sizes[0] == pref->sizes[0]);                                \
-    const uint64_t *indx = iref->data + iref->offset;                        \
-    const uint64_t *perm = pref->data + pref->offset;                        \
+    const index_t *indx = iref->data + iref->offset;                         \
+    const index_t *perm = pref->data + pref->offset;                         \
     uint64_t isize = iref->sizes[0];                                         \
-    std::vector<uint64_t> indices(isize);                                    \
+    std::vector<index_t> indices(isize);                                     \
    for (uint64_t r = 0; r < isize; r++)                                      \
       indices[perm[r]] = indx[r];                                            \
     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);       \
@@ -617,17 +615,17 @@
 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
 void *
 _mlir_ciface_newSparseTensor(StridedMemRefType<uint8_t, 1> *aref, // NOLINT
-                             StridedMemRefType<uint64_t, 1> *sref,
-                             StridedMemRefType<uint64_t, 1> *pref,
-                             uint64_t ptrTp, uint64_t indTp, uint64_t valTp,
+                             StridedMemRefType<index_t, 1> *sref,
+                             StridedMemRefType<index_t, 1> *pref,
+                             uint32_t ptrTp, uint32_t indTp, uint32_t valTp,
                              uint32_t action, void *ptr) {
   assert(aref && sref && pref);
   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
          pref->strides[0] == 1);
   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
   const uint8_t *sparsity = aref->data + aref->offset;
-  const uint64_t *sizes = sref->data + sref->offset;
-  const uint64_t *perm = pref->data + pref->offset;
+  const index_t *sizes = sref->data + sref->offset;
+  const index_t *perm = pref->data + pref->offset;
   uint64_t rank = aref->sizes[0];
 
   // Double matrices with all combinations of overhead storage.
@@ -687,12 +685,12 @@
 }
 
 /// Methods that provide direct access to pointers, indices, and values.
-IMPL2(sparsePointers, uint64_t, getPointers)
+IMPL2(sparsePointers, index_t, getPointers)
 IMPL2(sparsePointers64, uint64_t, getPointers)
 IMPL2(sparsePointers32, uint32_t, getPointers)
 IMPL2(sparsePointers16, uint16_t, getPointers)
 IMPL2(sparsePointers8, uint8_t, getPointers)
-IMPL2(sparseIndices, uint64_t, getIndices)
+IMPL2(sparseIndices, index_t, getIndices)
 IMPL2(sparseIndices64, uint64_t, getIndices)
 IMPL2(sparseIndices32, uint32_t, getIndices)
 IMPL2(sparseIndices16, uint16_t, getIndices)
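Since all of the `sparsePointers*`/`sparseIndices*` entry points are stamped out by IMPL2, one concrete expansion helps when reading the instantiation list above. The macro body past `std::vector<TYPE> *v;` is cut off in this diff, so the tail of this sketch (exposing the runtime-owned vector through the memref descriptor in place, without copying) is an assumption filled in from context; the stand-in types exist only to make the sketch compile on its own:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

typedef uint64_t index_t;

// Minimal stand-ins; the real definitions live in CRunnerUtils.h and in the
// storage classes of SparseUtils.cpp.
template <typename T, int N>
struct StridedMemRefType {
  T *basePtr;
  T *data;
  int64_t offset;
  int64_t sizes[N];
  int64_t strides[N];
};

struct SparseTensorStorageBase {
  // The real class dispatches per overhead type; this stub returns an
  // empty vector so the expansion below can run standalone.
  void getPointers(std::vector<uint32_t> **out, index_t) {
    static std::vector<uint32_t> empty;
    *out = &empty;
  }
};

// Roughly what IMPL2(sparsePointers32, uint32_t, getPointers) expands to.
void _mlir_ciface_sparsePointers32(StridedMemRefType<uint32_t, 1> *ref,
                                   void *tensor, index_t d) {
  assert(ref);
  assert(tensor);
  std::vector<uint32_t> *v;
  static_cast<SparseTensorStorageBase *>(tensor)->getPointers(&v, d);
  // Assumed remainder of the macro body: wrap the vector in the 1-d memref
  // descriptor that the generated code expects.
  ref->basePtr = ref->data = v->data();
  ref->offset = 0;
  ref->sizes[0] = v->size();
  ref->strides[0] = 1;
}
```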
@@ -726,8 +724,17 @@
 //
 //===----------------------------------------------------------------------===//
 
+/// Helper method to read a sparse tensor filename from the environment,
+/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
+char *getTensorFilename(index_t id) {
+  char var[80];
+  sprintf(var, "TENSOR%" PRIu64, id);
+  char *env = getenv(var);
+  return env;
+}
+
 /// Returns size of sparse tensor in given dimension.
-uint64_t sparseDimSize(void *tensor, uint64_t d) {
+index_t sparseDimSize(void *tensor, index_t d) {
   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
 }
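Both functions in the hunk above deliberately use plain C types: they are called directly rather than through the `_mlir_ciface_` wrappers, e.g. from compiled MLIR or from a C++ test harness. A usage sketch with the `extern "C"` declarations copied from this patch; the tensor handle is left abstract since the `newSparseTensor` setup is not shown here:

```cpp
#include <cinttypes>
#include <cstdio>

typedef uint64_t index_t; // must match the support library build

extern "C" char *getTensorFilename(index_t id);
extern "C" index_t sparseDimSize(void *tensor, index_t d);

// `tensor` is the opaque handle returned by the runtime; `rank` must match
// the tensor it refers to.
void dumpDims(void *tensor, index_t rank) {
  if (char *filename = getTensorFilename(0)) // reads ${TENSOR0}
    printf("tensor file: %s\n", filename);
  for (index_t d = 0; d < rank; d++)
    printf("dim %" PRIu64 " = %" PRIu64 "\n", d, sparseDimSize(tensor, d));
}
```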
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -70,11 +70,11 @@
 // CHECK-LABEL: func @sparse_new1d(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
@@ -85,11 +85,11 @@
 // CHECK-LABEL: func @sparse_new2d(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
@@ -100,11 +100,11 @@
 // CHECK-LABEL: func @sparse_new3d(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
@@ -118,15 +118,13 @@
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
-// CHECK-DAG: %[[II:.*]] = arith.index_cast %[[I]] : index to i64
-// CHECK-DAG: %[[JJ:.*]] = arith.index_cast %[[J]] : index to i64
-// CHECK-DAG: memref.store %[[II]], %[[Q]][%[[C0]]] : memref<2xi64>
-// CHECK-DAG: memref.store %[[JJ]], %[[Q]][%[[C1]]] : memref<2xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
 // CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>
@@ -158,11 +156,11 @@
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 // CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 // CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
@@ -182,11 +180,11 @@
 // CHECK-LABEL: func @sparse_convert_1d_ss(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 // CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>
@@ -200,11 +198,11 @@
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 // CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 // CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
@@ -229,11 +227,11 @@
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 // CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 // CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
@@ -263,11 +261,11 @@
 // CHECK-DAG: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?x?xf64>
 // CHECK-DAG: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf64>
 // CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
-// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64>
-// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64>
+// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
+// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 // CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
-// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref<?xi64>
-// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref<?xi64>
+// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
+// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 // CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 // CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 // CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>
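A note on why the memref<?xi64> to memref<?xindex> CHECK updates above are ABI-neutral: across the C interface, a ranked 1-d memref travels as a descriptor struct, which the runtime half of this patch types as StridedMemRefType<index_t, 1>. The struct below mirrors the one in mlir/ExecutionEngine/CRunnerUtils.h and is shown only to illustrate the layout, not as a redefinition:

```cpp
#include <cstdint>

// Descriptor for a ranked memref of element type T and rank N, as passed to
// the _mlir_ciface_* entry points (mirrors CRunnerUtils.h).
template <typename T, int N>
struct StridedMemRefType {
  T *basePtr;
  T *data;
  int64_t offset;
  int64_t sizes[N];
  int64_t strides[N];
};

// With index_t fixed to 64 bits, a memref<?xindex> argument lowers to the
// same descriptor layout as the old memref<?xi64>, so the tests can drop the
// arith.index_cast noise while the runtime signatures stay compatible on
// targets whose "index" type is 64-bit.
typedef uint64_t index_t;
using IndexMemRef1D = StridedMemRefType<index_t, 1>;
```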