diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -31,7 +31,7 @@
 
 /// New tensor storage action. Keep these values consistent with
 /// the sparse runtime support library.
-enum Action : uint32_t {
+enum Action : unsigned {
   kEmpty = 0,
   kFromFile = 1,
   kFromCOO = 2,
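
These values travel verbatim as the `action` argument of `newSparseTensor`, so the runtime support library must use the same numbering. A minimal sketch of a mirrored runtime-side definition, assuming the contract above (the enum name is hypothetical; only the values are fixed by the patch):

  // Hypothetical mirror of the compiler-side Action values in the runtime.
  // Any drift between the two copies silently changes what newSparseTensor does.
  enum RuntimeAction : uint32_t {
    kActionEmpty = 0,    // allocate empty storage
    kActionFromFile = 1, // read the tensor from an external file
    kActionFromCOO = 2,  // build storage from a coordinate-scheme object
  };
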
@@ -103,12 +103,6 @@
   return rewriter.create<arith::ConstantIndexOp>(loc, i);
 }
 
-/// Generates a constant of `i64` type.
-inline static Value constantI64(ConversionPatternRewriter &rewriter,
-                                Location loc, int64_t i) {
-  return rewriter.create<arith::ConstantIntOp>(loc, i, 64);
-}
-
 /// Generates a constant of `i32` type.
 inline static Value constantI32(ConversionPatternRewriter &rewriter,
                                 Location loc, int32_t i) {
@@ -246,11 +240,9 @@
   params.push_back(genBuffer(rewriter, loc, attrs));
   // Dimension sizes array of the enveloping tensor. Useful for either
   // verification of external data, or for construction of internal data.
-  // The index type is casted to I64 for API consistency.
-  Type iTp = rewriter.getI64Type();
   SmallVector<Value, 4> sizes;
   for (Value s : szs)
-    sizes.push_back(rewriter.create<arith::IndexCastOp>(loc, s, iTp));
+    sizes.push_back(s);
   params.push_back(genBuffer(rewriter, loc, sizes));
   // Dimension order permutation array. This is the "identity" permutation by
   // default, or otherwise the "reverse" permutation of a given ordering, so
@@ -258,10 +250,10 @@
   SmallVector<Value, 4> rev(sz);
   if (AffineMap p = enc.getDimOrdering()) {
     for (unsigned i = 0; i < sz; i++)
-      rev[p.getDimPosition(i)] = constantI64(rewriter, loc, i);
+      rev[p.getDimPosition(i)] = constantIndex(rewriter, loc, i);
   } else {
     for (unsigned i = 0; i < sz; i++)
-      rev[i] = constantI64(rewriter, loc, i);
+      rev[i] = constantIndex(rewriter, loc, i);
   }
   params.push_back(genBuffer(rewriter, loc, rev));
   // Secondary and primary types encoding.
@@ -270,9 +262,9 @@
   unsigned secInd = getOverheadTypeEncoding(enc.getIndexBitWidth());
   unsigned primary = getPrimaryTypeEncoding(resType.getElementType());
   assert(primary);
-  params.push_back(constantI64(rewriter, loc, secPtr));
-  params.push_back(constantI64(rewriter, loc, secInd));
-  params.push_back(constantI64(rewriter, loc, primary));
+  params.push_back(constantI32(rewriter, loc, secPtr));
+  params.push_back(constantI32(rewriter, loc, secInd));
+  params.push_back(constantI32(rewriter, loc, primary));
   // User action and pointer.
   Type pTp = LLVM::LLVMPointerType::get(rewriter.getI8Type());
   if (!ptr)
@@ -608,7 +600,7 @@
     Type eltType = resType.cast<ShapedType>().getElementType();
     StringRef name;
     if (eltType.isIndex())
-      name = "sparsePointers"; // 64-bit, but its own name for unique signature
+      name = "sparsePointers";
     else if (eltType.isInteger(64))
       name = "sparsePointers64";
     else if (eltType.isInteger(32))
@@ -637,7 +629,7 @@
     Type eltType = resType.cast<ShapedType>().getElementType();
     StringRef name;
     if (eltType.isIndex())
-      name = "sparseIndices"; // 64-bit, but its own name for unique signature
+      name = "sparseIndices";
     else if (eltType.isInteger(64))
       name = "sparseIndices64";
     else if (eltType.isInteger(32))
diff --git a/mlir/lib/ExecutionEngine/SparseUtils.cpp b/mlir/lib/ExecutionEngine/SparseUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseUtils.cpp
@@ -273,7 +273,7 @@
     if (tensor) {
       assert(tensor->getRank() == rank);
       for (uint64_t r = 0; r < rank; r++)
-        assert(tensor->getSizes()[perm[r]] == sizes[r] || sizes[r] == 0);
+        assert(sizes[r] == 0 || tensor->getSizes()[perm[r]] == sizes[r]);
       tensor->sort(); // sort lexicographically
       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
                                            tensor);
@@ -505,14 +505,12 @@
 
 extern "C" {
 
-/// Helper method to read a sparse tensor filename from the environment,
-/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
-char *getTensorFilename(uint64_t id) {
-  char var[80];
-  sprintf(var, "TENSOR%" PRIu64, id);
-  char *env = getenv(var);
-  return env;
-}
+/// This type is used in the public API at all places where MLIR expects
+/// values with the built-in type "index". For now, we simply assume that
+/// type is 64-bit, but targets with different "index" bit widths should link
+/// with an alternatively built runtime support library.
+// TODO: support such targets?
+typedef uint64_t index_t;
 
 //===----------------------------------------------------------------------===//
 //
@@ -525,9 +523,9 @@
 //
 //===----------------------------------------------------------------------===//
 
-enum OverheadTypeEnum : uint64_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
+enum OverheadTypeEnum : uint32_t { kU64 = 1, kU32 = 2, kU16 = 3, kU8 = 4 };
 
-enum PrimaryTypeEnum : uint64_t {
+enum PrimaryTypeEnum : uint32_t {
   kF64 = 1,
   kF32 = 2,
   kI64 = 3,
@@ -576,7 +574,7 @@
 
 #define IMPL2(NAME, TYPE, LIB)                                                 \
   void _mlir_ciface_##NAME(StridedMemRefType<TYPE, 1> *ref, void *tensor,      \
-                           uint64_t d) {                                       \
+                           index_t d) {                                        \
     assert(ref);                                                               \
     assert(tensor);                                                            \
     std::vector<TYPE> *v;                                                      \
@@ -589,17 +587,17 @@
 
 #define IMPL3(NAME, TYPE)                                                      \
   void *_mlir_ciface_##NAME(void *tensor, TYPE value,                          \
-                            StridedMemRefType<uint64_t, 1> *iref,              \
-                            StridedMemRefType<uint64_t, 1> *pref) {            \
+                            StridedMemRefType<index_t, 1> *iref,               \
+                            StridedMemRefType<index_t, 1> *pref) {             \
     assert(tensor);                                                            \
     assert(iref);                                                              \
     assert(pref);                                                              \
     assert(iref->strides[0] == 1 && pref->strides[0] == 1);                    \
     assert(iref->sizes[0] == pref->sizes[0]);                                  \
-    const uint64_t *indx = iref->data + iref->offset;                          \
-    const uint64_t *perm = pref->data + pref->offset;                          \
+    const index_t *indx = iref->data + iref->offset;                           \
+    const index_t *perm = pref->data + pref->offset;                           \
     uint64_t isize = iref->sizes[0];                                           \
-    std::vector<uint64_t> indices(isize);                                      \
+    std::vector<index_t> indices(isize);                                       \
     for (uint64_t r = 0; r < isize; r++)                                       \
       indices[perm[r]] = indx[r];                                              \
     static_cast<SparseTensorCOO<TYPE> *>(tensor)->add(indices, value);         \
@@ -617,17 +615,17 @@
 /// kToCOO = returns coordinate scheme from storage in ptr to use with kFromCOO
 void *
 _mlir_ciface_newSparseTensor(StridedMemRefType<uint8_t, 1> *aref, // NOLINT
-                             StridedMemRefType<uint64_t, 1> *sref,
-                             StridedMemRefType<uint64_t, 1> *pref,
-                             uint64_t ptrTp, uint64_t indTp, uint64_t valTp,
+                             StridedMemRefType<index_t, 1> *sref,
+                             StridedMemRefType<index_t, 1> *pref,
+                             uint32_t ptrTp, uint32_t indTp, uint32_t valTp,
                              uint32_t action, void *ptr) {
   assert(aref && sref && pref);
   assert(aref->strides[0] == 1 && sref->strides[0] == 1 &&
          pref->strides[0] == 1);
   assert(aref->sizes[0] == sref->sizes[0] && sref->sizes[0] == pref->sizes[0]);
   const uint8_t *sparsity = aref->data + aref->offset;
-  const uint64_t *sizes = sref->data + sref->offset;
-  const uint64_t *perm = pref->data + pref->offset;
+  const index_t *sizes = sref->data + sref->offset;
+  const index_t *perm = pref->data + pref->offset;
   uint64_t rank = aref->sizes[0];
 
   // Double matrices with all combinations of overhead storage.
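
With the new signature, a host-side caller fills the size and permutation memrefs with index_t values and passes the type encodings as uint32_t. A minimal C++ sketch under those assumptions (the per-dimension sparsity value 1 is assumed to mean "compressed"; encodings follow the enums above: kU64 = 1, kF64 = 1, kEmpty = 0):

  // Sketch: request empty storage for a compressed 1-D vector of 128 doubles.
  uint8_t sparsity[1] = {1};   // assumed: 1 == compressed dimension
  index_t sizes[1] = {128};
  index_t perm[1] = {0};       // identity permutation

  StridedMemRefType<uint8_t, 1> aref = {sparsity, sparsity, 0, {1}, {1}};
  StridedMemRefType<index_t, 1> sref = {sizes, sizes, 0, {1}, {1}};
  StridedMemRefType<index_t, 1> pref = {perm, perm, 0, {1}, {1}};

  void *tensor = _mlir_ciface_newSparseTensor(
      &aref, &sref, &pref,
      /*ptrTp=*/1, /*indTp=*/1, /*valTp=*/1, // kU64, kU64, kF64
      /*action=*/0, /*ptr=*/nullptr);        // kEmpty, no external data
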
@@ -687,12 +685,12 @@
 }
 
 /// Methods that provide direct access to pointers, indices, and values.
-IMPL2(sparsePointers, uint64_t, getPointers)
+IMPL2(sparsePointers, index_t, getPointers)
 IMPL2(sparsePointers64, uint64_t, getPointers)
 IMPL2(sparsePointers32, uint32_t, getPointers)
 IMPL2(sparsePointers16, uint16_t, getPointers)
 IMPL2(sparsePointers8, uint8_t, getPointers)
-IMPL2(sparseIndices, uint64_t, getIndices)
+IMPL2(sparseIndices, index_t, getIndices)
 IMPL2(sparseIndices64, uint64_t, getIndices)
 IMPL2(sparseIndices32, uint32_t, getIndices)
 IMPL2(sparseIndices16, uint16_t, getIndices)
@@ -726,8 +724,17 @@
 //
 //===----------------------------------------------------------------------===//
 
+/// Helper method to read a sparse tensor filename from the environment,
+/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
+char *getTensorFilename(index_t id) {
+  char var[80];
+  sprintf(var, "TENSOR%" PRIu64, id);
+  char *env = getenv(var);
+  return env;
+}
+
 /// Returns size of sparse tensor in given dimension.
-uint64_t sparseDimSize(void *tensor, uint64_t d) {
+index_t sparseDimSize(void *tensor, index_t d) {
   return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
 }
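
A short usage sketch for the relocated getTensorFilename helper above, assuming the usual ${TENSOR0} convention (the path below is a placeholder):

  // Hypothetical driver snippet: export TENSOR0, then resolve it through the
  // helper exactly as a compiled integration test would.
  setenv("TENSOR0", "/path/to/tensor.mtx", /*overwrite=*/1);
  char *filename = getTensorFilename(/*id=*/0);
  assert(filename && "TENSOR0 not set in the environment");
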
 
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -70,11 +70,11 @@
 // CHECK-LABEL: func @sparse_new1d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
@@ -85,11 +85,11 @@
 // CHECK-LABEL: func @sparse_new2d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
@@ -100,11 +100,11 @@
 // CHECK-LABEL: func @sparse_new3d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
@@ -118,15 +118,13 @@
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[II:.*]] = arith.index_cast %[[I]] : index to i64
-//   CHECK-DAG: %[[JJ:.*]] = arith.index_cast %[[J]] : index to i64
-//   CHECK-DAG: memref.store %[[II]], %[[Q]][%[[C0]]] : memref<2xi64>
-//   CHECK-DAG: memref.store %[[JJ]], %[[Q]][%[[C1]]] : memref<2xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
+//   CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
@@ -158,11 +156,11 @@
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
@@ -182,11 +180,11 @@
 // CHECK-LABEL: func @sparse_convert_1d_ss(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
@@ -200,11 +198,11 @@
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
@@ -229,11 +227,11 @@
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
@@ -263,11 +261,11 @@
 //   CHECK-DAG: %[[U2:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?x?xf64>
 //   CHECK-DAG: %[[U3:.*]] = tensor.dim %[[A]], %[[C2]] : tensor<?x?x?xf64>
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
-//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xi64>
-//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xi64>
+//   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
+//   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
-//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xi64> to memref<?xi64>
-//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xi64> to memref<?xi64>
+//   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
+//   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>