diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_insert_1d.mlir
@@ -1,10 +1,8 @@
 // RUN: mlir-opt %s --sparse-compiler=enable-runtime-library=false | \
-// RUN: mlir-cpu-runner \
-// RUN: -e entry -entry-point-result=void \
-// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void | \
 // RUN: FileCheck %s
 
-// Insertion example using pure codegen (no sparse runtime support lib).
+// Insertion example using pure codegen (no support lib).
 
 #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
 
@@ -44,9 +42,10 @@
     %c0 = arith.constant 0 : index
     %c1 = arith.constant 1 : index
     %c3 = arith.constant 3 : index
+    %c8 = arith.constant 8 : index
     %c1023 = arith.constant 1023 : index
 
-    // Build the sparse vector from code.
+    // Build the sparse vector from straightline code.
     %0 = bufferization.alloc_tensor() : tensor<1024xf32, #SparseVector>
     %1 = sparse_tensor.insert %f1 into %0[%c0] : tensor<1024xf32, #SparseVector>
     %2 = sparse_tensor.insert %f2 into %1[%c1] : tensor<1024xf32, #SparseVector>
@@ -59,7 +58,22 @@
     // CHECK-NEXT: ( 1, 2, 1, 2, 99, 99, 99, 99 )
     call @dump(%5) : (tensor<1024xf32, #SparseVector>) -> ()
 
+    // Build another sparse vector in a loop.
+    %6 = bufferization.alloc_tensor() : tensor<1024xf32, #SparseVector>
+    %7 = scf.for %i = %c0 to %c8 step %c1 iter_args(%vin = %6) -> tensor<1024xf32, #SparseVector> {
+      %ii = arith.muli %i, %c3 : index
+      %vout = sparse_tensor.insert %f1 into %vin[%ii] : tensor<1024xf32, #SparseVector>
+      scf.yield %vout : tensor<1024xf32, #SparseVector>
+    }
+
+    // CHECK:
+    // CHECK-NEXT:
+    // CHECK-NEXT:
+    call @dump(%7) : (tensor<1024xf32, #SparseVector>) -> ()
+
+    // Free resources.
     bufferization.dealloc_tensor %5 : tensor<1024xf32, #SparseVector>
+    bufferization.dealloc_tensor %7 : tensor<1024xf32, #SparseVector>
     return
   }
 }
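
For context, the patch calls a @dump helper that is defined elsewhere in the test file and, judging by the FileCheck lines, prints the contents of the sparse vector. Below is only a hedged sketch of what such a helper could look like, not the actual body from the test: it assumes the helper prints the number of stored entries followed by the first eight stored values, reuses the #SparseVector encoding from the test, and takes the padding value 99 and the vector width 8 from the checked output above.

// Hypothetical sketch only, not part of the patch: a possible @dump helper.
#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

func.func @dump(%arg0: tensor<1024xf32, #SparseVector>) {
  %c0 = arith.constant 0 : index
  // Assumed padding value, matching the 99s in the CHECK line above.
  %pad = arith.constant 99.0 : f32
  // Print the number of stored entries.
  %noe = sparse_tensor.number_of_entries %arg0 : tensor<1024xf32, #SparseVector>
  vector.print %noe : index
  // Print the first eight elements of the values array; out-of-bounds
  // positions are filled with the padding constant.
  %values = sparse_tensor.values %arg0 : tensor<1024xf32, #SparseVector> to memref<?xf32>
  %v = vector.transfer_read %values[%c0], %pad : memref<?xf32>, vector<8xf32>
  vector.print %v : vector<8xf32>
  return
}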