diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -75,18 +75,17 @@
     %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xi32, #SparseMatrix>
 
     // Initialize dense vectors.
-    %init_256 = bufferization.alloc_tensor(%c256) : tensor<?xi32>
-    %b = scf.for %i = %c0 to %c256 step %c1 iter_args(%t = %init_256) -> tensor<?xi32> {
+    %b = tensor.generate %c256 {
+    ^bb0(%i : index):
       %k = arith.addi %i, %c1 : index
       %j = arith.index_cast %k : index to i32
-      %t2 = tensor.insert %j into %t[%i] : tensor<?xi32>
-      scf.yield %t2 : tensor<?xi32>
-    }
-    %init_4 = bufferization.alloc_tensor(%c4) : tensor<?xi32>
-    %x = scf.for %i = %c0 to %c4 step %c1 iter_args(%t = %init_4) -> tensor<?xi32> {
-      %t2 = tensor.insert %i0 into %t[%i] : tensor<?xi32>
-      scf.yield %t2 : tensor<?xi32>
-    }
+      tensor.yield %j : i32
+    } : tensor<?xi32>
+
+    %x = tensor.generate %c4 {
+    ^bb0(%i : index):
+      tensor.yield %i0 : i32
+    } : tensor<?xi32>
 
     // Call kernel.
     %0 = call @kernel_matvec(%a, %b, %x)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -82,42 +82,30 @@
     %lsz = tensor.dim %b, %cst2 : tensor<?x?x?xf64, #SparseTensor>
 
     // Initialize dense input matrix C.
-    %c0 = bufferization.alloc_tensor(%ksz, %jsz) : tensor<?x?xf64>
-    %c = scf.for %k = %cst0 to %ksz step %cst1 iter_args(%c1 = %c0) -> tensor<?x?xf64> {
-      %c2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%c3 = %c1) -> tensor<?x?xf64> {
-        %k0 = arith.muli %k, %jsz : index
-        %k1 = arith.addi %k0, %j : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %kf = arith.sitofp %k2 : i32 to f64
-        %c4 = tensor.insert %kf into %c3[%k, %j] : tensor<?x?xf64>
-        scf.yield %c4 : tensor<?x?xf64>
-      }
-      scf.yield %c2 : tensor<?x?xf64>
-    }
+    %c = tensor.generate %ksz, %jsz {
+    ^bb0(%k : index, %j : index):
+      %k0 = arith.muli %k, %jsz : index
+      %k1 = arith.addi %k0, %j : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %kf = arith.sitofp %k2 : i32 to f64
+      tensor.yield %kf : f64
+    } : tensor<?x?xf64>
 
     // Initialize dense input matrix D.
-    %d0 = bufferization.alloc_tensor(%lsz, %jsz) : tensor<?x?xf64>
-    %d = scf.for %l = %cst0 to %lsz step %cst1 iter_args(%d1 = %d0) -> tensor<?x?xf64> {
-      %d2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%d3 = %d1) -> tensor<?x?xf64> {
-        %k0 = arith.muli %l, %jsz : index
-        %k1 = arith.addi %k0, %j : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %kf = arith.sitofp %k2 : i32 to f64
-        %d4 = tensor.insert %kf into %d3[%l, %j] : tensor<?x?xf64>
-        scf.yield %d4 : tensor<?x?xf64>
-      }
-      scf.yield %d2 : tensor<?x?xf64>
-    }
+    %d = tensor.generate %lsz, %jsz {
+    ^bb0(%l : index, %j : index):
+      %k0 = arith.muli %l, %jsz : index
+      %k1 = arith.addi %k0, %j : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %kf = arith.sitofp %k2 : i32 to f64
+      tensor.yield %kf : f64
+    } : tensor<?x?xf64>
 
     // Initialize dense output matrix A.
-    %a0 = bufferization.alloc_tensor(%isz, %jsz) : tensor<?x?xf64>
-    %a = scf.for %i = %cst0 to %isz step %cst1 iter_args(%a1 = %a0) -> tensor<?x?xf64> {
-      %a2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%a3 = %a1) -> tensor<?x?xf64> {
-        %a4 = tensor.insert %f0 into %a3[%i, %j] : tensor<?x?xf64>
-        scf.yield %a4 : tensor<?x?xf64>
-      }
-      scf.yield %a2 : tensor<?x?xf64>
-    }
+    %a = tensor.generate %isz, %jsz {
+    ^bb0(%i : index, %j: index):
+      tensor.yield %f0 : f64
+    } : tensor<?x?xf64>
 
     // Call kernel.
     %0 = call @kernel_mttkrp(%b, %c, %d, %a)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -72,27 +72,27 @@
     %c5 = arith.constant 5 : index
     %c10 = arith.constant 10 : index
 
-    // Setup memory for the dense matrices and initialize.
-    %a0 = bufferization.alloc_tensor(%c5, %c10) : tensor<?x?xf32>
-    %b0 = bufferization.alloc_tensor(%c10, %c5) : tensor<?x?xf32>
-    %x0 = bufferization.alloc_tensor(%c5, %c5) : tensor<?x?xf32>
-    %a, %b, %x = scf.for %i = %c0 to %c5 step %c1 iter_args(%a1 = %a0, %b1 = %b0, %x1 = %x0)
-        -> (tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>) {
-      %x2 = scf.for %j = %c0 to %c5 step %c1 iter_args(%x3 = %x1) -> (tensor<?x?xf32>) {
-        %x4 = tensor.insert %d0 into %x3[%i, %j] : tensor<?x?xf32>
-        scf.yield %x4 : tensor<?x?xf32>
-      }
+    // Initialize dense matrices.
+    %x = tensor.generate %c5, %c5 {
+    ^bb0(%i : index, %j : index):
+      tensor.yield %d0 : f32
+    } : tensor<?x?xf32>
+
+    %a = tensor.generate %c5, %c10 {
+    ^bb0(%i: index, %j: index):
       %p = arith.addi %i, %c1 : index
      %q = arith.index_cast %p : index to i32
       %d = arith.sitofp %q : i32 to f32
-      %a2, %b2 = scf.for %j = %c0 to %c10 step %c1 iter_args(%a3 = %a1, %b3 = %b1)
-          -> (tensor<?x?xf32>, tensor<?x?xf32>) {
-        %a4 = tensor.insert %d into %a3[%i, %j] : tensor<?x?xf32>
-        %b4 = tensor.insert %d into %b3[%j, %i] : tensor<?x?xf32>
-        scf.yield %a4, %b4 : tensor<?x?xf32>, tensor<?x?xf32>
-      }
-      scf.yield %a2, %b2, %x2 : tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>
-    }
+      tensor.yield %d : f32
+    } : tensor<?x?xf32>
+
+    %b = tensor.generate %c10, %c5 {
+    ^bb0(%i: index, %j: index):
+      %p = arith.addi %j, %c1 : index
+      %q = arith.index_cast %p : index to i32
+      %d = arith.sitofp %q : i32 to f32
+      tensor.yield %d : f32
+    } : tensor<?x?xf32>
 
     // Read the sparse matrix from file, construct sparse storage.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -70,27 +70,20 @@
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
 
-    // Initialize dense vectors.
-    %init_256_4 = bufferization.alloc_tensor(%c256, %c4) : tensor<?x?xf64>
-    %b = scf.for %i = %c0 to %c256 step %c1 iter_args(%t = %init_256_4) -> tensor<?x?xf64> {
-      %b2 = scf.for %j = %c0 to %c4 step %c1 iter_args(%t2 = %t) -> tensor<?x?xf64> {
-        %k0 = arith.muli %i, %c4 : index
-        %k1 = arith.addi %j, %k0 : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %k = arith.sitofp %k2 : i32 to f64
-        %t3 = tensor.insert %k into %t2[%i, %j] : tensor<?x?xf64>
-        scf.yield %t3 : tensor<?x?xf64>
-      }
-      scf.yield %b2 : tensor<?x?xf64>
-    }
-    %init_4_4 = bufferization.alloc_tensor(%c4, %c4) : tensor<?x?xf64>
-    %x = scf.for %i = %c0 to %c4 step %c1 iter_args(%t = %init_4_4) -> tensor<?x?xf64> {
-      %x2 = scf.for %j = %c0 to %c4 step %c1 iter_args(%t2 = %t) -> tensor<?x?xf64> {
-        %t3 = tensor.insert %i0 into %t2[%i, %j] : tensor<?x?xf64>
-        scf.yield %t3 : tensor<?x?xf64>
-      }
-      scf.yield %x2 : tensor<?x?xf64>
-    }
+    // Initialize dense tensors.
+    %b = tensor.generate %c256, %c4 {
+    ^bb0(%i : index, %j : index):
+      %k0 = arith.muli %i, %c4 : index
+      %k1 = arith.addi %j, %k0 : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %k = arith.sitofp %k2 : i32 to f64
+      tensor.yield %k : f64
+    } : tensor<?x?xf64>
+
+    %x = tensor.generate %c4, %c4 {
+    ^bb0(%i : index, %j : index):
+      tensor.yield %i0 : f64
+    } : tensor<?x?xf64>
 
     // Call kernel.
     %0 = call @kernel_spmm(%a, %b, %x)