diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -423,14 +423,20 @@
 }
 /// Generates code to allocate a tensor of the given type, and zero
-/// initialize it. This function assumes the TensorType is fully
-/// specified (i.e., has static rank and sizes).
-// TODO(D112674): support dynamic sizes.
+/// initialize it. If the tensor type has any dynamic sizes, then the
+/// `sizes` parameter should be as filled by sizesFromPtr(); that way
+/// we can reuse the genDimSizeCall() results generated by sizesFromPtr().
 static Value allocDenseTensor(ConversionPatternRewriter &rewriter, Location loc,
-                              RankedTensorType tensorTp) {
+                              RankedTensorType tensorTp, ValueRange sizes) {
   Type elemTp = tensorTp.getElementType();
-  auto memTp = MemRefType::get(tensorTp.getShape(), elemTp);
-  Value mem = rewriter.create<memref::AllocOp>(loc, memTp);
+  auto shape = tensorTp.getShape();
+  auto memTp = MemRefType::get(shape, elemTp);
+  SmallVector<Value> dynamicSizes;
+  for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
+    if (shape[i] == ShapedType::kDynamicSize)
+      dynamicSizes.push_back(sizes[i]);
+  }
+  Value mem = rewriter.create<memref::AllocOp>(loc, memTp, dynamicSizes);
   Value zero = constantZero(rewriter, loc, elemTp);
   rewriter.create<linalg::FillOp>(loc, zero, mem).result();
   return mem;
 }
@@ -597,9 +603,6 @@
       if (!tensorTp)
         return failure();
       unsigned rank = tensorTp.getRank();
-      Value dst = allocDenseTensor(rewriter, loc, tensorTp);
-      Value ind = genAlloca(rewriter, loc, rank, rewriter.getIndexType());
-      Value elemPtr = genAllocaScalar(rewriter, loc, tensorTp.getElementType());
       // Clone encSrc but removing the dimOrdering.
       // The srcDimOrdering will already be applied during the
      // conversion from `SparseTensorStorage src` to SparseTensorCOO
@@ -619,10 +622,12 @@
           AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
       SmallVector<Value, 4> sizes;
       SmallVector<Value, 8> params;
-      // TODO(D112674): support dynamic sizes.
-      sizesFromType(rewriter, sizes, loc, tensorTp);
+      sizesFromPtr(rewriter, sizes, op, encSrc, tensorTp, src);
       newParams(rewriter, params, op, encDst, kToIter, sizes, src);
       Value iter = genNewCall(rewriter, op, params);
+      Value ind = genAlloca(rewriter, loc, rank, rewriter.getIndexType());
+      Value elemPtr = genAllocaScalar(rewriter, loc, tensorTp.getElementType());
+      Value dst = allocDenseTensor(rewriter, loc, tensorTp, sizes);
       SmallVector<Value> noArgs;
       SmallVector<Type> noTypes;
       auto whileOp = rewriter.create<scf::WhileOp>(loc, noTypes, noArgs);
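
For reference, after this change a sparse-to-dense conversion with a dynamic
dimension, such as

  %0 = sparse_tensor.convert %arg0 : tensor<?xf64, #SparseVector> to tensor<?xf64>

lowers to IR of roughly the following shape (an illustrative sketch distilled
from the CHECK lines in the test below; the SSA names are invented and the
attrs/sizes/perm marshalling is elided):

  %c0  = arith.constant 0 : index
  // Ask the runtime for the unknown dimension; the result feeds both
  // newSparseTensor() and the dense allocation below.
  %d0  = call @sparseDimSize(%arg0, %c0) : (!llvm.ptr<i8>, index) -> index
  %it  = call @newSparseTensor(...) : (...) -> !llvm.ptr<i8>
  %dst = memref.alloc(%d0) : memref<?xf64>
  %f0  = arith.constant 0.000000e+00 : f64
  linalg.fill(%f0, %dst) : f64, memref<?xf64>
  // scf.while: repeatedly getNextF64() into index/element buffers and
  // memref.store each element into %dst.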
diff --git a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir
@@ -17,31 +17,26 @@
 // CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<13xi32>
 // CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[I13:.*]] = arith.constant 13 : index
-//
+// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
+// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
+// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
+// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I13]], %[[SizesS]][%[[I0]]] : memref<1xindex>
+// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
+// CHECK-DAG: %[[SecTp:.*]] = arith.constant 1 : i32
+// CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[SecTp]], %[[SecTp]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
 // CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<13xi32>
 // CHECK-DAG: %[[E0:.*]] = arith.constant 0 : i32
 // CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : i32, memref<13xi32>
-// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
-// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
-// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
-//
-// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
-// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
-// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
-// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
-//
-// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
-// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
-// CHECK-DAG: memref.store %[[I13]], %[[SizesS]][%[[I0]]] : memref<1xindex>
-//
-// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
-// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
-// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
-//
-// CHECK-DAG: %[[SecTp:.*]] = arith.constant 1 : i32
-// CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
-// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
-// CHECK: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[SecTp]], %[[SecTp]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK: scf.while : () -> () {
 // CHECK: %[[Cond:.*]] = call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
 // CHECK: scf.condition(%[[Cond]])
@@ -58,38 +53,73 @@
   return %0 : tensor<13xi32>
 }
 
+// CHECK-LABEL: func @sparse_convert_1d_dyn(
+// CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?xi32>
+// CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<1xi8>
+// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<1xi8> to memref<?xi8>
+// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<1xi8>
+// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
+// CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<1xindex>
+// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<1xindex>
+// CHECK-DAG: %[[SecTp:.*]] = arith.constant 1 : i32
+// CHECK-DAG: %[[ElemTp:.*]] = arith.constant 4 : i32
+// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %[[SecTp]], %[[SecTp]], %[[ElemTp]], %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<1xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<i32>
+// CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?xi32>
+// CHECK-DAG: %[[E0:.*]] = arith.constant 0 : i32
+// CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : i32, memref<?xi32>
+// CHECK: scf.while : () -> () {
+// CHECK: %[[Cond:.*]] = call @getNextI32(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<i32>) -> i1
+// CHECK: scf.condition(%[[Cond]])
+// CHECK: } do {
+// CHECK: %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<1xindex>
+// CHECK: %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<i32>
+// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]]] : memref<?xi32>
+// CHECK: scf.yield
+// CHECK: }
+// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?xi32>
+// CHECK: return %[[T]] : tensor<?xi32>
+func @sparse_convert_1d_dyn(%arg0: tensor<?xi32, #SparseVector>) -> tensor<?xi32> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?xi32, #SparseVector> to tensor<?xi32>
+  return %0 : tensor<?xi32>
+}
+
 // CHECK-LABEL: func @sparse_convert_2d(
 // CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x4xf64>
 // CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
 // CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
-//
+// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
 // CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x4xf64>
 // CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
 // CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x4xf64>
-// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
-// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
-// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
-//
-// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
-// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
-// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
-// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
-// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
-//
-// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
-// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
-// CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
-// CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
-//
-// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
-// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
-// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
-// CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
-//
-// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
-// CHECK: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK: scf.while : () -> () {
 // CHECK: %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
 // CHECK: scf.condition(%[[Cond]])
@@ -107,6 +137,138 @@
   return %0 : tensor<2x4xf64>
 }
 
+// CHECK-LABEL: func @sparse_convert_2d_dyn0(
+// CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x4xf64>
+// CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
+// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
+// CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+// CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]]) : memref<?x4xf64>
+// CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<?x4xf64>
+// CHECK: scf.while : () -> () {
+// CHECK: %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
+// CHECK: scf.condition(%[[Cond]])
+// CHECK: } do {
+// CHECK: %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
+// CHECK: %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
+// CHECK: %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
+// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x4xf64>
+// CHECK: scf.yield
+// CHECK: }
+// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?x4xf64>
+// CHECK: return %[[T]] : tensor<?x4xf64>
+func @sparse_convert_2d_dyn0(%arg0: tensor<?x4xf64, #SparseMatrix>) -> tensor<?x4xf64> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?x4xf64, #SparseMatrix> to tensor<?x4xf64>
+  return %0 : tensor<?x4xf64>
+}
+
+// CHECK-LABEL: func @sparse_convert_2d_dyn1(
+// CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x?xf64>
+// CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
+// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
+// CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+// CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI1]]) : memref<2x?xf64>
+// CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x?xf64>
+// CHECK: scf.while : () -> () {
+// CHECK: %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
+// CHECK: scf.condition(%[[Cond]])
+// CHECK: } do {
+// CHECK: %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
+// CHECK: %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
+// CHECK: %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
+// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<2x?xf64>
+// CHECK: scf.yield
+// CHECK: }
+// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<2x?xf64>
+// CHECK: return %[[T]] : tensor<2x?xf64>
+func @sparse_convert_2d_dyn1(%arg0: tensor<2x?xf64, #SparseMatrix>) -> tensor<2x?xf64> {
+  %0 = sparse_tensor.convert %arg0 : tensor<2x?xf64, #SparseMatrix> to tensor<2x?xf64>
+  return %0 : tensor<2x?xf64>
+}
+
+// CHECK-LABEL: func @sparse_convert_2d_dyn2(
+// CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf64>
+// CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[I1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<2xi8>
+// CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<2xi8> to memref<?xi8>
+// CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<2xi8>
+// CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<2xi8>
+// CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[SizeI0:.*]] = call @sparseDimSize(%[[Arg]], %[[I0]]) : (!llvm.ptr<i8>, index) -> index
+// CHECK-DAG: %[[SizeI1:.*]] = call @sparseDimSize(%[[Arg]], %[[I1]]) : (!llvm.ptr<i8>, index) -> index
+// CHECK-DAG: memref.store %[[SizeI0]], %[[SizesS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[SizeI1]], %[[SizesS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<2xindex>
+// CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<2xindex>
+// CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<2xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<2xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+// CHECK-DAG: %[[M:.*]] = memref.alloc(%[[SizeI0]], %[[SizeI1]]) : memref<?x?xf64>
+// CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<?x?xf64>
+// CHECK: scf.while : () -> () {
+// CHECK: %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
+// CHECK: scf.condition(%[[Cond]])
+// CHECK: } do {
+// CHECK: %[[Iv0:.*]] = memref.load %[[IndS]][%[[I0]]] : memref<2xindex>
+// CHECK: %[[Iv1:.*]] = memref.load %[[IndS]][%[[I1]]] : memref<2xindex>
+// CHECK: %[[ElemVal:.*]] = memref.load %[[ElemBuffer]][] : memref<f64>
+// CHECK: memref.store %[[ElemVal]], %[[M]][%[[Iv0]], %[[Iv1]]] : memref<?x?xf64>
+// CHECK: scf.yield
+// CHECK: }
+// CHECK: %[[T:.*]] = memref.tensor_load %[[M]] : memref<?x?xf64>
+// CHECK: return %[[T]] : tensor<?x?xf64>
+func @sparse_convert_2d_dyn2(%arg0: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64> {
+  %0 = sparse_tensor.convert %arg0 : tensor<?x?xf64, #SparseMatrix> to tensor<?x?xf64>
+  return %0 : tensor<?x?xf64>
+}
+
 // CHECK-LABEL: func @sparse_convert_3d(
 // CHECK-SAME: %[[Arg:.*]]: !llvm.ptr<i8>) -> tensor<2x3x4xf64>
 // CHECK-DAG: %[[I0:.*]] = arith.constant 0 : index
@@ -114,35 +276,30 @@
 // CHECK-DAG: %[[I2:.*]] = arith.constant 2 : index
 // CHECK-DAG: %[[I3:.*]] = arith.constant 3 : index
 // CHECK-DAG: %[[I4:.*]] = arith.constant 4 : index
-//
-// CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x3x4xf64>
-// CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x3x4xf64>
-// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<3xindex>
-// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<3xindex> to memref<?xindex>
-// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
-//
 // CHECK-DAG: %[[AttrsS:.*]] = memref.alloca() : memref<3xi8>
 // CHECK-DAG: %[[AttrsD:.*]] = memref.cast %[[AttrsS]] : memref<3xi8> to memref<?xi8>
 // CHECK-DAG: %[[Attr0:.*]] = arith.constant 0 : i8
 // CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I0]]] : memref<3xi8>
 // CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I1]]] : memref<3xi8>
 // CHECK-DAG: memref.store %[[Attr0]], %[[AttrsS]][%[[I2]]] : memref<3xi8>
-//
 // CHECK-DAG: %[[SizesS:.*]] = memref.alloca() : memref<3xindex>
 // CHECK-DAG: %[[SizesD:.*]] = memref.cast %[[SizesS]] : memref<3xindex> to memref<?xindex>
 // CHECK-DAG: memref.store %[[I2]], %[[SizesS]][%[[I0]]] : memref<3xindex>
 // CHECK-DAG: memref.store %[[I3]], %[[SizesS]][%[[I1]]] : memref<3xindex>
 // CHECK-DAG: memref.store %[[I4]], %[[SizesS]][%[[I2]]] : memref<3xindex>
-//
 // CHECK-DAG: %[[PermS:.*]] = memref.alloca() : memref<3xindex>
 // CHECK-DAG: %[[PermD:.*]] = memref.cast %[[PermS]] : memref<3xindex> to memref<?xindex>
 // CHECK-DAG: memref.store %[[I0]], %[[PermS]][%[[I0]]] : memref<3xindex>
 // CHECK-DAG: memref.store %[[I1]], %[[PermS]][%[[I1]]] : memref<3xindex>
 // CHECK-DAG: memref.store %[[I2]], %[[PermS]][%[[I2]]] : memref<3xindex>
-//
 // CHECK-DAG: %[[ActionToIter:.*]] = arith.constant 5 : i32
-// CHECK: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[Iter:.*]] = call @newSparseTensor(%[[AttrsD]], %[[SizesD]], %[[PermD]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ActionToIter]], %[[Arg]]) : (memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
+// CHECK-DAG: %[[IndS:.*]] = memref.alloca() : memref<3xindex>
+// CHECK-DAG: %[[IndD:.*]] = memref.cast %[[IndS]] : memref<3xindex> to memref<?xindex>
+// CHECK-DAG: %[[ElemBuffer:.*]] = memref.alloca() : memref<f64>
+// CHECK-DAG: %[[M:.*]] = memref.alloc() : memref<2x3x4xf64>
+// CHECK-DAG: %[[E0:.*]] = arith.constant 0.000000e+00 : f64
+// CHECK-DAG: linalg.fill(%[[E0]], %[[M]]) : f64, memref<2x3x4xf64>
 // CHECK: scf.while : () -> () {
 // CHECK: %[[Cond:.*]] = call @getNextF64(%[[Iter]], %[[IndD]], %[[ElemBuffer]]) : (!llvm.ptr<i8>, memref<?xindex>, memref<f64>) -> i1
 // CHECK: scf.condition(%[[Cond]])
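
Note: the CHECK-DAG reordering in the static tests mirrors the code motion in
SparseTensorConversion.cpp: the COO iterator (and hence the sparseDimSize()
calls emitted for sizesFromPtr()) is now created before the dense destination
is allocated, so memref.alloc can consume the dynamic sizes. To eyeball the
lowering manually, something like the following should work (a sketch,
assuming a built mlir-opt and that the pass is registered under
--sparse-tensor-conversion; the authoritative RUN line is at the top of the
test file, outside this hunk):

  mlir-opt --sparse-tensor-conversion \
    mlir/test/Dialect/SparseTensor/conversion_sparse2dense.mlir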