diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -253,6 +253,14 @@
   return val;
 }
 
+/// Generates a call to release/delete a `SparseTensorCOO`.
+static void genDelCOOCall(OpBuilder &builder, Operation *op, Type elemTp,
+                          Value coo) {
+  SmallString<21> name{"delSparseTensorCOO", primaryTypeFunctionSuffix(elemTp)};
+  TypeRange noTp;
+  createFuncCall(builder, op, name, noTp, coo, EmitCInterface::Off);
+}
+
 /// Generates a call that adds one element to a coordinate scheme.
 /// In particular, this generates code like the following:
 ///   val = a[i1,..,ik];
@@ -501,7 +509,9 @@
       params[4] = constantIndexTypeEncoding(rewriter, loc, encDst);
       params[6] = constantAction(rewriter, loc, Action::kFromCOO);
       params[7] = coo;
-      rewriter.replaceOp(op, genNewCall(rewriter, op, params));
+      Value dst = genNewCall(rewriter, op, params);
+      genDelCOOCall(rewriter, op, stp.getElementType(), coo);
+      rewriter.replaceOp(op, dst);
       return success();
     }
     if (!encDst && encSrc) {
@@ -545,6 +555,7 @@
       insertScalarIntoDenseTensor(rewriter, loc, elemPtr, dst, rank, ind);
       rewriter.create<scf::YieldOp>(loc);
       rewriter.setInsertionPointAfter(whileOp);
+      genDelCOOCall(rewriter, op, elemTp, iter);
       rewriter.replaceOpWithNewOp<bufferization::ToTensorOp>(op, resType, dst);
       return success();
     }
@@ -584,7 +595,7 @@
     SmallVector<Value, 8> params;
     sizesFromSrc(rewriter, sizes, loc, src);
     newParams(rewriter, params, op, stp, encDst, Action::kEmptyCOO, sizes);
-    Value ptr = genNewCall(rewriter, op, params);
+    Value coo = genNewCall(rewriter, op, params);
     Value ind = genAlloca(rewriter, loc, rank, rewriter.getIndexType());
     Value perm = params[2];
     SmallVector<Value> lo;
@@ -620,13 +631,15 @@
                                             ivs, rank);
           else
             val = genIndexAndValueForDense(rewriter, loc, src, ind, ivs);
-          genAddEltCall(rewriter, op, eltType, ptr, val, ind, perm);
+          genAddEltCall(rewriter, op, eltType, coo, val, ind, perm);
           return {};
         });
     // Final call to construct sparse tensor storage.
     params[6] = constantAction(rewriter, loc, Action::kFromCOO);
-    params[7] = ptr;
-    rewriter.replaceOp(op, genNewCall(rewriter, op, params));
+    params[7] = coo;
+    Value dst = genNewCall(rewriter, op, params);
+    genDelCOOCall(rewriter, op, eltType, coo);
+    rewriter.replaceOp(op, dst);
     return success();
   }
 };
@@ -822,8 +835,9 @@
     Type eltType = srcType.getElementType();
     SmallString<18> name{"outSparseTensor", primaryTypeFunctionSuffix(eltType)};
     TypeRange noTp;
-    replaceOpWithFuncCall(rewriter, op, name, noTp, params,
-                          EmitCInterface::Off);
+    createFuncCall(rewriter, op, name, noTp, params, EmitCInterface::Off);
+    genDelCOOCall(rewriter, op, eltType, coo);
+    rewriter.eraseOp(op);
     return success();
   }
 };
diff --git a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorUtils.cpp
@@ -400,7 +400,6 @@
         assert(shape[r] == 0 || shape[r] == tensor->getSizes()[perm[r]]);
       n = new SparseTensorStorage<P, I, V>(tensor->getSizes(), perm, sparsity,
                                            tensor);
-      delete tensor;
     } else {
       std::vector<uint64_t> permsz(rank);
       for (uint64_t r = 0; r < rank; r++) {
@@ -748,7 +747,6 @@
   file.flush();
   file.close();
   assert(file.good());
-  delete coo;
 }
 
 /// Initializes sparse tensor from an external COO-flavored format.
@@ -780,17 +778,19 @@
 #endif
   // Convert external format to internal COO.
-  auto *tensor = SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm, nse);
+  auto *coo = SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm, nse);
   std::vector<uint64_t> idx(rank);
   for (uint64_t i = 0, base = 0; i < nse; i++) {
     for (uint64_t r = 0; r < rank; r++)
       idx[perm[r]] = indices[base + r];
-    tensor->add(idx, values[i]);
+    coo->add(idx, values[i]);
     base += rank;
   }
   // Return sparse tensor storage format as opaque pointer.
-  return SparseTensorStorage<uint64_t, uint64_t, V>::newSparseTensor(
-      rank, shape, perm, sparsity, tensor);
+  auto *tensor = SparseTensorStorage<uint64_t, uint64_t, V>::newSparseTensor(
+      rank, shape, perm, sparsity, coo);
+  delete coo;
+  return tensor;
 }
 
 /// Converts a sparse tensor to an external COO-flavored format.
@@ -847,28 +847,31 @@
 #define CASE(p, i, v, P, I, V)                                                 \
   if (ptrTp == (p) && indTp == (i) && valTp == (v)) {                          \
-    SparseTensorCOO<V> *tensor = nullptr;                                      \
+    SparseTensorCOO<V> *coo = nullptr;                                         \
     if (action <= Action::kFromCOO) {                                          \
       if (action == Action::kFromFile) {                                       \
         char *filename = static_cast<char *>(ptr);                             \
-        tensor = openSparseTensorCOO<V>(filename, rank, shape, perm);          \
+        coo = openSparseTensorCOO<V>(filename, rank, shape, perm);             \
       } else if (action == Action::kFromCOO) {                                 \
-        tensor = static_cast<SparseTensorCOO<V> *>(ptr);                       \
+        coo = static_cast<SparseTensorCOO<V> *>(ptr);                          \
       } else {                                                                 \
         assert(action == Action::kEmpty);                                      \
       }                                                                        \
-      return SparseTensorStorage<P, I, V>::newSparseTensor(rank, shape, perm,  \
-                                                           sparsity, tensor);  \
+      auto *tensor = SparseTensorStorage<P, I, V>::newSparseTensor(            \
+          rank, shape, perm, sparsity, coo);                                   \
+      if (action == Action::kFromFile)                                         \
+        delete coo;                                                            \
+      return tensor;                                                           \
    }                                                                           \
    if (action == Action::kEmptyCOO)                                            \
      return SparseTensorCOO<V>::newSparseTensorCOO(rank, shape, perm);         \
-    tensor = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);    \
+    coo = static_cast<SparseTensorStorage<P, I, V> *>(ptr)->toCOO(perm);       \
    if (action == Action::kToIterator) {                                        \
-      tensor->startIterator();                                                 \
+      coo->startIterator();                                                    \
    } else {                                                                    \
      assert(action == Action::kToCOO);                                         \
    }                                                                           \
-    return tensor;                                                             \
+    return coo;                                                                \
  }
 
 #define CASE_SECSAME(p, v, P, V) CASE(p, p, v, P, P, V)
@@ -924,10 +927,8 @@
     const uint64_t isize = iref->sizes[0];                                     \
     auto iter = static_cast<SparseTensorCOO<V> *>(tensor);                     \
     const Element<V> *elem = iter->getNext();                                  \
-    if (elem == nullptr) {                                                     \
-      delete iter;                                                             \
+    if (elem == nullptr)                                                       \
       return false;                                                            \
-    }                                                                          \
     for (uint64_t r = 0; r < isize; r++)                                       \
       indx[r] = elem->indices[r];                                              \
     *value = elem->value;                                                      \
@@ -1208,6 +1209,19 @@
   delete static_cast<SparseTensorStorageBase *>(tensor);
 }
 
+/// Releases sparse tensor coordinate scheme.
+#define IMPL_DELCOO(VNAME, V)                                                  \
+  void delSparseTensorCOO##VNAME(void *coo) {                                  \
+    delete static_cast<SparseTensorCOO<V> *>(coo);                             \
+  }
+IMPL_DELCOO(F64, double)
+IMPL_DELCOO(F32, float)
+IMPL_DELCOO(I64, int64_t)
+IMPL_DELCOO(I32, int32_t)
+IMPL_DELCOO(I16, int16_t)
+IMPL_DELCOO(I8, int8_t)
+#undef IMPL_DELCOO
+
 /// Initializes sparse tensor from a COO-flavored format expressed using C-style
 /// data structures. The expected parameters are:
 ///
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -65,13 +65,14 @@
 
 // CHECK-LABEL: func @sparse_new1d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[FromFile:.*]] = arith.constant 1 : i32
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
 //   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new1d(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
@@ -80,13 +81,14 @@
 
 // CHECK-LABEL: func @sparse_new2d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[FromFile:.*]] = arith.constant 1 : i32
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
 //   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<2xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<2xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new2d(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #SparseMatrix> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #SparseMatrix>
@@ -95,13 +97,14 @@
 
 // CHECK-LABEL: func @sparse_new3d(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[FromFile:.*]] = arith.constant 1 : i32
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<3xi8>
 //   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<3xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<3xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromFile]], %[[A]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor> {
   %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?x?xf32, #SparseTensor>
@@ -111,6 +114,7 @@
 // CHECK-LABEL: func @sparse_init(
 //  CHECK-SAME: %[[I:.*]]: index,
 //  CHECK-SAME: %[[J:.*]]: index) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[Empty:.*]] = arith.constant 0 : i32
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
@@ -122,7 +126,7 @@
 //   CHECK-DAG: memref.store %[[I]], %[[Q]][%[[C0]]] : memref<2xindex>
 //   CHECK-DAG: memref.store %[[J]], %[[Q]][%[[C1]]] : memref<2xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #SparseMatrix> {
   %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf64, #SparseMatrix>
@@ -164,6 +168,8 @@
 
 // CHECK-LABEL: func @sparse_convert_1d(
 //  CHECK-SAME: %[[A:.*]]: tensor<?xi32>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
+//   CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[U:.*]] = tensor.dim %[[A]], %[[C0]] : tensor<?xi32>
@@ -174,7 +180,7 @@
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
-//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
+//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
 //       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref<?xindex>
 //       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U]] step %[[C1]] {
@@ -182,7 +188,8 @@
 //       CHECK:   %[[E:.*]] = tensor.extract %[[A]][%[[I]]] : tensor<?xi32>
 //       CHECK:   memref.store %[[I]], %[[M]][%[[C0]]] : memref<1xindex>
 //       CHECK:   call @addEltI32(%[[C]], %[[E]], %[[T]], %[[Z]])
 //       CHECK: }
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+//       CHECK: call @delSparseTensorCOOI32(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
@@ -191,14 +198,17 @@
 
 // CHECK-LABEL: func @sparse_convert_1d_ss(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+//   CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
 //   CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
 //   CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
-//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
+//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+//       CHECK: call @delSparseTensorCOOF32(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
   %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
@@ -207,6 +217,8 @@
 
 // CHECK-LABEL: func @sparse_convert_2d(
 //  CHECK-SAME: %[[A:.*]]: tensor<2x4xf64>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
+//   CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<2xi8>
@@ -216,7 +228,7 @@
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
-//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
+//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
 //       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
 //       CHECK: scf.for %[[I:.*]] = %[[C0]] to %{{.*}} step %[[C1]] {
@@ -227,7 +239,8 @@
 //       CHECK:     call @addEltF64(%[[C]], %[[E]], %[[T]], %[[Z]])
 //       CHECK:   }
 //       CHECK: }
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+//       CHECK: call @delSparseTensorCOOF64(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #SparseMatrix> {
   %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #SparseMatrix>
@@ -235,6 +248,8 @@
 }
 
 // CHECK-LABEL: func @sparse_constant() -> !llvm.ptr<i8> {
+//   CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
+//   CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -245,7 +260,7 @@
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<2xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
-//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
+//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
 //       CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
 //       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[C2]] step %[[C1]] {
@@ -254,7 +269,8 @@
 //       CHECK:   %[[V:.*]] = tensor.extract %{{.*}}[%[[I]]] : tensor<2xf32>
 //       CHECK:   call @addEltF32(%{{.*}}, %[[V]], %[[N]], %{{.*}})
 //       CHECK: }
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+//       CHECK: call @delSparseTensorCOOF32(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_constant() -> tensor<8x7xf32, #SparseMatrix>{
   // Initialize a tensor.
@@ -266,6 +282,8 @@
 
 // CHECK-LABEL: func @sparse_convert_3d(
 //  CHECK-SAME: %[[A:.*]]: tensor<?x?x?xf64>) -> !llvm.ptr<i8>
+//   CHECK-DAG: %[[EmptyCOO:.*]] = arith.constant 4 : i32
+//   CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
 //   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 //   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
 //   CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
@@ -279,7 +297,7 @@
 //   CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<3xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<3xindex> to memref<?xindex>
 //       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
-//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[NP]])
+//       CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>
 //       CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref<?xindex>
 //       CHECK: scf.for %[[I:.*]] = %[[C0]] to %[[U1]] step %[[C1]] {
@@ -293,7 +311,8 @@
 //       CHECK:     }
 //       CHECK:   }
 //       CHECK: }
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[C]])
+//       CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+//       CHECK: call @delSparseTensorCOOF64(%[[C]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
   %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
@@ -472,9 +491,11 @@
 
 // CHECK-LABEL: func @sparse_out1(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
 //  CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>)
-//   CHECK-DAG: %[[C:.*]] = arith.constant false
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
-//       CHECK: call @outSparseTensorF64(%[[T]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
+//   CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[Sort:.*]] = arith.constant false
+//       CHECK: %[[COO:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
+//       CHECK: call @outSparseTensorF64(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
+//       CHECK: call @delSparseTensorCOOF64(%[[COO]])
 //       CHECK: return
 func @sparse_out1(%arg0: tensor<?x?xf64, #SparseMatrix>, %arg1: !llvm.ptr<i8>) {
   sparse_tensor.out %arg0, %arg1 : tensor<?x?xf64, #SparseMatrix>, !llvm.ptr<i8>
@@ -484,9 +505,11 @@
 
 // CHECK-LABEL: func @sparse_out2(
 //  CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>,
 //  CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>)
-//   CHECK-DAG: %[[C:.*]] = arith.constant true
-//       CHECK: %[[T:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[A]])
-//       CHECK: call @outSparseTensorF32(%[[T]], %[[B]], %[[C]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
+//   CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
+//   CHECK-DAG: %[[Sort:.*]] = arith.constant true
+//       CHECK: %[[COO:.*]] = call @newSparseTensor(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
+//       CHECK: call @outSparseTensorF32(%[[COO]], %[[B]], %[[Sort]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i1) -> ()
+//       CHECK: call @delSparseTensorCOOF32(%[[COO]])
 //       CHECK: return
 func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr<i8>) {
   sparse_tensor.out %arg0, %arg1 : tensor<?x?x?xf32, #SparseTensor>, !llvm.ptr<i8>