diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -27,7 +27,7 @@
 //===----------------------------------------------------------------------===//
 
 def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>,
-    Arguments<(ins AnyType:$source, UnitAttr:$expandSymmetry)>,
+    Arguments<(ins AnyType:$source)>,
     Results<(outs AnySparseTensor:$result)> {
   string summary = "Materializes a new sparse tensor from given source";
   string description = [{
@@ -40,22 +40,14 @@
    code.  The operation is provided as an anchor that materializes a
    properly typed sparse tensor with inital contents into a computation.

-    An optional attribute `expandSymmetry` can be used to extend this operation
-    to make symmetry in external formats explicit in the storage. That is, when
-    the attribute presents and a non-zero value is discovered at (i, j) where
-    i!=j, we add the same value to (j, i). This claims more storage than a pure
-    symmetric storage, and thus may cause a bad performance hit. True symmetric
-    storage is planned for the future.
-
    Example:

    ```mlir
    sparse_tensor.new %source : !Source to tensor<1024x1024xf64, #CSR>
    ```
  }];

-  let assemblyFormat = "(`expand_symmetry` $expandSymmetry^)? $source attr-dict"
+  let assemblyFormat = "$source attr-dict"
                        "`:` type($source) `to` type($result)";
-  let hasVerifier = 1;
 }
 
 def SparseTensor_PackOp : SparseTensor_Op<"pack", [Pure]>,
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/File.h
@@ -164,13 +164,6 @@
     return valueKind_ == ValueKind::kPattern;
   }
 
-  /// Gets the MME "symmetric" property setting. Is only valid after
-  /// parsing the header.
-  bool isSymmetric() const {
-    assert(isValid() && "Attempt to isSymmetric() before readHeader()");
-    return isSymmetric_;
-  }
-
   /// Gets the rank of the tensor. Is only valid after parsing the header.
   uint64_t getRank() const {
     assert(isValid() && "Attempt to getRank() before readHeader()");
@@ -276,14 +269,14 @@
   }
 
   /// The internal implementation of `readCOO`. We template over
-  /// `IsPattern` and `IsSymmetric` in order to perform LICM without
-  /// needing to duplicate the source code.
+  /// `IsPattern` in order to perform LICM without needing to duplicate the
+  /// source code.
   //
   // TODO: We currently take the `dim2lvl` argument as a `PermutationRef`
   // since that's what `readCOO` creates. Once we update `readCOO` to
   // functionalize the mapping, then this helper will just take that
   // same function.
-  template <typename V, bool IsPattern, bool IsSymmetric>
+  template <typename V, bool IsPattern>
   void readCOOLoop(uint64_t lvlRank, detail::PermutationRef dim2lvl,
                    SparseTensorCOO<V> *lvlCOO);
 
@@ -300,7 +293,6 @@
   const char *const filename;
   FILE *file = nullptr;
   ValueKind valueKind_ = ValueKind::kInvalid;
-  bool isSymmetric_ = false;
   uint64_t idata[512];
   char line[kColWidth];
 };
 
@@ -323,21 +315,16 @@
   auto *lvlCOO = new SparseTensorCOO<V>(lvlRank, lvlSizes, getNNZ());
   // Do some manual LICM, to avoid assertions in the for-loop.
   const bool IsPattern = isPattern();
-  const bool IsSymmetric = (isSymmetric() && getRank() == 2);
-  if (IsPattern && IsSymmetric)
-    readCOOLoop<V, true, true>(lvlRank, d2l, lvlCOO);
-  else if (IsPattern)
-    readCOOLoop<V, true, false>(lvlRank, d2l, lvlCOO);
-  else if (IsSymmetric)
-    readCOOLoop<V, false, true>(lvlRank, d2l, lvlCOO);
+  if (IsPattern)
+    readCOOLoop<V, true>(lvlRank, d2l, lvlCOO);
   else
-    readCOOLoop<V, false, false>(lvlRank, d2l, lvlCOO);
+    readCOOLoop<V, false>(lvlRank, d2l, lvlCOO);
   // Close the file and return the COO.
   closeFile();
   return lvlCOO;
 }
 
-template <typename V, bool IsPattern, bool IsSymmetric>
+template <typename V, bool IsPattern>
 void SparseTensorReader::readCOOLoop(uint64_t lvlRank,
                                      detail::PermutationRef dim2lvl,
                                      SparseTensorCOO<V> *lvlCOO) {
@@ -353,16 +340,6 @@
     dim2lvl.pushforward(dimRank, dimInd.data(), lvlInd.data());
     // TODO:
     lvlCOO->add(lvlInd, value);
-    // We currently chose to deal with symmetric matrices by fully
-    // constructing them. In the future, we may want to make symmetry
-    // implicit for storage reasons.
-    if constexpr (IsSymmetric)
-      if (dimInd[0] != dimInd[1]) {
-        // Must recompute `lvlInd`, since arbitrary maps don't preserve swap.
-        std::swap(dimInd[0], dimInd[1]);
-        dim2lvl.pushforward(dimRank, dimInd.data(), lvlInd.data());
-        lvlCOO->add(lvlInd, value);
-      }
   }
 }
 
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -248,9 +248,6 @@
 /// Returns the rank of the sparse tensor being read.
 MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderRank(void *p);
 
-/// Returns the is_symmetric bit for the sparse tensor being read.
-MLIR_CRUNNERUTILS_EXPORT bool getSparseTensorReaderIsSymmetric(void *p);
-
 /// Returns the number of non-zero values for the sparse tensor being read.
 MLIR_CRUNNERUTILS_EXPORT index_type getSparseTensorReaderNNZ(void *p);
 
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -611,12 +611,6 @@
   return success();
 }
 
-LogicalResult NewOp::verify() {
-  if (getExpandSymmetry() && getTypeRank(getResult()) != 2)
-    return emitOpError("expand_symmetry can only be used for 2D tensors");
-  return success();
-}
-
 static LogicalResult verifyPackUnPack(Operation *op, TensorType cooTp,
                                       TensorType dataTp, TensorType idxTp) {
   if (!isUniqueCOOType(cooTp))
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -1045,16 +1045,6 @@
                                    /*sizeHint=*/nnz, Attribute())
                        .getResult();
-    // The verifier ensures only 2D tensors can have the expandSymmetry flag.
-    Value symmetric;
-    if (rank == 2 && op.getExpandSymmetry()) {
-      symmetric =
-          createFuncCall(rewriter, loc, "getSparseTensorReaderIsSymmetric",
-                         {rewriter.getI1Type()}, {reader}, EmitCInterface::Off)
-              .getResult(0);
-    } else {
-      symmetric = Value();
-    }
     Type eltTp = dstTp.getElementType();
     Value value = genAllocaScalar(rewriter, loc, eltTp);
     scf::ForOp forOp = rewriter.create<scf::ForOp>(loc, c0, nnz, c1,
@@ -1074,21 +1064,6 @@
     Value v = rewriter.create<memref::LoadOp>(loc, value);
     Value t = rewriter.create<InsertOp>(loc, v, forOp.getRegionIterArg(0),
                                         indicesArray);
-    if (symmetric) {
-      Value eq = rewriter.create<arith::CmpIOp>(
-          loc, arith::CmpIPredicate::ne, indicesArray[0], indicesArray[1]);
-      Value cond = rewriter.create<arith::AndIOp>(loc, symmetric, eq);
-      scf::IfOp ifOp =
-          rewriter.create<scf::IfOp>(loc, t.getType(), cond, /*else*/ true);
-      rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
-      rewriter.create<scf::YieldOp>(
-          loc, Value(rewriter.create<InsertOp>(
-                   loc, v, t, ValueRange{indicesArray[1], indicesArray[0]})));
-      rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front());
-      rewriter.create<scf::YieldOp>(loc, t);
-      t = ifOp.getResult(0);
-      rewriter.setInsertionPointAfter(ifOp);
-    }
     rewriter.create<scf::YieldOp>(loc, ArrayRef<Value>(t));
     rewriter.setInsertionPointAfter(forOp);
     // Link SSA chain.
diff --git a/mlir/lib/ExecutionEngine/SparseTensor/File.cpp b/mlir/lib/ExecutionEngine/SparseTensor/File.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensor/File.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensor/File.cpp
@@ -149,13 +149,15 @@
   else
     MLIR_SPARSETENSOR_FATAL("Unexpected header field value in %s\n", filename);
   // Set properties.
-  isSymmetric_ = streq(symmetry, "symmetric");
+  bool isSymmetric = streq(symmetry, "symmetric");
   // Make sure this is a general sparse matrix.
   if (strne(header, "%%matrixmarket") || strne(object, "matrix") ||
       strne(format, "coordinate") ||
-      (strne(symmetry, "general") && !isSymmetric_))
+      (strne(symmetry, "general") && !isSymmetric))
     MLIR_SPARSETENSOR_FATAL("Cannot find a general sparse matrix in %s\n",
                             filename);
+  if (isSymmetric)
+    MLIR_SPARSETENSOR_FATAL("Symmetric MTX is not supported yet");
   // Skip comments.
   while (true) {
     readLine();
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -877,10 +877,6 @@
   return static_cast<SparseTensorReader *>(p)->getRank();
 }
 
-bool getSparseTensorReaderIsSymmetric(void *p) {
-  return static_cast<SparseTensorReader *>(p)->isSymmetric();
-}
-
 index_type getSparseTensorReaderNNZ(void *p) {
   return static_cast<SparseTensorReader *>(p)->getNNZ();
 }
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -374,16 +374,6 @@
 
 // -----
 
-#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
-
-func.func @sparse_new(%arg0: !llvm.ptr) {
-  // expected-error@+1 {{expand_symmetry can only be used for 2D tensors}}
-  %0 = sparse_tensor.new expand_symmetry %arg0 : !llvm.ptr to tensor<128xf64, #SparseVector>
-  return
-}
-
-// -----
-
 func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
   // expected-error@+1 {{unexpected type in convert}}
   %0 = sparse_tensor.convert %arg0 : tensor<*xf32> to tensor<10xf32>
diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
--- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir
@@ -10,45 +10,6 @@
   dimOrdering = affine_map<(i, j) -> (j, i)>
 }>
 
-// CHECK-LABEL: func.func @sparse_new_symmetry(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>> {
-// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-// CHECK: %[[R:.*]] = call @createSparseTensorReader(%[[A]])
-// CHECK: %[[DS:.*]] = memref.alloca(%[[C2]]) : memref<?xindex>
-// CHECK: call @copySparseTensorReaderDimSizes(%[[R]], %[[DS]])
-// CHECK: %[[D0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
-// CHECK: %[[D1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
-// CHECK: %[[N:.*]] = call @getSparseTensorReaderNNZ(%[[R]])
-// CHECK: %[[T:.*]] = bufferization.alloc_tensor(%[[D0]], %[[D1]]) size_hint=%[[N]]
-// CHECK: %[[S:.*]] = call @getSparseTensorReaderIsSymmetric(%[[R]])
-// CHECK: %[[VB:.*]] = memref.alloca()
-// CHECK: %[[T2:.*]] = scf.for %{{.*}} = %[[C0]] to %[[N]] step %[[C1]] iter_args(%[[A2:.*]] = %[[T]])
-// CHECK: func.call @getSparseTensorReaderNextF32(%[[R]], %[[DS]], %[[VB]])
-// CHECK: %[[E0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]]
-// CHECK: %[[E1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]]
-// CHECK: %[[V:.*]] = memref.load %[[VB]][]
-// CHECK: %[[T1:.*]] = sparse_tensor.insert %[[V]] into %[[A2]]{{\[}}%[[E0]], %[[E1]]]
-// CHECK: %[[NE:.*]] = arith.cmpi ne, %[[E0]], %[[E1]]
-// CHECK: %[[COND:.*]] = arith.andi %[[S]], %[[NE]]
-// CHECK: %[[T3:.*]] = scf.if %[[COND]]
-// CHECK: %[[T4:.*]] = sparse_tensor.insert %[[V]] into %[[T1]]{{\[}}%[[E1]], %[[E0]]]
-// CHECK: scf.yield %[[T4]]
-// CHECK: else
-// CHECK: scf.yield %[[T1]]
-// CHECK: scf.yield %[[T3]]
-// CHECK: }
-// CHECK: call @delSparseTensorReader(%[[R]])
-// CHECK: %[[T5:.*]] = sparse_tensor.load %[[T2]] hasInserts
-// CHECK: %[[R:.*]] = sparse_tensor.convert %[[T5]]
-// CHECK: bufferization.dealloc_tensor %[[T5]]
-// CHECK: return %[[R]]
-func.func @sparse_new_symmetry(%arg0: !llvm.ptr) -> tensor<?x?xf32, #CSR> {
-  %0 = sparse_tensor.new expand_symmetry %arg0 : !llvm.ptr to tensor<?x?xf32, #CSR>
-  return %0 : tensor<?x?xf32, #CSR>
-}
-
 // CHECK-LABEL: func.func @sparse_new(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>> {
 // CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -44,19 +44,6 @@
 
 // -----
 
-#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
-
-// CHECK-LABEL: func @sparse_new_symmetry(
-// CHECK-SAME: %[[A:.*]]: !llvm.ptr)
-// CHECK: %[[T:.*]] = sparse_tensor.new expand_symmetry %[[A]] : !llvm.ptr to tensor<?x?xf64, #{{.*}}>
-// CHECK: return %[[T]] : tensor<?x?xf64, #{{.*}}>
-func.func @sparse_new_symmetry(%arg0: !llvm.ptr) -> tensor<?x?xf64, #SparseMatrix> {
-  %0 = sparse_tensor.new expand_symmetry %arg0 : !llvm.ptr to tensor<?x?xf64, #SparseMatrix>
-  return %0 : tensor<?x?xf64, #SparseMatrix>
-}
-
-// -----
-
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
 
 // CHECK-LABEL: func @sparse_dealloc(
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_file_io.mlir
@@ -40,7 +40,6 @@
   func.func private @delSparseTensorReader(!TensorReader) -> ()
   func.func private @getSparseTensorReaderRank(!TensorReader) -> (index)
   func.func private @getSparseTensorReaderNNZ(!TensorReader) -> (index)
-  func.func private @getSparseTensorReaderIsSymmetric(!TensorReader) -> (i1)
  func.func private @copySparseTensorReaderDimSizes(!TensorReader,
    memref<?xindex>) -> () attributes { llvm.emit_c_interface }
  func.func private @getSparseTensorReaderNextF32(!TensorReader,
@@ -109,9 +108,6 @@
     vector.print %rank : index
     %nnz = call @getSparseTensorReaderNNZ(%tensor) : (!TensorReader) -> index
     vector.print %nnz : index
-    %symmetric = call @getSparseTensorReaderIsSymmetric(%tensor)
-        : (!TensorReader) -> i1
-    vector.print %symmetric : i1
    %dimSizes = memref.alloc(%rank) : memref<?xindex>
    func.call @copySparseTensorReaderDimSizes(%tensor, %dimSizes)
        : (!TensorReader, memref<?xindex>) -> ()
@@ -182,7 +178,6 @@
     // print the data.
     // CHECK: 2
     // CHECK: 17
-    // CHECK: 0
     // CHECK: ( 4, 256, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )
     // CHECK: ( 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 )
     // CHECK: ( 0, 126, 127, 254, 1, 253, 2, 0, 1, 3, 98, 126, 127, 128, 249, 253, 255 )
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -1,6 +1,6 @@
 // DEFINE: %{option} = enable-runtime-library=true
 // DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option}
-// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric.mtx" \
+// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
 // DEFINE: mlir-cpu-runner \
 // DEFINE: -e entry -entry-point-result=void \
 // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
@@ -19,7 +19,7 @@
 // Do the same run, but now with direct IR generation and, if available, VLA
 // vectorization.
 // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA"
-// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric.mtx" \
+// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
 // REDEFINE: %lli \
 // REDEFINE: --entry-function=entry_lli \
 // REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \
@@ -79,7 +79,7 @@
     // Read the sparse matrix from file, construct sparse storage.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
-    %a = sparse_tensor.new expand_symmetry %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
+    %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
 
     // Call the kernel.
     %0 = call @kernel_sum_reduce(%a, %x)
@@ -87,7 +87,7 @@
     // Print the result for verification.
     //
-    // CHECK: 30.2
+    // CHECK: 28.2
     //
     %v = tensor.extract %0[] : tensor<f64>
     vector.print %v : f64
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
@@ -1,6 +1,6 @@
 // DEFINE: %{option} = enable-runtime-library=true
 // DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option}
-// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \
+// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_complex.mtx" \
 // DEFINE: mlir-cpu-runner \
 // DEFINE: -e entry -entry-point-result=void \
 // DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
@@ -19,7 +19,7 @@
 // Do the same run, but now with direct IR generation and, if available, VLA
 // vectorization.
 // REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA"
-// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_symmetric_complex.mtx" \
+// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test_complex.mtx" \
 // REDEFINE: %lli \
 // REDEFINE: --entry-function=entry_lli \
 // REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \
@@ -82,7 +82,7 @@
     // Read the sparse matrix from file, construct sparse storage.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
-    %a = sparse_tensor.new expand_symmetry %fileName : !Filename to tensor<?x?xcomplex<f32>, #SparseMatrix>
+    %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xcomplex<f32>, #SparseMatrix>
 
     // Call the kernel.
     %0 = call @kernel_sum_reduce(%a, %x)
@@ -90,8 +90,8 @@
     // Print the result for verification.
     //
-    // CHECK: 30.2
-    // CHECK-NEXT: 22.2
+    // CHECK: 24.1
+    // CHECK-NEXT: 16.1
     //
     %v = tensor.extract %0[] : tensor<complex<f32>>
     %real = complex.re %v : complex<f32>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx b/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx
--- a/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx
+++ b/mlir/test/Integration/Dialect/SparseTensor/taco/data/pwtk.mtx
@@ -1,11 +1,14 @@
-%%MatrixMarket matrix coordinate real symmetric
+%%MatrixMarket matrix coordinate real general
 %-------------------------------------------------------------------------------
 % To download a matrix for a real world application
 % https://math.nist.gov/MatrixMarket/
 %-------------------------------------------------------------------------------
-3 3 5
+3 3 8
 1 1 37423.0879671
+1 2 -22.4050781162
+1 3 -300.654980157
 2 1 -22.4050781162
+2 3 -.00869762944058
 3 1 -300.654980157
 3 2 -.00869762944058
 3 3 805225.750212
diff --git a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py b/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py
--- a/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/taco/unit_test_tensor_io.py
@@ -21,18 +21,12 @@
 _FORMAT = mlir_pytaco.Format([_COMPRESSED, _COMPRESSED])
 
 
-_MTX_DATA_TEMPLATE = Template(
-    """%%MatrixMarket matrix coordinate real $general_or_symmetry
+_MTX_DATA = """%%MatrixMarket matrix coordinate real general
 3 3 3
 3 1 3
 1 2 2
 3 2 4
-""")
-
-
-def _get_mtx_data(value):
-  mtx_data = _MTX_DATA_TEMPLATE
-  return mtx_data.substitute(general_or_symmetry=value)
+"""
 
 
 # CHECK-LABEL: test_read_mtx_matrix_general
@@ -41,7 +35,7 @@
   with tempfile.TemporaryDirectory() as test_dir:
     file_name = os.path.join(test_dir, "data.mtx")
     with open(file_name, "w") as file:
-      file.write(_get_mtx_data("general"))
+      file.write(_MTX_DATA)
     a = mlir_pytaco_io.read(file_name, _FORMAT)
     passed = 0
     # The value of a is stored as an MLIR sparse tensor.
@@ -55,29 +49,6 @@
   print(passed)
 
 
-# CHECK-LABEL: test_read_mtx_matrix_symmetry
-@testing_utils.run_test
-def test_read_mtx_matrix_symmetry():
-  with tempfile.TemporaryDirectory() as test_dir:
-    file_name = os.path.join(test_dir, "data.mtx")
-    with open(file_name, "w") as file:
-      file.write(_get_mtx_data("symmetric"))
-    a = mlir_pytaco_io.read(file_name, _FORMAT)
-    passed = 0
-    # The value of a is stored as an MLIR sparse tensor.
-    passed += (not a.is_unpacked())
-    a.unpack()
-    passed += (a.is_unpacked())
-    coords, values = a.get_coordinates_and_values()
-    print(coords)
-    print(values)
-    passed += np.array_equal(coords,
-                             [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]])
-    passed += np.allclose(values, [2.0, 3.0, 2.0, 4.0, 3.0, 4.0])
-    # CHECK: 4
-    print(passed)
-
-
 _TNS_DATA = """2 3
 3 2
 3 1 3
diff --git a/mlir/test/Integration/data/test_symmetric_complex.mtx b/mlir/test/Integration/data/test_complex.mtx
rename from mlir/test/Integration/data/test_symmetric_complex.mtx
rename to mlir/test/Integration/data/test_complex.mtx
--- a/mlir/test/Integration/data/test_symmetric_complex.mtx
+++ b/mlir/test/Integration/data/test_complex.mtx
@@ -1,4 +1,4 @@
-%%MatrixMarket matrix coordinate complex symmetric
+%%MatrixMarket matrix coordinate complex general
 %
 % This is a test sparse matrix in Matrix Market Exchange Format.
 % see https://math.nist.gov/MatrixMarket
diff --git a/mlir/test/Integration/data/test_symmetric.mtx b/mlir/test/Integration/data/test_symmetric.mtx
deleted file mode 100644
--- a/mlir/test/Integration/data/test_symmetric.mtx
+++ /dev/null
@@ -1,13 +0,0 @@
-%%MatrixMarket matrix coordinate real symmetric
-%
-% This is a test sparse matrix in Matrix Market Exchange Format.
-% see https://math.nist.gov/MatrixMarket
-%
-5 5 7
-1 1 5.0
-1 3 4.1
-2 2 3.0
-2 4 2.0
-3 3 1.0
-4 4 4.0
-5 5 5.0