diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -26,7 +26,7 @@ //===----------------------------------------------------------------------===// def SparseTensor_NewOp : SparseTensor_Op<"new", [Pure]>, - Arguments<(ins AnyType:$source)>, + Arguments<(ins AnyType:$source, UnitAttr:$nonSymmetric)>, Results<(outs AnySparseTensor:$result)> { string summary = "Materializes a new sparse tensor from given source"; string description = [{ @@ -39,13 +39,21 @@ code. The operation is provided as an anchor that materializes a properly typed sparse tensor with inital contents into a computation. + The operator supports symmetric structures for 2D tensors. That is, if a + non-zero value is discovered at (i, j) where i!=j, then we add the same + value to (j, i). An optional attribute `nonSymmetric` can be used to + indicate that a 2D tensor is not symmetric. This can avoid the per + non-zero element check at runtime. Without this attribute, we always perform + the symmetric check for 2D tensors. + Example: ```mlir sparse_tensor.new %source : !Source to tensor<1024x1024xf64, #CSR> ``` }]; - let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)"; + let assemblyFormat = "(`non_symmetric` $nonSymmetric^)? $source attr-dict" + "`:` type($source) `to` type($result)"; } def SparseTensor_ConvertOp : SparseTensor_Op<"convert", diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp @@ -913,7 +913,7 @@ .getResult(0); Value symmetric; // We assume only rank 2 tensors may have the isSymmetric flag set. 
- if (rank == 2) { + if (rank == 2 && !op.getNonSymmetric()) { symmetric = createFuncCall(rewriter, loc, "getSparseTensorReaderIsSymmetric", {rewriter.getI1Type()}, {reader}, EmitCInterface::Off) diff --git a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir --- a/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir +++ b/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir @@ -44,6 +44,38 @@ return %0 : tensor } +// CHECK-LABEL: func.func @sparse_new_nonsymmetric( +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) -> tensor> { +// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index +// CHECK: %[[R:.*]] = call @createSparseTensorReader(%[[A]]) +// CHECK: %[[DS:.*]] = memref.alloca(%[[C2]]) : memref +// CHECK: call @getSparseTensorReaderDimSizes(%[[R]], %[[DS]]) +// CHECK: %[[D0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]] +// CHECK: %[[D1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]] +// CHECK: %[[T:.*]] = bufferization.alloc_tensor(%[[D0]], %[[D1]]) +// CHECK: %[[N:.*]] = call @getSparseTensorReaderNNZ(%[[R]]) +// CHECK: %[[VB:.*]] = memref.alloca() +// CHECK: %[[T2:.*]] = scf.for %{{.*}} = %[[C0]] to %[[N]] step %[[C1]] iter_args(%[[A2:.*]] = %[[T]]) +// CHECK: func.call @getSparseTensorReaderNextF32(%[[R]], %[[DS]], %[[VB]]) +// CHECK: %[[E0:.*]] = memref.load %[[DS]]{{\[}}%[[C0]]] +// CHECK: %[[E1:.*]] = memref.load %[[DS]]{{\[}}%[[C1]]] +// CHECK: %[[V:.*]] = memref.load %[[VB]][] +// CHECK: %[[T1:.*]] = sparse_tensor.insert %[[V]] into %[[A2]]{{\[}}%[[E0]], %[[E1]]] +// CHECK: scf.yield %[[T1]] +// CHECK: } +// CHECK: call @delSparseTensorReader(%[[R]]) +// CHECK: %[[T3:.*]] = sparse_tensor.load %[[T2]] hasInserts +// CHECK: %[[R:.*]] = sparse_tensor.convert %[[T3]] +// CHECK: bufferization.dealloc_tensor %[[T3]] +// CHECK: return %[[R]] +func.func @sparse_new_nonsymmetric(%arg0: !llvm.ptr) -> tensor { 
+ %0 = sparse_tensor.new non_symmetric %arg0 : !llvm.ptr to tensor + return %0 : tensor +} + + // CHECK-LABEL: func.func @sparse_out( // CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ] }>>, // CHECK-SAME: %[[B:.*]]: !llvm.ptr) { diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir --- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir +++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir @@ -13,6 +13,19 @@ // ----- +#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}> + +// CHECK-LABEL: func @sparse_new_nonsymmetric( +// CHECK-SAME: %[[A:.*]]: !llvm.ptr) +// CHECK: %[[T:.*]] = sparse_tensor.new non_symmetric %[[A]] : !llvm.ptr to tensor +// CHECK: return %[[T]] : tensor +func.func @sparse_new_nonsymmetric(%arg0: !llvm.ptr) -> tensor { + %0 = sparse_tensor.new non_symmetric %arg0 : !llvm.ptr to tensor + return %0 : tensor +} + +// ----- + #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}> // CHECK-LABEL: func @sparse_dealloc(