diff --git a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h --- a/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h +++ b/mlir/include/mlir/Dialect/SparseTensor/Utils/Merger.h @@ -32,6 +32,7 @@ // Unary operations. kAbsF, kAbsC, + kAbsI, kCeilF, kFloorF, kSqrtF, diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp --- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp +++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp @@ -40,6 +40,7 @@ // Unary operations. case kAbsF: case kAbsC: + case kAbsI: case kCeilF: case kFloorF: case kSqrtF: @@ -310,6 +311,7 @@ // Unary operations. case kAbsF: case kAbsC: + case kAbsI: case kCeilF: case kFloorF: case kSqrtF: @@ -398,6 +400,7 @@ // Unary operations. case kAbsF: case kAbsC: + case kAbsI: return "abs"; case kCeilF: return "ceil"; @@ -497,6 +500,7 @@ // Unary operations. case kAbsF: case kAbsC: + case kAbsI: case kCeilF: case kFloorF: case kSqrtF: @@ -630,6 +634,7 @@ // Unary operations. 
case kAbsF: case kAbsC: + case kAbsI: case kCeilF: case kFloorF: case kSqrtF: @@ -896,6 +901,8 @@ return addExp(kAbsF, e); if (isa<complex::AbsOp>(def)) return addExp(kAbsC, e); + if (isa<math::AbsIOp>(def)) + return addExp(kAbsI, e); if (isa<math::CeilOp>(def)) return addExp(kCeilF, e); if (isa<math::FloorOp>(def)) @@ -1079,6 +1086,8 @@ auto eltType = type.getElementType().cast<FloatType>(); return rewriter.create<complex::AbsOp>(loc, eltType, v0); } + case kAbsI: + return rewriter.create<math::AbsIOp>(loc, v0); case kCeilF: return rewriter.create<math::CeilOp>(loc, v0); case kFloorF: diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_abs.mlir @@ -0,0 +1,110 @@ +// RUN: mlir-opt %s --sparse-compiler | \ +// RUN: mlir-cpu-runner \ +// RUN: -e entry -entry-point-result=void \ +// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ +// RUN: FileCheck %s + +#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> + +#trait_op = { + indexing_maps = [ + affine_map<(i) -> (i)>, // a + affine_map<(i) -> (i)> // x (out) + ], + iterator_types = ["parallel"], + doc = "x(i) = OP a(i)" +} + +module { + func.func @sparse_absf(%arg0: tensor<?xf64, #SparseVector>) + -> tensor<?xf64, #SparseVector> { + %c0 = arith.constant 0 : index + %d = tensor.dim %arg0, %c0 : tensor<?xf64, #SparseVector> + %xin = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector> + %0 = linalg.generic #trait_op + ins(%arg0: tensor<?xf64, #SparseVector>) + outs(%xin: tensor<?xf64, #SparseVector>) { + ^bb0(%a: f64, %x: f64) : + %result = math.absf %a : f64 + linalg.yield %result : f64 + } -> tensor<?xf64, #SparseVector> + return %0 : tensor<?xf64, #SparseVector> + } + + func.func @sparse_absi(%arg0: tensor<?xi32, #SparseVector>) + -> tensor<?xi32, #SparseVector> { + %c0 = arith.constant 0 : index + %d = tensor.dim %arg0, %c0 : tensor<?xi32, #SparseVector> + %xin = bufferization.alloc_tensor(%d) : tensor<?xi32, #SparseVector> + %0 = linalg.generic #trait_op + ins(%arg0: tensor<?xi32, #SparseVector>) + outs(%xin: tensor<?xi32, #SparseVector>) { + ^bb0(%a: i32, %x: i32) : + %result = math.absi %a : i32 + linalg.yield %result : i32 + } -> tensor<?xi32, #SparseVector> + return %0 : tensor<?xi32, #SparseVector> + } + + // Driver 
method to call and verify sign kernel. + func.func @entry() { + %c0 = arith.constant 0 : index + %df = arith.constant 99.99 : f64 + %di = arith.constant 9999 : i32 + + %pnan = arith.constant 0x7FF0000001000000 : f64 + %nnan = arith.constant 0xFFF0000001000000 : f64 + %pinf = arith.constant 0x7FF0000000000000 : f64 + %ninf = arith.constant 0xFFF0000000000000 : f64 + + // Setup sparse vectors. + %v1 = arith.constant sparse< + [ [0], [3], [5], [11], [13], [17], [18], [20], [21], [28], [29], [31] ], + [ -1.5, 1.5, -10.2, 11.3, 1.0, -1.0, + 0x7FF0000001000000, // +NaN + 0xFFF0000001000000, // -NaN + 0x7FF0000000000000, // +Inf + 0xFFF0000000000000, // -Inf + -0.0, // -Zero + 0.0 // +Zero + ] + > : tensor<32xf64> + %v2 = arith.constant sparse< + [ [0], [3], [5], [11], [13], [17], [18], [21], [31] ], + [ -2147483648, -2147483647, -1000, -1, 0, + 1, 1000, 2147483646, 2147483647 + ] + > : tensor<32xi32> + %sv1 = sparse_tensor.convert %v1 + : tensor<32xf64> to tensor<?xf64, #SparseVector> + %sv2 = sparse_tensor.convert %v2 + : tensor<32xi32> to tensor<?xi32, #SparseVector> + + // Call abs kernels. + %0 = call @sparse_absf(%sv1) : (tensor<?xf64, #SparseVector>) + -> tensor<?xf64, #SparseVector> + + %1 = call @sparse_absi(%sv2) : (tensor<?xi32, #SparseVector>) + -> tensor<?xi32, #SparseVector> + + // + // Verify the results. + // + // CHECK: ( 1.5, 1.5, 10.2, 11.3, 1, 1, nan, nan, inf, inf, 0, 0, 99.99 ) + // CHECK-NEXT: ( -2147483648, 2147483647, 1000, 1, 0, 1, 1000, 2147483646, 2147483647, 9999, 9999, 9999, 9999 ) + // + %x = sparse_tensor.values %0 : tensor<?xf64, #SparseVector> to memref<?xf64> + %y = sparse_tensor.values %1 : tensor<?xi32, #SparseVector> to memref<?xi32> + %a = vector.transfer_read %x[%c0], %df: memref<?xf64>, vector<13xf64> + %b = vector.transfer_read %y[%c0], %di: memref<?xi32>, vector<13xi32> + vector.print %a : vector<13xf64> + vector.print %b : vector<13xi32> + + // Release the resources. 
+ bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector> + bufferization.dealloc_tensor %sv2 : tensor<?xi32, #SparseVector> + bufferization.dealloc_tensor %0 : tensor<?xf64, #SparseVector> + bufferization.dealloc_tensor %1 : tensor<?xi32, #SparseVector> + return + } +} diff --git a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp --- a/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp +++ b/mlir/unittests/Dialect/SparseTensor/MergerTest.cpp @@ -230,6 +230,7 @@ // Unary operations. case kAbsF: case kAbsC: + case kAbsI: case kCeilF: case kFloorF: case kSqrtF: