diff --git a/flang/lib/Lower/IntrinsicCall.cpp b/flang/lib/Lower/IntrinsicCall.cpp --- a/flang/lib/Lower/IntrinsicCall.cpp +++ b/flang/lib/Lower/IntrinsicCall.cpp @@ -1200,10 +1200,10 @@ /// See https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gfortran/\ /// Intrinsic-Procedures.html for a reference. static constexpr MathOperation mathOperations[] = { - {"abs", "fabsf", genF32F32FuncType, genMathOp}, - {"abs", "fabs", genF64F64FuncType, genMathOp}, + {"abs", "fabsf", genF32F32FuncType, genMathOp}, + {"abs", "fabs", genF64F64FuncType, genMathOp}, {"abs", "llvm.fabs.f128", genF128F128FuncType, - genMathOp}, + genMathOp}, // llvm.trunc behaves the same way as libm's trunc. {"aint", "llvm.trunc.f32", genF32F32FuncType, genLibCall}, {"aint", "llvm.trunc.f64", genF64F64FuncType, genLibCall}, diff --git a/flang/test/Intrinsics/math-codegen.fir b/flang/test/Intrinsics/math-codegen.fir --- a/flang/test/Intrinsics/math-codegen.fir +++ b/flang/test/Intrinsics/math-codegen.fir @@ -21,7 +21,7 @@ func.func @_QPtest_real4(%arg0: !fir.ref {fir.bindc_name = "x"}) -> f32 { %0 = fir.alloca f32 {bindc_name = "test_real4", uniq_name = "_QFtest_real4Etest_real4"} %1 = fir.load %arg0 : !fir.ref - %2 = math.abs %1 : f32 + %2 = math.absf %1 : f32 fir.store %2 to %0 : !fir.ref %3 = fir.load %0 : !fir.ref return %3 : f32 @@ -29,7 +29,7 @@ func.func @_QPtest_real8(%arg0: !fir.ref {fir.bindc_name = "x"}) -> f64 { %0 = fir.alloca f64 {bindc_name = "test_real8", uniq_name = "_QFtest_real8Etest_real8"} %1 = fir.load %arg0 : !fir.ref - %2 = math.abs %1 : f64 + %2 = math.absf %1 : f64 fir.store %2 to %0 : !fir.ref %3 = fir.load %0 : !fir.ref return %3 : f64 @@ -37,7 +37,7 @@ func.func @_QPtest_real16(%arg0: !fir.ref {fir.bindc_name = "x"}) -> f128 { %0 = fir.alloca f128 {bindc_name = "test_real16", uniq_name = "_QFtest_real16Etest_real16"} %1 = fir.load %arg0 : !fir.ref - %2 = math.abs %1 : f128 + %2 = math.absf %1 : f128 fir.store %2 to %0 : !fir.ref %3 = fir.load %0 : !fir.ref return %3 : f128 @@ -93,7 +93,7 @@ func.func @_QPtest_real4(%arg0: !fir.ref {fir.bindc_name = "x"}) -> f32 { %0 = fir.alloca f32 {bindc_name = "test_real4", uniq_name = "_QFtest_real4Etest_real4"} %1 = fir.load %arg0 : !fir.ref - %2 = math.abs %1 : f32 + %2 = math.absf %1 : f32 fir.store %2 to %0 : !fir.ref %3 = fir.load %0 : !fir.ref return %3 : f32 @@ -101,7 +101,7 @@ func.func @_QPtest_real8(%arg0: !fir.ref {fir.bindc_name = "x"}) -> f64 { %0 = fir.alloca f64 {bindc_name = "test_real8", uniq_name = "_QFtest_real8Etest_real8"} %1 = fir.load %arg0 : !fir.ref - %2 = math.abs %1 : f64 + %2 = math.absf %1 : f64 fir.store %2 to %0 : !fir.ref %3 = fir.load %0 : !fir.ref return %3 : f64 @@ -109,7 +109,7 @@ func.func @_QPtest_real16(%arg0: !fir.ref {fir.bindc_name = "x"}) -> f128 { %0 = fir.alloca f128 {bindc_name = "test_real16", uniq_name = "_QFtest_real16Etest_real16"} %1 = fir.load %arg0 : !fir.ref - %2 = math.abs %1 : f128 + %2 = math.absf %1 : f128 fir.store %2 to %0 : !fir.ref %3 = fir.load %0 : !fir.ref return %3 : f128 @@ -1934,4 +1934,3 @@ } func.func private @tanf(f32) -> f32 func.func private @tan(f64) -> f64 - diff --git a/flang/test/Lower/Intrinsics/abs.f90 b/flang/test/Lower/Intrinsics/abs.f90 --- a/flang/test/Lower/Intrinsics/abs.f90 +++ b/flang/test/Lower/Intrinsics/abs.f90 @@ -36,7 +36,7 @@ subroutine abs_testh(a, b) ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref ! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (f16) -> f32 -! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2_1]] : f32 +! 
CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2_1]] : f32 ! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> f16 ! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref ! CHECK: return @@ -49,7 +49,7 @@ subroutine abs_testb(a, b) ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref ! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (bf16) -> f32 -! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2_1]] : f32 +! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2_1]] : f32 ! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> bf16 ! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref ! CHECK: return @@ -61,7 +61,7 @@ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref{{.*}}, %[[VAL_1:.*]]: !fir.ref{{.*}}) { subroutine abs_testr(a, b) ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref -! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f32 +! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2]] : f32 ! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref ! CHECK: return real :: a, b @@ -72,7 +72,7 @@ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref{{.*}}, %[[VAL_1:.*]]: !fir.ref{{.*}}) { subroutine abs_testd(a, b) ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref -! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f64 +! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2]] : f64 ! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref ! CHECK: return real(kind=8) :: a, b @@ -83,7 +83,7 @@ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref{{.*}}, %[[VAL_1:.*]]: !fir.ref{{.*}}) { subroutine abs_testr16(a, b) ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref -! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f128 +! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2]] : f128 ! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref ! CHECK: return real(kind=16) :: a, b diff --git a/flang/test/Lower/array-expression.f90 b/flang/test/Lower/array-expression.f90 --- a/flang/test/Lower/array-expression.f90 +++ b/flang/test/Lower/array-expression.f90 @@ -116,15 +116,15 @@ ! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_3]] : !fir.ref ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i32) -> i64 ! CHECK: %[[VAL_7A:.*]] = fir.convert %[[VAL_6]] : (i64) -> index -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_7A]], %[[C0]] : index -! CHECK: %[[VAL_7:.*]] = arith.select %[[CMP]], %[[VAL_7A]], %[[C0]] : index +! CHECK: %[[C0:.*]] = arith.constant 0 : index +! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_7A]], %[[C0]] : index +! CHECK: %[[VAL_7:.*]] = arith.select %[[CMP]], %[[VAL_7A]], %[[C0]] : index ! CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_4]] : !fir.ref ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i32) -> i64 ! CHECK: %[[VAL_10A:.*]] = fir.convert %[[VAL_9]] : (i64) -> index -! CHECK: %[[C0_2:.*]] = arith.constant 0 : index -! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_10A]], %[[C0_2]] : index -! CHECK: %[[VAL_10:.*]] = arith.select %[[CMP_2]], %[[VAL_10A]], %[[C0_2]] : index +! CHECK: %[[C0_2:.*]] = arith.constant 0 : index +! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_10A]], %[[C0_2]] : index +! CHECK: %[[VAL_10:.*]] = arith.select %[[CMP_2]], %[[VAL_10A]], %[[C0_2]] : index ! CHECK: %[[VAL_11:.*]] = arith.constant 3 : i64 ! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (i64) -> index ! CHECK: %[[VAL_13:.*]] = arith.constant 4 : i64 @@ -255,15 +255,15 @@ ! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref ! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i32) -> i64 ! CHECK: %[[VAL_5A:.*]] = fir.convert %[[VAL_4]] : (i64) -> index -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! 
CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_5A]], %[[C0]] : index -! CHECK: %[[VAL_5:.*]] = arith.select %[[CMP]], %[[VAL_5A]], %[[C0]] : index +! CHECK: %[[C0:.*]] = arith.constant 0 : index +! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_5A]], %[[C0]] : index +! CHECK: %[[VAL_5:.*]] = arith.select %[[CMP]], %[[VAL_5A]], %[[C0]] : index ! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_2]] : !fir.ref ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (i32) -> i64 ! CHECK: %[[VAL_8A:.*]] = fir.convert %[[VAL_7]] : (i64) -> index -! CHECK: %[[C0_2:.*]] = arith.constant 0 : index -! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_8A]], %[[C0_2]] : index -! CHECK: %[[VAL_8:.*]] = arith.select %[[CMP_2]], %[[VAL_8A]], %[[C0_2]] : index +! CHECK: %[[C0_2:.*]] = arith.constant 0 : index +! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_8A]], %[[C0_2]] : index +! CHECK: %[[VAL_8:.*]] = arith.select %[[CMP_2]], %[[VAL_8A]], %[[C0_2]] : index ! CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_10:.*]] = fir.array_load %[[VAL_0]](%[[VAL_9]]) : (!fir.ref>, !fir.shape<1>) -> !fir.array ! CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1> @@ -471,7 +471,7 @@ real :: a(100), b(100) ! CHECK: %[[loop:.*]] = fir.do_loop %[[i:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[bth:.*]] = %[[barr]]) -> (!fir.array<100xf32>) { ! CHECK: %[[val:.*]] = fir.array_fetch %[[aarr]], %[[i]] : (!fir.array<100xf32>, index) -> f32 - ! CHECK: %[[fres:.*]] = math.abs %[[val]] : f32 + ! CHECK: %[[fres:.*]] = math.absf %[[val]] : f32 ! CHECK: %[[res:.*]] = fir.array_update %[[bth]], %[[fres]], %[[i]] : (!fir.array<100xf32>, f32, index) -> !fir.array<100xf32> ! CHECK: fir.result %[[res]] : !fir.array<100xf32> ! CHECK: fir.array_merge_store %[[barr]], %[[loop]] to %[[b]] @@ -1065,9 +1065,9 @@ ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_3]] : !fir.ref ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (i32) -> i64 ! CHECK: %[[VAL_15A:.*]] = fir.convert %[[VAL_14]] : (i64) -> index -! CHECK: %[[C0:.*]] = arith.constant 0 : index -! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_15A]], %[[C0]] : index -! CHECK: %[[VAL_15:.*]] = arith.select %[[CMP]], %[[VAL_15A]], %[[C0]] : index +! CHECK: %[[C0:.*]] = arith.constant 0 : index +! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_15A]], %[[C0]] : index +! CHECK: %[[VAL_15:.*]] = arith.select %[[CMP]], %[[VAL_15A]], %[[C0]] : index ! CHECK: %[[VAL_16:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1> ! CHECK: %[[VAL_17:.*]] = fir.array_load %[[VAL_9]](%[[VAL_16]]) typeparams %[[VAL_8]] : (!fir.ref>>, !fir.shape<1>, i32) -> !fir.array<70x!fir.char<1,?>> ! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i64 diff --git a/flang/test/Lower/math-lowering.f90 b/flang/test/Lower/math-lowering.f90 --- a/flang/test/Lower/math-lowering.f90 +++ b/flang/test/Lower/math-lowering.f90 @@ -14,8 +14,8 @@ end function ! ALL-LABEL: @_QPtest_real4 -! FAST: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f32 -! RELAXED: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f32 +! FAST: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f32 +! RELAXED: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f32 ! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @fabsf({{%[A-Za-z0-9._]+}}) : (f32) -> f32 function test_real8(x) @@ -24,8 +24,8 @@ end function ! ALL-LABEL: @_QPtest_real8 -! FAST: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f64 -! RELAXED: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f64 +! 
FAST: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f64
+! RELAXED: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f64
 ! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @fabs({{%[A-Za-z0-9._]+}}) : (f64) -> f64
 
 function test_real16(x)
@@ -33,8 +33,8 @@
   test_real16 = abs(x)
 end function
 
 ! ALL-LABEL: @_QPtest_real16
-! FAST: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f128
-! RELAXED: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f128
+! FAST: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f128
+! RELAXED: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f128
 ! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @llvm.fabs.f128({{%[A-Za-z0-9._]+}}) : (f128) -> f128
 
 function test_complex4(c)
diff --git a/mlir/docs/Bindings/Python.md b/mlir/docs/Bindings/Python.md
--- a/mlir/docs/Bindings/Python.md
+++ b/mlir/docs/Bindings/Python.md
@@ -940,7 +940,7 @@
 attributes:
 
 *   `OPERATION_NAME` attribute with the `str` fully qualified operation name
-    (i.e. `math.abs`).
+    (i.e. `math.absf`).
 *   An `__init__` method for the *default builder* if one is defined or inferred
     for the operation.
 *   `@property` getter for each operand or result (using an auto-generated name
@@ -1170,4 +1170,3 @@
 utilities to connect to the rest of Python API. The bindings can be located in a
 separate pybind11 module or in the same module as attributes and types, and
 loaded along with the dialect.
-
diff --git a/mlir/include/mlir/Dialect/Math/IR/MathBase.td b/mlir/include/mlir/Dialect/Math/IR/MathBase.td
--- a/mlir/include/mlir/Dialect/Math/IR/MathBase.td
+++ b/mlir/include/mlir/Dialect/Math/IR/MathBase.td
@@ -20,13 +20,13 @@
     ```mlir
     // Scalar absolute value.
-    %a = math.abs %b : f64
-
+    %a = math.absf %b : f64
+
     // Vector elementwise absolute value.
-    %f = math.abs %g : vector<4xf32>
+    %f = math.absf %g : vector<4xf32>
 
     // Tensor elementwise absolute value.
-    %x = math.abs %y : tensor<4x?xf8>
+    %x = math.absf %y : tensor<4x?xf8>
     ```
   }];
   let hasConstantMaterializer = 1;
diff --git a/mlir/include/mlir/Dialect/Math/IR/MathOps.td b/mlir/include/mlir/Dialect/Math/IR/MathOps.td
--- a/mlir/include/mlir/Dialect/Math/IR/MathOps.td
+++ b/mlir/include/mlir/Dialect/Math/IR/MathOps.td
@@ -66,21 +66,42 @@
 }
 
 //===----------------------------------------------------------------------===//
-// AbsOp
+// AbsFOp
 //===----------------------------------------------------------------------===//
 
-def Math_AbsOp : Math_FloatUnaryOp<"abs"> {
+def Math_AbsFOp : Math_FloatUnaryOp<"absf"> {
   let summary = "floating point absolute-value operation";
   let description = [{
-    The `abs` operation computes the absolute value. It takes one operand of
-    floating point type (i.e., scalar, tensor or vector) and returns one result of
-    the same type.
+    The `absf` operation computes the absolute value. It takes one operand of
+    floating point type (i.e., scalar, tensor or vector) and returns one result
+    of the same type.
 
     Example:
 
     ```mlir
     // Scalar absolute value.
-    %a = math.abs %b : f64
+    %a = math.absf %b : f64
+    ```
+  }];
+  let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// AbsIOp
+//===----------------------------------------------------------------------===//
+
+def Math_AbsIOp : Math_IntegerUnaryOp<"absi"> {
+  let summary = "integer absolute-value operation";
+  let description = [{
+    The `absi` operation computes the absolute value. It takes one operand of
+    integer type (i.e., scalar, tensor or vector) and returns one result of the
+    same type.
+
+    Example:
+
+    ```mlir
+    // Scalar absolute value.
+    %a = math.absi %b : i64
     ```
   }];
   let hasFolder = 1;
@@ -443,7 +464,7 @@
 
     ```mlir
     // Scalar floor value.
-    %a = math.floor %b : f64
+    %a = math.floor %b : f64
     ```
   }];
 }
@@ -608,7 +629,7 @@
     one result of the same type. It has no standard attributes.
 
     Example:
-
+
     ```mlir
     // Scalar reciprocal square root value.
     %a = math.rsqrt %b : f64
diff --git a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
--- a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
+++ b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
@@ -288,10 +288,10 @@
     // Case 1. Zero denominator, numerator contains at most one NaN value.
     Value zero = rewriter.create(
         loc, elementType, rewriter.getZeroAttr(elementType));
-    Value rhsRealAbs = rewriter.create<math::AbsOp>(loc, rhsReal);
+    Value rhsRealAbs = rewriter.create<math::AbsFOp>(loc, rhsReal);
     Value rhsRealIsZero = rewriter.create(
         loc, arith::CmpFPredicate::OEQ, rhsRealAbs, zero);
-    Value rhsImagAbs = rewriter.create<math::AbsOp>(loc, rhsImag);
+    Value rhsImagAbs = rewriter.create<math::AbsFOp>(loc, rhsImag);
     Value rhsImagIsZero = rewriter.create(
         loc, arith::CmpFPredicate::OEQ, rhsImagAbs, zero);
     Value lhsRealIsNotNaN = rewriter.create(
@@ -321,10 +321,10 @@
         loc, arith::CmpFPredicate::ONE, rhsImagAbs, inf);
     Value rhsFinite =
         rewriter.create(loc, rhsRealFinite, rhsImagFinite);
-    Value lhsRealAbs = rewriter.create<math::AbsOp>(loc, lhsReal);
+    Value lhsRealAbs = rewriter.create<math::AbsFOp>(loc, lhsReal);
     Value lhsRealInfinite = rewriter.create(
         loc, arith::CmpFPredicate::OEQ, lhsRealAbs, inf);
-    Value lhsImagAbs = rewriter.create<math::AbsOp>(loc, lhsImag);
+    Value lhsImagAbs = rewriter.create<math::AbsFOp>(loc, lhsImag);
     Value lhsImagInfinite = rewriter.create(
         loc, arith::CmpFPredicate::OEQ, lhsImagAbs, inf);
     Value lhsInfinite =
@@ -533,25 +533,25 @@
     auto elementType = type.getElementType().cast();
 
     Value lhsReal = b.create(elementType, adaptor.getLhs());
-    Value lhsRealAbs = b.create<math::AbsOp>(lhsReal);
+    Value lhsRealAbs = b.create<math::AbsFOp>(lhsReal);
     Value lhsImag = b.create(elementType, adaptor.getLhs());
-    Value lhsImagAbs = b.create<math::AbsOp>(lhsImag);
+    Value lhsImagAbs = b.create<math::AbsFOp>(lhsImag);
     Value rhsReal = b.create(elementType, adaptor.getRhs());
-    Value rhsRealAbs = b.create<math::AbsOp>(rhsReal);
+    Value rhsRealAbs = b.create<math::AbsFOp>(rhsReal);
     Value rhsImag = b.create(elementType, adaptor.getRhs());
-    Value rhsImagAbs = b.create<math::AbsOp>(rhsImag);
+    Value rhsImagAbs = b.create<math::AbsFOp>(rhsImag);
 
     Value lhsRealTimesRhsReal = b.create(lhsReal, rhsReal);
-    Value lhsRealTimesRhsRealAbs = b.create<math::AbsOp>(lhsRealTimesRhsReal);
+    Value lhsRealTimesRhsRealAbs = b.create<math::AbsFOp>(lhsRealTimesRhsReal);
     Value lhsImagTimesRhsImag = b.create(lhsImag, rhsImag);
-    Value lhsImagTimesRhsImagAbs = b.create<math::AbsOp>(lhsImagTimesRhsImag);
+    Value lhsImagTimesRhsImagAbs = b.create<math::AbsFOp>(lhsImagTimesRhsImag);
     Value real = b.create(lhsRealTimesRhsReal, lhsImagTimesRhsImag);
 
     Value lhsImagTimesRhsReal = b.create(lhsImag, rhsReal);
-    Value lhsImagTimesRhsRealAbs = b.create<math::AbsOp>(lhsImagTimesRhsReal);
+    Value lhsImagTimesRhsRealAbs = b.create<math::AbsFOp>(lhsImagTimesRhsReal);
     Value lhsRealTimesRhsImag = b.create(lhsReal, rhsImag);
-    Value lhsRealTimesRhsImagAbs = b.create<math::AbsOp>(lhsRealTimesRhsImag);
+    Value lhsRealTimesRhsImagAbs = b.create<math::AbsFOp>(lhsRealTimesRhsImag);
     Value imag = b.create(lhsImagTimesRhsReal, lhsRealTimesRhsImag);
 
@@ -762,7 +762,7 @@
     Value real = b.create(elementType, adaptor.getComplex());
     Value imag = b.create(elementType, adaptor.getComplex());
-    Value absLhs = b.create<math::AbsOp>(real);
+    Value absLhs = b.create<math::AbsFOp>(real);
     Value absArg = b.create(elementType, arg);
 
     Value addAbs
= b.create(absLhs, absArg); diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp --- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp +++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp @@ -254,8 +254,8 @@ StringAttr::get(&converter.getContext(), NVVM::NVVMDialect::getKernelFuncAttrName())); - patterns.add>(converter, "__nv_fabsf", - "__nv_fabs"); + patterns.add>(converter, "__nv_fabsf", + "__nv_fabs"); patterns.add>(converter, "__nv_atanf", "__nv_atan"); patterns.add>(converter, "__nv_atan2f", diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp --- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp +++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp @@ -148,8 +148,8 @@ patterns.add(converter, /*addressSpace=*/4); } - patterns.add>(converter, "__ocml_fabs_f32", - "__ocml_fabs_f64"); + patterns.add>(converter, "__ocml_fabs_f32", + "__ocml_fabs_f64"); patterns.add>(converter, "__ocml_atan_f32", "__ocml_atan_f64"); patterns.add>( diff --git a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp --- a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp +++ b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp @@ -18,7 +18,8 @@ using namespace mlir; namespace { -using AbsOpLowering = VectorConvertToLLVMPattern; +using AbsFOpLowering = VectorConvertToLLVMPattern; +using AbsIOpLowering = VectorConvertToLLVMPattern; using CeilOpLowering = VectorConvertToLLVMPattern; using CopySignOpLowering = VectorConvertToLLVMPattern; @@ -268,7 +269,7 @@ RewritePatternSet &patterns) { // clang-format off patterns.add< - AbsOpLowering, + AbsFOpLowering, CeilOpLowering, CopySignOpLowering, CosOpLowering, diff --git a/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp b/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp --- a/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp +++ b/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp @@ -287,7 +287,7 @@ patterns .add, ExpM1OpPattern, PowFOpPattern, RoundOpPattern, - spirv::ElementwiseOpPattern, + spirv::ElementwiseOpPattern, spirv::ElementwiseOpPattern, spirv::ElementwiseOpPattern, spirv::ElementwiseOpPattern, @@ -302,7 +302,7 @@ // OpenCL patterns patterns.add, ExpM1OpPattern, - spirv::ElementwiseOpPattern, + spirv::ElementwiseOpPattern, spirv::ElementwiseOpPattern, spirv::ElementwiseOpPattern, spirv::ElementwiseOpPattern, diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp @@ -50,7 +50,7 @@ // tosa::AbsOp if (isa(op) && elementTy.isa()) - return rewriter.create(loc, resultTypes, args); + return rewriter.create(loc, resultTypes, args); if (isa(op) && elementTy.isa()) { auto zero = rewriter.create( diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -309,7 +309,7 @@ case UnaryFn::log: return builder.create(arg.getLoc(), arg); case UnaryFn::abs: - return builder.create(arg.getLoc(), arg); + return builder.create(arg.getLoc(), arg); case UnaryFn::ceil: return builder.create(arg.getLoc(), arg); case UnaryFn::floor: diff --git a/mlir/lib/Dialect/Math/IR/MathOps.cpp b/mlir/lib/Dialect/Math/IR/MathOps.cpp --- a/mlir/lib/Dialect/Math/IR/MathOps.cpp +++ 
b/mlir/lib/Dialect/Math/IR/MathOps.cpp
@@ -22,14 +22,21 @@
 #include "mlir/Dialect/Math/IR/MathOps.cpp.inc"
 
 //===----------------------------------------------------------------------===//
-// AbsOp folder
+// AbsFOp folder
 //===----------------------------------------------------------------------===//
 
-OpFoldResult math::AbsOp::fold(ArrayRef operands) {
-  return constFoldUnaryOp(operands, [](const APFloat &a) {
-    const APFloat &result(a);
-    return abs(result);
-  });
+OpFoldResult math::AbsFOp::fold(ArrayRef operands) {
+  return constFoldUnaryOp(operands,
+                          [](const APFloat &a) { return abs(a); });
+}
+
+//===----------------------------------------------------------------------===//
+// AbsIOp folder
+//===----------------------------------------------------------------------===//
+
+OpFoldResult math::AbsIOp::fold(ArrayRef operands) {
+  return constFoldUnaryOp(operands,
+                          [](const APInt &a) { return a.abs(); });
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -232,7 +232,7 @@
   Value normalizedFraction = builder.create(f32Vec, tmp1);
 
   // Compute exponent.
-  Value arg0 = isPositive ? arg : builder.create<math::AbsOp>(arg);
+  Value arg0 = isPositive ? arg : builder.create<math::AbsFOp>(arg);
   Value biasedExponentBits = builder.create(
       builder.create(i32Vec, arg0),
       bcast(i32Cst(builder, 23)));
@@ -375,7 +375,7 @@
 
   // Remap the problem over [0.0, 1.0] by looking at the absolute value and the
   // handling symmetry.
-  Value abs = builder.create<math::AbsOp>(operand);
+  Value abs = builder.create<math::AbsFOp>(operand);
   Value reciprocal = builder.create(one, abs);
   Value compare =
       builder.create(arith::CmpFPredicate::OLT, abs, reciprocal);
@@ -507,7 +507,7 @@
   // Mask for tiny values that are approximated with `operand`.
   Value tiny = bcast(f32Cst(builder, 0.0004f));
   Value tinyMask = builder.create(
-      arith::CmpFPredicate::OLT, builder.create<math::AbsOp>(op.getOperand()),
+      arith::CmpFPredicate::OLT, builder.create<math::AbsFOp>(op.getOperand()),
       tiny);
 
   // The monomial coefficients of the numerator polynomial (odd).
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -895,7 +895,7 @@
     auto x = buildTensorExp(op, def->getOperand(0));
     if (x.has_value()) {
       unsigned e = x.value();
-      if (isa<math::AbsOp>(def))
+      if (isa<math::AbsFOp>(def))
         return addExp(kAbsF, e);
       if (isa(def))
         return addExp(kAbsC, e);
@@ -1076,7 +1076,7 @@
     llvm_unreachable("unexpected non-op");
   // Unary operations.
   case kAbsF:
-    return rewriter.create<math::AbsOp>(loc, v0);
+    return rewriter.create<math::AbsFOp>(loc, v0);
   case kAbsC: {
     auto type = v0.getType().cast();
     auto eltType = type.getElementType().cast();
diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
--- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
@@ -99,9 +99,9 @@
 
 // Case 1. Zero denominator, numerator contains at most one NaN value.
// CHECK: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 -// CHECK: %[[RHS_REAL_ABS:.*]] = math.abs %[[RHS_REAL]] : f32 +// CHECK: %[[RHS_REAL_ABS:.*]] = math.absf %[[RHS_REAL]] : f32 // CHECK: %[[RHS_REAL_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_REAL_ABS]], %[[ZERO]] : f32 -// CHECK: %[[RHS_IMAG_ABS:.*]] = math.abs %[[RHS_IMAG]] : f32 +// CHECK: %[[RHS_IMAG_ABS:.*]] = math.absf %[[RHS_IMAG]] : f32 // CHECK: %[[RHS_IMAG_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_IMAG_ABS]], %[[ZERO]] : f32 // CHECK: %[[LHS_REAL_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_REAL]], %[[ZERO]] : f32 // CHECK: %[[LHS_IMAG_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_IMAG]], %[[ZERO]] : f32 @@ -117,9 +117,9 @@ // CHECK: %[[RHS_REAL_FINITE:.*]] = arith.cmpf one, %[[RHS_REAL_ABS]], %[[INF]] : f32 // CHECK: %[[RHS_IMAG_FINITE:.*]] = arith.cmpf one, %[[RHS_IMAG_ABS]], %[[INF]] : f32 // CHECK: %[[RHS_IS_FINITE:.*]] = arith.andi %[[RHS_REAL_FINITE]], %[[RHS_IMAG_FINITE]] : i1 -// CHECK: %[[LHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL]] : f32 +// CHECK: %[[LHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL]] : f32 // CHECK: %[[LHS_REAL_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_REAL_ABS]], %[[INF]] : f32 -// CHECK: %[[LHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG]] : f32 +// CHECK: %[[LHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG]] : f32 // CHECK: %[[LHS_IMAG_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_IMAG_ABS]], %[[INF]] : f32 // CHECK: %[[LHS_IS_INFINITE:.*]] = arith.ori %[[LHS_REAL_INFINITE]], %[[LHS_IMAG_INFINITE]] : i1 // CHECK: %[[INF_NUM_FINITE_DENOM:.*]] = arith.andi %[[LHS_IS_INFINITE]], %[[RHS_IS_FINITE]] : i1 @@ -289,24 +289,24 @@ return %mul : complex } // CHECK: %[[LHS_REAL:.*]] = complex.re %[[LHS]] : complex -// CHECK: %[[LHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL]] : f32 +// CHECK: %[[LHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL]] : f32 // CHECK: %[[LHS_IMAG:.*]] = complex.im %[[LHS]] : complex -// CHECK: %[[LHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG]] : f32 +// CHECK: %[[LHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG]] : f32 // CHECK: %[[RHS_REAL:.*]] = complex.re %[[RHS]] : complex -// CHECK: %[[RHS_REAL_ABS:.*]] = math.abs %[[RHS_REAL]] : f32 +// CHECK: %[[RHS_REAL_ABS:.*]] = math.absf %[[RHS_REAL]] : f32 // CHECK: %[[RHS_IMAG:.*]] = complex.im %[[RHS]] : complex -// CHECK: %[[RHS_IMAG_ABS:.*]] = math.abs %[[RHS_IMAG]] : f32 +// CHECK: %[[RHS_IMAG_ABS:.*]] = math.absf %[[RHS_IMAG]] : f32 // CHECK: %[[LHS_REAL_TIMES_RHS_REAL:.*]] = arith.mulf %[[LHS_REAL]], %[[RHS_REAL]] : f32 -// CHECK: %[[LHS_REAL_TIMES_RHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL_TIMES_RHS_REAL]] : f32 +// CHECK: %[[LHS_REAL_TIMES_RHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL_TIMES_RHS_REAL]] : f32 // CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_IMAG]], %[[RHS_IMAG]] : f32 -// CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32 +// CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32 // CHECK: %[[REAL:.*]] = arith.subf %[[LHS_REAL_TIMES_RHS_REAL]], %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32 // CHECK: %[[LHS_IMAG_TIMES_RHS_REAL:.*]] = arith.mulf %[[LHS_IMAG]], %[[RHS_REAL]] : f32 -// CHECK: %[[LHS_IMAG_TIMES_RHS_REAL_ABS:.*]] = math.abs %[[LHS_IMAG_TIMES_RHS_REAL]] : f32 +// CHECK: %[[LHS_IMAG_TIMES_RHS_REAL_ABS:.*]] = math.absf %[[LHS_IMAG_TIMES_RHS_REAL]] : f32 // CHECK: %[[LHS_REAL_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_REAL]], %[[RHS_IMAG]] : f32 -// CHECK: %[[LHS_REAL_TIMES_RHS_IMAG_ABS:.*]] = math.abs %[[LHS_REAL_TIMES_RHS_IMAG]] : f32 +// CHECK: %[[LHS_REAL_TIMES_RHS_IMAG_ABS:.*]] = math.absf 
%[[LHS_REAL_TIMES_RHS_IMAG]] : f32 // CHECK: %[[IMAG:.*]] = arith.addf %[[LHS_IMAG_TIMES_RHS_REAL]], %[[LHS_REAL_TIMES_RHS_IMAG]] : f32 // Handle cases where the "naive" calculation results in NaN values. @@ -561,9 +561,9 @@ // Case 1. Zero denominator, numerator contains at most one NaN value. // CHECK: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32 -// CHECK: %[[RHS_REAL_ABS:.*]] = math.abs %[[RHS_REAL]] : f32 +// CHECK: %[[RHS_REAL_ABS:.*]] = math.absf %[[RHS_REAL]] : f32 // CHECK: %[[RHS_REAL_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_REAL_ABS]], %[[ZERO]] : f32 -// CHECK: %[[RHS_IMAG_ABS:.*]] = math.abs %[[RHS_IMAG]] : f32 +// CHECK: %[[RHS_IMAG_ABS:.*]] = math.absf %[[RHS_IMAG]] : f32 // CHECK: %[[RHS_IMAG_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_IMAG_ABS]], %[[ZERO]] : f32 // CHECK: %[[LHS_REAL_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_REAL]], %[[ZERO]] : f32 // CHECK: %[[LHS_IMAG_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_IMAG]], %[[ZERO]] : f32 @@ -579,9 +579,9 @@ // CHECK: %[[RHS_REAL_FINITE:.*]] = arith.cmpf one, %[[RHS_REAL_ABS]], %[[INF]] : f32 // CHECK: %[[RHS_IMAG_FINITE:.*]] = arith.cmpf one, %[[RHS_IMAG_ABS]], %[[INF]] : f32 // CHECK: %[[RHS_IS_FINITE:.*]] = arith.andi %[[RHS_REAL_FINITE]], %[[RHS_IMAG_FINITE]] : i1 -// CHECK: %[[LHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL]] : f32 +// CHECK: %[[LHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL]] : f32 // CHECK: %[[LHS_REAL_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_REAL_ABS]], %[[INF]] : f32 -// CHECK: %[[LHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG]] : f32 +// CHECK: %[[LHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG]] : f32 // CHECK: %[[LHS_IMAG_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_IMAG_ABS]], %[[INF]] : f32 // CHECK: %[[LHS_IS_INFINITE:.*]] = arith.ori %[[LHS_REAL_INFINITE]], %[[LHS_IMAG_INFINITE]] : i1 // CHECK: %[[INF_NUM_FINITE_DENOM:.*]] = arith.andi %[[LHS_IS_INFINITE]], %[[RHS_IS_FINITE]] : i1 diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir --- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -170,9 +170,9 @@ // CHECK: llvm.func @__nv_fabs(f64) -> f64 // CHECK-LABEL: func @gpu_fabs func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { - %result32 = math.abs %arg_f32 : f32 + %result32 = math.absf %arg_f32 : f32 // CHECK: llvm.call @__nv_fabsf(%{{.*}}) : (f32) -> f32 - %result64 = math.abs %arg_f64 : f64 + %result64 = math.absf %arg_f64 : f64 // CHECK: llvm.call @__nv_fabs(%{{.*}}) : (f64) -> f64 func.return %result32, %result64 : f32, f64 } @@ -487,4 +487,3 @@ gpu.return } } - diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir --- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir +++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir @@ -89,9 +89,9 @@ // CHECK: llvm.func @__ocml_fabs_f64(f64) -> f64 // CHECK-LABEL: func @gpu_fabs func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) { - %result32 = math.abs %arg_f32 : f32 + %result32 = math.absf %arg_f32 : f32 // CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (f32) -> f32 - %result64 = math.abs %arg_f64 : f64 + %result64 = math.absf %arg_f64 : f64 // CHECK: llvm.call @__ocml_fabs_f64(%{{.*}}) : (f64) -> f64 func.return %result32, %result64 : f32, f64 } diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir --- a/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir +++ 
b/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir @@ -29,7 +29,7 @@ // CHECK: spv.GL.Sin %{{.*}}: f32 %8 = math.sin %arg0 : f32 // CHECK: spv.GL.FAbs %{{.*}}: f32 - %9 = math.abs %arg0 : f32 + %9 = math.absf %arg0 : f32 // CHECK: spv.GL.Ceil %{{.*}}: f32 %10 = math.ceil %arg0 : f32 // CHECK: spv.GL.Floor %{{.*}}: f32 diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir --- a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir +++ b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir @@ -27,7 +27,7 @@ // CHECK: spv.CL.sin %{{.*}}: f32 %8 = math.sin %arg0 : f32 // CHECK: spv.CL.fabs %{{.*}}: f32 - %9 = math.abs %arg0 : f32 + %9 = math.absf %arg0 : f32 // CHECK: spv.CL.ceil %{{.*}}: f32 %10 = math.ceil %arg0 : f32 // CHECK: spv.CL.floor %{{.*}}: f32 diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -7,7 +7,7 @@ // CHECK: [[INIT:%.+]] = linalg.init_tensor [] : tensor // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = []} ins(%arg0 : tensor) outs([[INIT]] : tensor) { // CHECK: ^bb0(%arg1: f32, %arg2: f32): - // CHECK: [[ELEMENT:%.+]] = math.abs %arg1 + // CHECK: [[ELEMENT:%.+]] = math.absf %arg1 // CHECK: linalg.yield [[ELEMENT]] : f32 // CHECK: } -> tensor @@ -26,7 +26,7 @@ // CHECK: [[INIT:%.+]] = linalg.init_tensor [2] : tensor<2xf32> // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%arg0 : tensor<2xf32>) outs([[INIT]] : tensor<2xf32>) { // CHECK: ^bb0(%arg1: f32, %arg2: f32): - // CHECK: [[ELEMENT:%.+]] = math.abs %arg1 + // CHECK: [[ELEMENT:%.+]] = math.absf %arg1 // CHECK: linalg.yield [[ELEMENT]] : f32 // CHECK: } -> tensor<2xf32> %0 = "tosa.abs"(%arg0) : (tensor<2xf32>) -> tensor<2xf32> @@ -44,7 +44,7 @@ // CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3] : tensor<2x3xf32> // CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins(%arg0 : tensor<2x3xf32>) outs([[INIT]] : tensor<2x3xf32>) { // CHECK: ^bb0(%arg1: f32, %arg2: f32): - // CHECK: [[ELEMENT:%.+]] = math.abs %arg1 + // CHECK: [[ELEMENT:%.+]] = math.absf %arg1 // CHECK: linalg.yield [[ELEMENT]] : f32 // CHECK: } -> tensor<2x3xf32> %0 = "tosa.abs"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32> @@ -61,7 +61,7 @@ // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C0]] // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[DIM]]] // CHECK: linalg.generic - // CHECK: math.abs + // CHECK: math.absf %0 = "tosa.abs"(%arg0) : (tensor) -> tensor return %0 : tensor } @@ -76,7 +76,7 @@ // CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C1]] // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, %[[DIM]]] // CHECK: linalg.generic - // CHECK: math.abs + // CHECK: math.absf %0 = "tosa.abs"(%arg0) : (tensor<2x?xf32>) -> tensor<2x?xf32> return %0 : tensor<2x?xf32> } @@ -146,7 +146,7 @@ %0 = "tosa.tanh"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic - // CHECK: math.abs + // CHECK: math.absf %1 = "tosa.abs"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> // CHECK: linalg.generic @@ -1252,7 +1252,7 @@ // CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index // CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32 // CHECK: tensor.pad %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] 
high{{\[}}[[INDEX2]], [[INDEX4]]] { - // CHECK: ^bb0(%arg1: index, %arg2: index): + // CHECK: ^bb0(%arg1: index, %arg2: index): // CHECK: tensor.yield [[CST]] // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32> %1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>) @@ -1288,7 +1288,7 @@ // CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index // CHECK-DAG: [[CST:%.+]] = arith.constant 4.200000e+01 : f32 // CHECK: tensor.pad %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] { - // CHECK: ^bb0(%arg1: index, %arg2: index): + // CHECK: ^bb0(%arg1: index, %arg2: index): // CHECK: tensor.yield [[CST]] // CHECK: } : tensor<1x2xf32> to tensor<4x9xf32> %1 = arith.constant dense<42.0> : tensor diff --git a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir --- a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir +++ b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir @@ -306,7 +306,7 @@ } // CHECK-LABEL: @generalize_elemwise_abs -// CHECK: = math.abs +// CHECK: = math.absf // ----- diff --git a/mlir/test/Dialect/Math/canonicalize.mlir b/mlir/test/Dialect/Math/canonicalize.mlir --- a/mlir/test/Dialect/Math/canonicalize.mlir +++ b/mlir/test/Dialect/Math/canonicalize.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -canonicalize | FileCheck %s +// RUN: mlir-opt %s -canonicalize | FileCheck %s // CHECK-LABEL: @ceil_fold // CHECK: %[[cst:.+]] = arith.constant 1.000000e+00 : f32 @@ -125,7 +125,7 @@ // CHECK: return %[[cst]] func.func @abs_fold() -> f32 { %c = arith.constant -4.0 : f32 - %r = math.abs %c : f32 + %r = math.absf %c : f32 return %r : f32 } diff --git a/mlir/test/Dialect/Math/polynomial-approximation.mlir b/mlir/test/Dialect/Math/polynomial-approximation.mlir --- a/mlir/test/Dialect/Math/polynomial-approximation.mlir +++ b/mlir/test/Dialect/Math/polynomial-approximation.mlir @@ -359,7 +359,7 @@ // CHECK: %[[VAL_16:.*]] = arith.select %[[VAL_15]], %[[VAL_0]], %[[VAL_2]] : f32 // CHECK: %[[VAL_17:.*]] = arith.cmpf ugt, %[[VAL_16]], %[[VAL_1]] : f32 // CHECK: %[[VAL_18:.*]] = arith.select %[[VAL_17]], %[[VAL_16]], %[[VAL_1]] : f32 -// CHECK: %[[VAL_19:.*]] = math.abs %[[VAL_0]] : f32 +// CHECK: %[[VAL_19:.*]] = math.absf %[[VAL_0]] : f32 // CHECK: %[[VAL_20:.*]] = arith.cmpf olt, %[[VAL_19]], %[[VAL_3]] : f32 // CHECK: %[[VAL_21:.*]] = arith.mulf %[[VAL_18]], %[[VAL_18]] : f32 // CHECK: %[[VAL_22:.*]] = math.fma %[[VAL_21]], %[[VAL_10]], %[[VAL_9]] : f32 @@ -517,7 +517,7 @@ // CHECK-DAG: %[[N3:.+]] = arith.constant -0.0106783099 // CHECK-DAG: %[[N4:.+]] = arith.constant 1.00209987 // CHECK-DAG: %[[HALF_PI:.+]] = arith.constant 1.57079637 -// CHECK-DAG: %[[ABS:.+]] = math.abs %arg0 +// CHECK-DAG: %[[ABS:.+]] = math.absf %arg0 // CHECK-DAG: %[[DIV:.+]] = arith.divf %cst, %[[ABS]] // CHECK-DAG: %[[CMP:.+]] = arith.cmpf olt, %[[ABS]], %[[DIV]] // CHECK-DAG: %[[SEL:.+]] = arith.select %[[CMP]], %[[ABS]], %[[DIV]] @@ -547,7 +547,7 @@ // CHECK-DAG: %[[ARG0:.+]] = arith.extf %arg0 : f16 to f32 // CHECK-DAG: %[[ARG1:.+]] = arith.extf %arg1 : f16 to f32 // CHECK-DAG: %[[RATIO:.+]] = arith.divf %[[ARG0]], %[[ARG1]] -// CHECK-DAG: %[[ABS:.+]] = math.abs %[[RATIO]] +// CHECK-DAG: %[[ABS:.+]] = math.absf %[[RATIO]] // CHECK-DAG: %[[DIV:.+]] = arith.divf %cst, %[[ABS]] // CHECK-DAG: %[[CMP:.+]] = arith.cmpf olt, %[[ABS]], %[[DIV]] // CHECK-DAG: %[[SEL:.+]] = arith.select %[[CMP]], %[[ABS]], %[[DIV]] @@ -593,4 +593,3 @@ %0 = math.atan2 %arg0, %arg1 : f16 return %0 : f16 } - 
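The remaining hunks above and below are mechanical test updates for the rename. As a quick orientation (this snippet is not part of the patch; the function and value names are illustrative only), this is how the renamed float op and the newly added integer op are written after this change, matching the op definitions in MathOps.td:

```mlir
// math.absf is the new spelling of the old math.abs for floating-point
// operands; math.absi is the new integer counterpart. Both are elementwise
// and also accept vector and tensor operands.
func.func @abs_examples(%f: f32, %i: i64, %v: vector<4xf32>)
    -> (f32, i64, vector<4xf32>) {
  %0 = math.absf %f : f32
  %1 = math.absi %i : i64
  %2 = math.absf %v : vector<4xf32>
  return %0, %1, %2 : f32, i64, vector<4xf32>
}
```

With the folders added in MathOps.cpp, both ops fold when their operand is a constant, which is what the canonicalize.mlir update above exercises for `math.absf`.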
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -45,7 +45,7 @@
 // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
 // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref
 // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref
-// CHECK: %[[VAL_13:.*]] = math.abs %[[VAL_12]] : f64
+// CHECK: %[[VAL_13:.*]] = math.absf %[[VAL_12]] : f64
 // CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
 // CHECK: }
 // CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
@@ -57,7 +57,7 @@
      ins(%arga: tensor<32xf64, #SV>)
     outs(%argx: tensor<32xf64>) {
       ^bb(%a: f64, %x: f64):
-        %0 = math.abs %a : f64
+        %0 = math.absf %a : f64
         linalg.yield %0 : f64
   } -> tensor<32xf64>
   return %0 : tensor<32xf64>
@@ -367,7 +367,7 @@
 // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref
 // CHECK: memref.store %[[VAL_12]], %[[VAL_8]]{{\[}}%[[VAL_1]]] : memref
 // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref
-// CHECK: %[[VAL_14:.*]] = math.abs %[[VAL_13]] : f64
+// CHECK: %[[VAL_14:.*]] = math.absf %[[VAL_13]] : f64
 // CHECK: %[[VAL_15:.*]] = math.ceil %[[VAL_14]] : f64
 // CHECK: %[[VAL_16:.*]] = math.floor %[[VAL_15]] : f64
 // CHECK: %[[VAL_17:.*]] = math.sqrt %[[VAL_16]] : f64
@@ -388,7 +388,7 @@
      ins(%arga: tensor<32xf64, #SV>)
     outs(%xinp: tensor<32xf64, #SV>) {
       ^bb(%a: f64, %x: f64):
-        %0 = math.abs %a : f64
+        %0 = math.absf %a : f64
         %1 = math.ceil %0 : f64
         %2 = math.floor %1 : f64
         %3 = math.sqrt %2 : f64
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -105,17 +105,17 @@
   // CHECK: arith.constant false
   %75 = arith.constant false
 
-  // CHECK: %{{.*}} = math.abs %arg1 : f32
-  %100 = "math.abs"(%f) : (f32) -> f32
+  // CHECK: %{{.*}} = math.absf %arg1 : f32
+  %100 = "math.absf"(%f) : (f32) -> f32
 
-  // CHECK: %{{.*}} = math.abs %arg1 : f32
-  %101 = math.abs %f : f32
+  // CHECK: %{{.*}} = math.absf %arg1 : f32
+  %101 = math.absf %f : f32
 
-  // CHECK: %{{.*}} = math.abs %{{.*}}: vector<4xf32>
-  %102 = math.abs %vcf32 : vector<4xf32>
+  // CHECK: %{{.*}} = math.absf %{{.*}}: vector<4xf32>
+  %102 = math.absf %vcf32 : vector<4xf32>
 
-  // CHECK: %{{.*}} = math.abs %arg0 : tensor<4x4x?xf32>
-  %103 = math.abs %t : tensor<4x4x?xf32>
+  // CHECK: %{{.*}} = math.absf %arg0 : tensor<4x4x?xf32>
+  %103 = math.absf %t : tensor<4x4x?xf32>
 
   // CHECK: %{{.*}} = math.ceil %arg1 : f32
   %104 = "math.ceil"(%f) : (f32) -> f32
diff --git a/mlir/test/Target/Cpp/invalid.mlir b/mlir/test/Target/Cpp/invalid.mlir
--- a/mlir/test/Target/Cpp/invalid.mlir
+++ b/mlir/test/Target/Cpp/invalid.mlir
@@ -11,8 +11,8 @@
 // -----
 
 func.func @unsupported_std_op(%arg0: f64) -> f64 {
-  // expected-error@+1 {{'math.abs' op unable to find printer for op}}
-  %0 = math.abs %arg0 : f64
+  // expected-error@+1 {{'math.absf' op unable to find printer for op}}
+  %0 = math.absf %arg0 : f64
   return %0 : f64
 }
diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
--- a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
+++ b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
@@ -92,7 +92,7 @@
 
   # CHECK-LABEL: @test_f32_elemwise_abs
   # CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[OUT:.+]]: f32)
-  # CHECK-NEXT: %[[EXP:.+]] = math.abs %[[IN]] : f32
+  # CHECK-NEXT: %[[EXP:.+]] = math.absf %[[IN]] : f32
   # CHECK-NEXT: linalg.yield %[[EXP]] : f32
   # CHECK-NEXT: -> tensor<4x16xf32>
   @func.FuncOp.from_py_func(