diff --git a/llvm/test/CodeGen/X86/fma-signed-zero.ll b/llvm/test/CodeGen/X86/fma-signed-zero.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fma-signed-zero.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma \
+; RUN:   | FileCheck %s --check-prefixes=NO-NSZ-OPTION
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma \
+; RUN:   --enable-no-signed-zeros-fp-math | FileCheck %s --check-prefixes=NSZ-OPTION
+
+; This test checks that (fneg (fma (fneg x), y, (fneg z))) can't be folded to (fma x, y, z)
+; without the no-signed-zeros flag (nsz) or the NoSignedZerosFPMath option.
+
+declare float @llvm.fma.f32(float, float, float)
+
+define float @fneg_fma32(float %x, float %y, float %z) {
+; NO-NSZ-OPTION-LABEL: fneg_fma32:
+; NO-NSZ-OPTION:       # %bb.0:
+; NO-NSZ-OPTION-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NO-NSZ-OPTION-NEXT:    retq
+;
+; NSZ-OPTION-LABEL: fneg_fma32:
+; NSZ-OPTION:       # %bb.0:
+; NSZ-OPTION-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NSZ-OPTION-NEXT:    retq
+  %negx = fneg float %x
+  %negz = fneg float %z
+  %fma = call float @llvm.fma.f32(float %negx, float %y, float %negz)
+  %n = fneg float %fma
+  ret float %n
+}
+
+define float @fneg_fma32_nsz(float %x, float %y, float %z) {
+; NO-NSZ-OPTION-LABEL: fneg_fma32_nsz:
+; NO-NSZ-OPTION:       # %bb.0:
+; NO-NSZ-OPTION-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NO-NSZ-OPTION-NEXT:    retq
+;
+; NSZ-OPTION-LABEL: fneg_fma32_nsz:
+; NSZ-OPTION:       # %bb.0:
+; NSZ-OPTION-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NSZ-OPTION-NEXT:    retq
+  %negx = fneg float %x
+  %negz = fneg float %z
+  %fma = call nsz float @llvm.fma.f32(float %negx, float %y, float %negz)
+  %n = fneg float %fma
+  ret float %n
+}
+
+declare double @llvm.fma.f64(double, double, double)
+
+define double @fneg_fma64(double %x, double %y, double %z) {
+; NO-NSZ-OPTION-LABEL: fneg_fma64:
+; NO-NSZ-OPTION:       # %bb.0:
+; NO-NSZ-OPTION-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NO-NSZ-OPTION-NEXT:    retq
+;
+; NSZ-OPTION-LABEL: fneg_fma64:
+; NSZ-OPTION:       # %bb.0:
+; NSZ-OPTION-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NSZ-OPTION-NEXT:    retq
+  %negx = fneg double %x
+  %negz = fneg double %z
+  %fma = call double @llvm.fma.f64(double %negx, double %y, double %negz)
+  %n = fneg double %fma
+  ret double %n
+}
+
+define double @fneg_fma64_nsz(double %x, double %y, double %z) {
+; NO-NSZ-OPTION-LABEL: fneg_fma64_nsz:
+; NO-NSZ-OPTION:       # %bb.0:
+; NO-NSZ-OPTION-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NO-NSZ-OPTION-NEXT:    retq
+;
+; NSZ-OPTION-LABEL: fneg_fma64_nsz:
+; NSZ-OPTION:       # %bb.0:
+; NSZ-OPTION-NEXT:    vfmadd213sd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; NSZ-OPTION-NEXT:    retq
+  %negx = fneg double %x
+  %negz = fneg double %z
+  %fma = call nsz double @llvm.fma.f64(double %negx, double %y, double %negz)
+  %n = fneg double %fma
+  ret double %n
+}
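
For reference, a minimal standalone C sketch (not part of the patch above) of why the fold needs nsz: with x*y equal to +0.0 and z equal to -0.0, fma(x, y, z) yields +0.0 while -fma(-x, y, -z) yields -0.0, so the two forms differ in the sign of zero. Here fmaf stands in for @llvm.fma.f32; the program and its variable names are illustrative assumptions, not taken from the test.

/* Illustrative only: shows the signed-zero discrepancy between the folded
 * and unfolded forms that the test above guards against.
 *   fmaf(x, y, z)       = +0.0 + (-0.0) = +0.0
 *   -fmaf(-x, y, -z)    = -(-0.0 + +0.0) = -(+0.0) = -0.0
 */
#include <math.h>
#include <stdio.h>

int main(void) {
  float x = 0.0f, y = 1.0f, z = -0.0f;
  float folded   = fmaf(x, y, z);       /* corresponds to (fma x, y, z)               */
  float unfolded = -fmaf(-x, y, -z);    /* corresponds to (fneg (fma (fneg x), y, (fneg z))) */
  printf("folded:   %+f\n", folded);    /* prints +0.000000 */
  printf("unfolded: %+f\n", unfolded);  /* prints -0.000000 */
  return 0;
}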