diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -10042,8 +10042,14 @@
     Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
   } else {
     llvm::Type *Ty = A->getType();
-    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
-    Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
+    Function *FMA;
+    if (CGF.Builder.getIsFPConstrained()) {
+      FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
+      Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
+    } else {
+      FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+      Res = CGF.Builder.CreateCall(FMA, {A, B, C});
+    }

     if (IsAddSub) {
       // Negate even elts in C using a mask.
@@ -10053,7 +10059,11 @@
         Indices[i] = i + (i % 2) * NumElts;

       Value *NegC = CGF.Builder.CreateFNeg(C);
-      Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
+      Value *FMSub;
+      if (CGF.Builder.getIsFPConstrained())
+        FMSub = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, NegC} );
+      else
+        FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
       Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
     }
   }
@@ -10112,6 +10122,10 @@
                         Intrinsic::x86_avx512_vfmadd_f64;
     Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                  {Ops[0], Ops[1], Ops[2], Ops[4]});
+  } else if (CGF.Builder.getIsFPConstrained()) {
+    Function *FMA = CGF.CGM.getIntrinsic(
+        Intrinsic::experimental_constrained_fma, Ops[0]->getType());
+    Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
   } else {
     Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
     Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
@@ -11835,8 +11849,15 @@
   case X86::BI__builtin_ia32_sqrtss:
   case X86::BI__builtin_ia32_sqrtsd: {
     Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
-    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
-    A = Builder.CreateCall(F, {A});
+    Function *F;
+    if (Builder.getIsFPConstrained()) {
+      F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+                           A->getType());
+      A = Builder.CreateConstrainedFPCall(F, {A});
+    } else {
+      F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+      A = Builder.CreateCall(F, {A});
+    }
     return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
   }
   case X86::BI__builtin_ia32_sqrtsd_round_mask:
@@ -11851,8 +11872,15 @@
       return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
     }
     Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
-    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
-    A = Builder.CreateCall(F, A);
+    Function *F;
+    if (Builder.getIsFPConstrained()) {
+      F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+                           A->getType());
+      A = Builder.CreateConstrainedFPCall(F, A);
+    } else {
+      F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
+      A = Builder.CreateCall(F, A);
+    }
     Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
     A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
     return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
@@ -11874,8 +11902,14 @@
         return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
       }
     }
-    Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
-    return Builder.CreateCall(F, Ops[0]);
+    if (Builder.getIsFPConstrained()) {
+      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
+                                     Ops[0]->getType());
+      return Builder.CreateConstrainedFPCall(F, Ops[0]);
+    } else {
+      Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
+      return Builder.CreateCall(F, Ops[0]);
+    }
   }
   case X86::BI__builtin_ia32_pabsb128:
  case X86::BI__builtin_ia32_pabsw128:
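Editorial note on the shared pattern (not part of the patch): each hunk above makes the same substitution. When the IRBuilder is in constrained floating-point mode, which the -ffp-exception-behavior=strict RUN lines in the tests below enable, the plain intrinsic is swapped for its llvm.experimental.constrained.* counterpart and emitted through CreateConstrainedFPCall, which appends the rounding-mode and exception-behavior metadata operands from the builder's current defaults. A minimal sketch of that shape, using only API calls that appear in the hunks; the helper name emitMaybeConstrainedSqrt is hypothetical:

static llvm::Value *emitMaybeConstrainedSqrt(clang::CodeGen::CodeGenFunction &CGF,
                                             llvm::Value *A) {
  llvm::Type *Ty = A->getType();
  if (CGF.Builder.getIsFPConstrained()) {
    // Constrained intrinsics carry extra metadata operands (rounding mode and
    // exception behavior). CreateConstrainedFPCall fills them in from the
    // builder's defaults, e.g. !"round.dynamic" and !"fpexcept.strict" under
    // -ffp-exception-behavior=strict.
    llvm::Function *F = CGF.CGM.getIntrinsic(
        llvm::Intrinsic::experimental_constrained_sqrt, Ty);
    return CGF.Builder.CreateConstrainedFPCall(F, {A});
  }
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::sqrt, Ty);
  return CGF.Builder.CreateCall(F, {A});
}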
diff --git a/clang/test/CodeGen/avx512f-builtins-constrained.c b/clang/test/CodeGen/avx512f-builtins-constrained.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/avx512f-builtins-constrained.c
@@ -0,0 +1,126 @@
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -fms-extensions -fms-compatibility -ffreestanding %s -triple=x86_64-windows-msvc -target-feature +avx512f -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -fms-compatibility -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -S -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -fexperimental-new-pass-manager -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +avx512f -ffp-exception-behavior=strict -S -o - -Wall -Werror | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+#include <immintrin.h>
+
+__m512d test_mm512_sqrt_pd(__m512d a)
+{
+  // COMMON-LABEL: test_mm512_sqrt_pd
+  // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtpd
+  return _mm512_sqrt_pd(a);
+}
+
+__m512d test_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
+{
+  // COMMON-LABEL: test_mm512_mask_sqrt_pd
+  // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtpd
+  // COMMONIR: bitcast i8 %{{.*}} to <8 x i1>
+  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
+  return _mm512_mask_sqrt_pd (__W,__U,__A);
+}
+
+__m512d test_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
+{
+  // COMMON-LABEL: test_mm512_maskz_sqrt_pd
+  // UNCONSTRAINED: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}})
+  // CONSTRAINED: call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtpd
+  // COMMONIR: bitcast i8 %{{.*}} to <8 x i1>
+  // COMMONIR: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> {{.*}}
+  return _mm512_maskz_sqrt_pd (__U,__A);
+}
+
+__m512 test_mm512_sqrt_ps(__m512 a)
+{
+  // COMMON-LABEL: test_mm512_sqrt_ps
+  // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtps
+  return _mm512_sqrt_ps(a);
+}
+
+__m512 test_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
+{
+  // COMMON-LABEL: test_mm512_mask_sqrt_ps
+  // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtps
+  // COMMONIR: bitcast i16 %{{.*}} to <16 x i1>
+  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
+  return _mm512_mask_sqrt_ps( __W, __U, __A);
+}
+
+__m512 test_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
+{
+  // COMMON-LABEL: test_mm512_maskz_sqrt_ps
+  // UNCONSTRAINED: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}})
+  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtps
+  // COMMONIR: bitcast i16 %{{.*}} to <16 x i1>
+  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> {{.*}}
+  return _mm512_maskz_sqrt_ps(__U ,__A);
+}
+
+__m128d test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){
+  // COMMON-LABEL: test_mm_mask_sqrt_sd
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // UNCONSTRAINED-NEXT: call double @llvm.sqrt.f64(double %{{.*}})
+  // CONSTRAINED-NEXT: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtsd
+  // COMMONIR-NEXT: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+  // COMMONIR-NEXT: select i1 {{.*}}, double {{.*}}, double {{.*}}
+  // COMMONIR-NEXT: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
+  return _mm_mask_sqrt_sd(__W,__U,__A,__B);
+}
+
+__m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){
+  // COMMON-LABEL: test_mm_maskz_sqrt_sd
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // UNCONSTRAINED-NEXT: call double @llvm.sqrt.f64(double %{{.*}})
+  // CONSTRAINED-NEXT: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtsd
+  // COMMONIR-NEXT: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+  // COMMONIR-NEXT: select i1 {{.*}}, double {{.*}}, double {{.*}}
+  // COMMONIR-NEXT: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0
+  return _mm_maskz_sqrt_sd(__U,__A,__B);
+}
+
+__m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){
+  // COMMON-LABEL: test_mm_mask_sqrt_ss
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // UNCONSTRAINED-NEXT: call float @llvm.sqrt.f32(float %{{.*}})
+  // CONSTRAINED-NEXT: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtss
+  // COMMONIR-NEXT: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+  // COMMONIR-NEXT: select i1 {{.*}}, float {{.*}}, float {{.*}}
+  // COMMONIR-NEXT: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
+  return _mm_mask_sqrt_ss(__W,__U,__A,__B);
+}
+
+__m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){
+  // COMMON-LABEL: test_mm_maskz_sqrt_ss
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // UNCONSTRAINED-NEXT: call float @llvm.sqrt.f32(float %{{.*}})
+  // CONSTRAINED-NEXT: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vsqrtss
+  // COMMONIR-NEXT: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR-NEXT: bitcast i8 %{{.*}} to <8 x i1>
+  // COMMONIR-NEXT: extractelement <8 x i1> %{{.*}}, i64 0
+  // COMMONIR-NEXT: select i1 {{.*}}, float {{.*}}, float {{.*}}
+  // COMMONIR-NEXT: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0
+  return _mm_maskz_sqrt_ss(__U,__A,__B);
+}
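Side note on the masked checks above (not part of the test file): the COMMONIR "bitcast ... to <8 x i1>" plus "select" lines are the IR form of AVX-512 merge-masking, where bit i of the mask picks between the freshly computed lane and the passthrough lane. A scalar reference of those semantics for _mm512_mask_sqrt_pd, as a sketch only; mask_sqrt_pd_ref is a hypothetical name:

#include <math.h>
#include <stdint.h>

/* Reference semantics for _mm512_mask_sqrt_pd(src, mask, a): lane i of the
   result is sqrt(a[i]) if mask bit i is set, otherwise src[i]. This is the
   per-lane choice the bitcast+select pattern in the checks expresses. */
static void mask_sqrt_pd_ref(double dst[8], const double src[8],
                             uint8_t mask, const double a[8]) {
  for (int i = 0; i < 8; ++i)
    dst[i] = ((mask >> i) & 1) ? sqrt(a[i]) : src[i];
}

The maskz variants differ only in selecting against zero instead of src, which is why their final select operand is an unnamed value rather than the passthrough argument.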
diff --git a/clang/test/CodeGen/fma-builtins-constrained.c b/clang/test/CodeGen/fma-builtins-constrained.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/fma-builtins-constrained.c
@@ -0,0 +1,352 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=UNCONSTRAINED %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -ffp-exception-behavior=strict -emit-llvm -o - | FileCheck --check-prefix=COMMON --check-prefix=COMMONIR --check-prefix=CONSTRAINED %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -S -o - | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +fma -ffp-exception-behavior=strict -S -o - | FileCheck --check-prefix=COMMON --check-prefix=CHECK-ASM %s
+
+// FIXME: Several of these tests are broken when constrained.
+
+#include <immintrin.h>
+
+__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fmadd_ps
+  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm_fmadd_ps(a, b, c);
+}
+
+__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fmadd_pd
+  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm_fmadd_pd(a, b, c);
+}
+
+__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fmadd_ss
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ss
+  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+  return _mm_fmadd_ss(a, b, c);
+}
+
+__m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fmadd_sd
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213sd
+  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+  return _mm_fmadd_sd(a, b, c);
+}
+
+__m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fmsub_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm_fmsub_ps(a, b, c);
+}
+
+__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fmsub_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm_fmsub_pd(a, b, c);
+}
+
+__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fmsub_ss
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmsub213ss
+  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+  return _mm_fmsub_ss(a, b, c);
+}
+
+__m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fmsub_sd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmsub213sd
+  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+  return _mm_fmsub_sd(a, b, c);
+}
+
+__m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fnmadd_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm_fnmadd_ps(a, b, c);
+}
+
+__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fnmadd_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm_fnmadd_pd(a, b, c);
+}
+
+__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fnmadd_ss
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfnmadd213ss
+  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+  return _mm_fnmadd_ss(a, b, c);
+}
+
+__m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fnmadd_sd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfnmadd213sd
+  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+  return _mm_fnmadd_sd(a, b, c);
+}
+
+__m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fnmsub_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
+  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm_fnmsub_ps(a, b, c);
+}
+
+__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fnmsub_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
+  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm_fnmsub_pd(a, b, c);
+}
+
+__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fnmsub_ss
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
+  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
+  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfnmsub213ss
+  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
+  return _mm_fnmsub_ss(a, b, c);
+}
+
+__m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fnmsub_sd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
+  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
+  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfnmsub213sd
+  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
+  return _mm_fnmsub_sd(a, b, c);
+}
+
+__m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fmaddsub_ps
+  // UNCONSTRAINED: [[ADD:%.+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmaddsub213ps
+  // COMMONIR: shufflevector <4 x float> [[SUB]], <4 x float> [[ADD]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  return _mm_fmaddsub_ps(a, b, c);
+}
+
+__m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fmaddsub_pd
+  // UNCONSTRAINED: [[ADD:%.+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmaddsub213pd
+  // COMMONIR: shufflevector <2 x double> [[SUB]], <2 x double> [[ADD]], <2 x i32> <i32 0, i32 3>
+  return _mm_fmaddsub_pd(a, b, c);
+}
+
+__m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) {
+  // COMMON-LABEL: test_mm_fmsubadd_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[NEG]], metadata !{{.*}})
+  // UNCONSTRAINED: [[ADD:%.+]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmsubadd213ps
+  // COMMONIR: shufflevector <4 x float> [[ADD]], <4 x float> [[SUB]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  return _mm_fmsubadd_ps(a, b, c);
+}
+
+__m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) {
+  // COMMON-LABEL: test_mm_fmsubadd_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
+  // UNCONSTRAINED: [[ADD:%.+]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmsubadd213pd
+  // COMMONIR: shufflevector <2 x double> [[ADD]], <2 x double> [[SUB]], <2 x i32> <i32 0, i32 3>
+  return _mm_fmsubadd_pd(a, b, c);
+}
+
+__m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) {
+  // COMMON-LABEL: test_mm256_fmadd_ps
+  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm256_fmadd_ps(a, b, c);
+}
+
+__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) {
+  // COMMON-LABEL: test_mm256_fmadd_pd
+  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm256_fmadd_pd(a, b, c);
+}
+
+__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
+  // COMMON-LABEL: test_mm256_fmsub_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm256_fmsub_ps(a, b, c);
+}
+
+__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
+  // COMMON-LABEL: test_mm256_fmsub_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm256_fmsub_pd(a, b, c);
+}
+
+__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
+  // COMMON-LABEL: test_mm256_fnmadd_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm256_fnmadd_ps(a, b, c);
+}
+
+__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
+  // COMMON-LABEL: test_mm256_fnmadd_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm256_fnmadd_pd(a, b, c);
+}
+
+__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
+  // COMMON-LABEL: test_mm256_fnmsub_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+  // COMMONIR: [[NEG2:%.+]] = fneg <8 x float> %{{.*}}
+  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213ps
+  return _mm256_fnmsub_ps(a, b, c);
+}
+
+__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
+  // COMMON-LABEL: test_mm256_fnmsub_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+  // COMMONIR: [[NEG2:%.+]] = fneg <4 x double> %{{.+}}
+  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // CHECK-ASM: vfmadd213pd
+  return _mm256_fnmsub_pd(a, b, c);
+}
+
+__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) {
+  // COMMON-LABEL: test_mm256_fmaddsub_ps
+  // UNCONSTRAINED: [[ADD:%.+]] = call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]], metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmaddsub213ps
+  // COMMONIR: shufflevector <8 x float> [[SUB]], <8 x float> [[ADD]], <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  return _mm256_fmaddsub_ps(a, b, c);
+}
+
+__m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) {
+  // COMMON-LABEL: test_mm256_fmaddsub_pd
+  // UNCONSTRAINED: [[ADD:%.+]] = call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: [[SUB:%.+]] = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmaddsub213pd
+  // COMMONIR: shufflevector <4 x double> [[SUB]], <4 x double> [[ADD]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  return _mm256_fmaddsub_pd(a, b, c);
+}
+
+__m256 test_mm256_fmsubadd_ps(__m256 a, __m256 b, __m256 c) {
+  // COMMON-LABEL: test_mm256_fmsubadd_ps
+  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[NEG]], metadata !{{.*}})
+  // UNCONSTRAINED: [[ADD:%.+]] = call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmsubadd213ps
+  // COMMONIR: shufflevector <8 x float> [[ADD]], <8 x float> [[SUB]], <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  return _mm256_fmsubadd_ps(a, b, c);
+}
+
+__m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) {
+  // COMMON-LABEL: test_mm256_fmsubadd_pd
+  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
+  // UNCONSTRAINED: [[SUB:%.+]] = call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]])
+  // CONSTRAINED: [[SUB:%.+]] = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[NEG]], metadata !{{.*}})
+  // UNCONSTRAINED: [[ADD:%.+]] = call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
+  // CONSTRAINED: [[ADD:%.+]] = call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
+  // FIXME-CHECK-ASM: vfmsubadd213pd
+  // COMMONIR: shufflevector <4 x double> [[ADD]], <4 x double> [[SUB]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  return _mm256_fmsubadd_pd(a, b, c);
+}
diff --git a/clang/test/CodeGen/sse-builtins-constrained.c b/clang/test/CodeGen/sse-builtins-constrained.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/sse-builtins-constrained.c
@@ -0,0 +1,26 @@
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=UNCONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -ffp-exception-behavior=strict -emit-llvm -o - -Wall -Werror | FileCheck %s --check-prefix=CONSTRAINED --check-prefix=COMMON --check-prefix=COMMONIR
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -S %s -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON
+// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-apple-darwin -target-feature +sse -ffp-exception-behavior=strict -S %s -o - -Wall -Werror | FileCheck %s --check-prefix=CHECK-ASM --check-prefix=COMMON
+
+
+#include <immintrin.h>
+
+__m128 test_mm_sqrt_ps(__m128 x) {
+  // COMMON-LABEL: test_mm_sqrt_ps
+  // UNCONSTRAINED: call <4 x float> @llvm.sqrt.v4f32(<4 x float> {{.*}})
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> {{.*}}, metadata !{{.*}})
+  // CHECK-ASM: sqrtps
+  return _mm_sqrt_ps(x);
+}
+
+__m128 test_sqrt_ss(__m128 x) {
+  // COMMON-LABEL: test_sqrt_ss
+  // COMMONIR: extractelement <4 x float> {{.*}}, i64 0
+  // UNCONSTRAINED: call float @llvm.sqrt.f32(float {{.*}})
+  // CONSTRAINED: call float @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !{{.*}})
+  // CHECK-ASM: sqrtss
+  // COMMONIR: insertelement <4 x float> {{.*}}, float {{.*}}, i64 0
+  return _mm_sqrt_ss(x);
+}
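Closing note (illustrative, not part of the patch): the practical difference the strict runs pin down is ordering with respect to the FP environment. Plain llvm.sqrt and llvm.fma may be moved or folded as if no one observes the FP status flags; the constrained forms may not. A sketch of the kind of user code that relies on this, assuming it is compiled with -ffp-exception-behavior=strict as in the RUN lines above:

#include <fenv.h>
#include <immintrin.h>

/* Returns nonzero if the square root raised FE_INVALID, i.e. some lane of x
   was negative. This is only reliable if the sqrt cannot be reordered across
   the feclearexcept/fetestexcept pair, which is what the constrained
   intrinsics emitted under -ffp-exception-behavior=strict provide. */
int sqrt_raised_invalid(__m128 x) {
  feclearexcept(FE_ALL_EXCEPT);
  volatile __m128 r = _mm_sqrt_ps(x); /* a negative lane sets FE_INVALID */
  (void)r;                            /* keep the computation alive */
  return fetestexcept(FE_INVALID) != 0;
}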