Index: lib/CodeGen/CGBuiltin.cpp =================================================================== --- lib/CodeGen/CGBuiltin.cpp +++ lib/CodeGen/CGBuiltin.cpp @@ -8900,7 +8900,41 @@ Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}), Ops[1]); } - + case X86::BI__builtin_ia32_sqrtsd_round_mask: + case X86::BI__builtin_ia32_sqrtss_round_mask: { + unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); + // Support only if the rounding mode is 4 (AKA CUR_DIRECTION), + // otherwise keep the intrinsic. + if (CC != 4) + return nullptr; + Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0, "extract"); + Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); + Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0, "extract"); + int MaskSize = Ops[3]->getType()->getScalarSizeInBits(); + llvm::Type *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(), MaskSize); + Value *Mask = Builder.CreateBitCast(Ops[3], MaskTy); + Mask = Builder.CreateExtractElement(Mask, (uint64_t)0, "extract"); + A = Builder.CreateSelect(Mask, Builder.CreateCall(F, {A}), Src); + return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0); + } + case X86::BI__builtin_ia32_sqrtpd256: + case X86::BI__builtin_ia32_sqrtpd: + case X86::BI__builtin_ia32_sqrtps256: + case X86::BI__builtin_ia32_sqrtps: { + Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); + return Builder.CreateCall(F, {Ops[0]}); + } + case X86::BI__builtin_ia32_sqrtps512_mask: + case X86::BI__builtin_ia32_sqrtpd512_mask: { + unsigned CC = cast<llvm::ConstantInt>(Ops[3])->getZExtValue(); + // Support only if the rounding mode is 4 (AKA CUR_DIRECTION), + // otherwise keep the intrinsic. 
+ if (CC != 4) + return nullptr; + Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); + return EmitX86Select(*this, Ops[2], Builder.CreateCall(F, {Ops[0]}), + Ops[1]); + } case X86::BI__builtin_ia32_pabsb128: case X86::BI__builtin_ia32_pabsw128: case X86::BI__builtin_ia32_pabsd128: Index: test/CodeGen/avx-builtins.c =================================================================== --- test/CodeGen/avx-builtins.c +++ test/CodeGen/avx-builtins.c @@ -1124,13 +1124,13 @@ __m256d test_mm256_sqrt_pd(__m256d A) { // CHECK-LABEL: test_mm256_sqrt_pd - // CHECK: call <4 x double> @llvm.x86.avx.sqrt.pd.256(<4 x double> %{{.*}}) + // CHECK: call <4 x double> @llvm.sqrt.v4f64(<4 x double> %{{.*}}) return _mm256_sqrt_pd(A); } __m256 test_mm256_sqrt_ps(__m256 A) { // CHECK-LABEL: test_mm256_sqrt_ps - // CHECK: call <8 x float> @llvm.x86.avx.sqrt.ps.256(<8 x float> %{{.*}}) + // CHECK: call <8 x float> @llvm.sqrt.v8f32(<8 x float> %{{.*}}) return _mm256_sqrt_ps(A); } Index: test/CodeGen/avx512f-builtins.c =================================================================== --- test/CodeGen/avx512f-builtins.c +++ test/CodeGen/avx512f-builtins.c @@ -5,84 +5,100 @@ __m512d test_mm512_sqrt_pd(__m512d a) { // CHECK-LABEL: @test_mm512_sqrt_pd - // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512 + // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}}) return _mm512_sqrt_pd(a); } __m512d test_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A) { // CHECK-LABEL: @test_mm512_mask_sqrt_pd - // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512 + // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}} return _mm512_mask_sqrt_pd (__W,__U,__A); } __m512d test_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A) { // CHECK-LABEL: @test_mm512_maskz_sqrt_pd - // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512 + // CHECK: call <8 x double> 
@llvm.sqrt.v8f64(<8 x double> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> {{.*}} return _mm512_maskz_sqrt_pd (__U,__A); } __m512d test_mm512_mask_sqrt_round_pd(__m512d __W,__mmask8 __U,__m512d __A) { // CHECK-LABEL: @test_mm512_mask_sqrt_round_pd - // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512 + // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}} return _mm512_mask_sqrt_round_pd(__W,__U,__A,_MM_FROUND_CUR_DIRECTION); } __m512d test_mm512_maskz_sqrt_round_pd(__mmask8 __U,__m512d __A) { // CHECK-LABEL: @test_mm512_maskz_sqrt_round_pd - // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512 + // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> {{.*}} return _mm512_maskz_sqrt_round_pd(__U,__A,_MM_FROUND_CUR_DIRECTION); } __m512d test_mm512_sqrt_round_pd(__m512d __A) { // CHECK-LABEL: @test_mm512_sqrt_round_pd - // CHECK: @llvm.x86.avx512.mask.sqrt.pd.512 + // CHECK: call <8 x double> @llvm.sqrt.v8f64(<8 x double> %{{.*}}) return _mm512_sqrt_round_pd(__A,_MM_FROUND_CUR_DIRECTION); } __m512 test_mm512_sqrt_ps(__m512 a) { // CHECK-LABEL: @test_mm512_sqrt_ps - // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512 + // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}}) return _mm512_sqrt_ps(a); } __m512 test_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A) { // CHECK-LABEL: @test_mm512_mask_sqrt_ps - // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512 + // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_mask_sqrt_ps( __W, __U, __A); } __m512 test_mm512_maskz_sqrt_ps( __mmask16 __U, 
__m512 __A) { // CHECK-LABEL: @test_mm512_maskz_sqrt_ps - // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512 + // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> {{.*}} return _mm512_maskz_sqrt_ps(__U ,__A); } __m512 test_mm512_mask_sqrt_round_ps(__m512 __W,__mmask16 __U,__m512 __A) { // CHECK-LABEL: @test_mm512_mask_sqrt_round_ps - // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512 + // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}} return _mm512_mask_sqrt_round_ps(__W,__U,__A,_MM_FROUND_CUR_DIRECTION); } __m512 test_mm512_maskz_sqrt_round_ps(__mmask16 __U,__m512 __A) { // CHECK-LABEL: @test_mm512_maskz_sqrt_round_ps - // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512 + // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> {{.*}} return _mm512_maskz_sqrt_round_ps(__U,__A,_MM_FROUND_CUR_DIRECTION); } __m512 test_mm512_sqrt_round_ps(__m512 __A) { // CHECK-LABEL: @test_mm512_sqrt_round_ps - // CHECK: @llvm.x86.avx512.mask.sqrt.ps.512 + // CHECK: call <16 x float> @llvm.sqrt.v16f32(<16 x float> %{{.*}}) return _mm512_sqrt_round_ps(__A,_MM_FROUND_CUR_DIRECTION); } @@ -4619,53 +4635,117 @@ __m128d test_mm_sqrt_round_sd(__m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_sqrt_round_sd - // CHECK: @llvm.x86.avx512.mask.sqrt.sd + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: call double @llvm.sqrt.f64(double %{{.*}}) + // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}} + // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0 return _mm_sqrt_round_sd(__A, __B, 4); } __m128d 
test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.sd + // CHECK-LABEL: @test_mm_mask_sqrt_sd + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call double @llvm.sqrt.f64(double %{{.*}}) + // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}} + // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0 return _mm_mask_sqrt_sd(__W,__U,__A,__B); } __m128d test_mm_mask_sqrt_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.sd + // CHECK-LABEL: @test_mm_mask_sqrt_round_sd + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call double @llvm.sqrt.f64(double %{{.*}}) + // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}} + // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0 return _mm_mask_sqrt_round_sd(__W,__U,__A,__B,_MM_FROUND_CUR_DIRECTION); } __m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.sd + // CHECK-LABEL: @test_mm_maskz_sqrt_sd + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call double @llvm.sqrt.f64(double %{{.*}}) + // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}} + // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0 return _mm_maskz_sqrt_sd(__U,__A,__B); } __m128d test_mm_maskz_sqrt_round_sd(__mmask8 __U, __m128d __A, __m128d __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.sd + // CHECK-LABEL: @test_mm_maskz_sqrt_round_sd + // CHECK: 
extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: extractelement <2 x double> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call double @llvm.sqrt.f64(double %{{.*}}) + // CHECK: select i1 {{.*}}, double {{.*}}, double {{.*}} + // CHECK: insertelement <2 x double> %{{.*}}, double {{.*}}, i64 0 return _mm_maskz_sqrt_round_sd(__U,__A,__B,_MM_FROUND_CUR_DIRECTION); } __m128 test_mm_sqrt_round_ss(__m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_sqrt_round_ss - // CHECK: @llvm.x86.avx512.mask.sqrt.ss + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: call float @llvm.sqrt.f32(float %{{.*}}) + // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}} + // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0 return _mm_sqrt_round_ss(__A, __B, 4); } __m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.ss + // CHECK-LABEL: @test_mm_mask_sqrt_ss + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call float @llvm.sqrt.f32(float %{{.*}}) + // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}} + // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0 return _mm_mask_sqrt_ss(__W,__U,__A,__B); } __m128 test_mm_mask_sqrt_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.ss + // CHECK-LABEL: @test_mm_mask_sqrt_round_ss + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call float @llvm.sqrt.f32(float %{{.*}}) + // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}} + // CHECK: 
insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0 return _mm_mask_sqrt_round_ss(__W,__U,__A,__B,_MM_FROUND_CUR_DIRECTION); } __m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.ss + // CHECK-LABEL: @test_mm_maskz_sqrt_ss + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call float @llvm.sqrt.f32(float %{{.*}}) + // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}} + // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0 return _mm_maskz_sqrt_ss(__U,__A,__B); } __m128 test_mm_maskz_sqrt_round_ss(__mmask8 __U, __m128 __A, __m128 __B){ - // CHECK: @llvm.x86.avx512.mask.sqrt.ss + // CHECK-LABEL: @test_mm_maskz_sqrt_round_ss + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: extractelement <4 x float> %{{.*}}, i64 0 + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK: call float @llvm.sqrt.f32(float %{{.*}}) + // CHECK: select i1 {{.*}}, float {{.*}}, float {{.*}} + // CHECK: insertelement <4 x float> %{{.*}}, float {{.*}}, i64 0 return _mm_maskz_sqrt_round_ss(__U,__A,__B,_MM_FROUND_CUR_DIRECTION); } Index: test/CodeGen/avx512vl-builtins.c =================================================================== --- test/CodeGen/avx512vl-builtins.c +++ test/CodeGen/avx512vl-builtins.c @@ -3160,49 +3160,49 @@ } __m128d test_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) { // CHECK-LABEL: @test_mm_mask_sqrt_pd - // CHECK: @llvm.x86.sse2.sqrt.pd + // CHECK: @llvm.sqrt.v2f64 // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_mask_sqrt_pd(__W,__U,__A); } __m128d test_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) { // CHECK-LABEL: @test_mm_maskz_sqrt_pd - // CHECK: @llvm.x86.sse2.sqrt.pd + // CHECK: @llvm.sqrt.v2f64 // CHECK: select <2 x 
i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_maskz_sqrt_pd(__U,__A); } __m256d test_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) { // CHECK-LABEL: @test_mm256_mask_sqrt_pd - // CHECK: @llvm.x86.avx.sqrt.pd.256 + // CHECK: @llvm.sqrt.v4f64 // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_sqrt_pd(__W,__U,__A); } __m256d test_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) { // CHECK-LABEL: @test_mm256_maskz_sqrt_pd - // CHECK: @llvm.x86.avx.sqrt.pd.256 + // CHECK: @llvm.sqrt.v4f64 // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_sqrt_pd(__U,__A); } __m128 test_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) { // CHECK-LABEL: @test_mm_mask_sqrt_ps - // CHECK: @llvm.x86.sse.sqrt.ps + // CHECK: @llvm.sqrt.v4f32 // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_mask_sqrt_ps(__W,__U,__A); } __m128 test_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) { // CHECK-LABEL: @test_mm_maskz_sqrt_ps - // CHECK: @llvm.x86.sse.sqrt.ps + // CHECK: @llvm.sqrt.v4f32 // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_maskz_sqrt_ps(__U,__A); } __m256 test_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) { // CHECK-LABEL: @test_mm256_mask_sqrt_ps - // CHECK: @llvm.x86.avx.sqrt.ps.256 + // CHECK: @llvm.sqrt.v8f32 // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_sqrt_ps(__W,__U,__A); } __m256 test_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) { // CHECK-LABEL: @test_mm256_maskz_sqrt_ps - // CHECK: @llvm.x86.avx.sqrt.ps.256 + // CHECK: @llvm.sqrt.v8f32 // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_sqrt_ps(__U,__A); } Index: test/CodeGen/sse-builtins.c =================================================================== --- test/CodeGen/sse-builtins.c +++ test/CodeGen/sse-builtins.c @@ 
-655,7 +655,7 @@ __m128 test_mm_sqrt_ps(__m128 x) { // CHECK-LABEL: test_mm_sqrt_ps - // CHECK: call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> {{.*}}) + // CHECK: call <4 x float> @llvm.sqrt.v4f32(<4 x float> {{.*}}) return _mm_sqrt_ps(x); } Index: test/CodeGen/sse2-builtins.c =================================================================== --- test/CodeGen/sse2-builtins.c +++ test/CodeGen/sse2-builtins.c @@ -1188,7 +1188,7 @@ __m128d test_mm_sqrt_pd(__m128d A) { // CHECK-LABEL: test_mm_sqrt_pd - // CHECK: call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %{{.*}}) + // CHECK: call <2 x double> @llvm.sqrt.v2f64(<2 x double> %{{.*}}) return _mm_sqrt_pd(A); }