Index: include/clang/Basic/BuiltinsX86.def =================================================================== --- include/clang/Basic/BuiltinsX86.def +++ include/clang/Basic/BuiltinsX86.def @@ -1794,6 +1794,14 @@ TARGET_BUILTIN(__builtin_ia32_loadupd256_mask, "V4dV4d*V4dUc","","avx512vl") TARGET_BUILTIN(__builtin_ia32_loadups128_mask, "V4fV4f*V4fUc","","avx512vl") TARGET_BUILTIN(__builtin_ia32_loadups256_mask, "V8fV8f*V8fUc","","avx512vl") +TARGET_BUILTIN(__builtin_ia32_sqrtsd_round_mask, "V2dV2dV2dV2dUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_sqrtss_round_mask, "V4fV4fV4fV4fUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_vfmaddss3_mask, "V4fV4fV4fV4fUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_vfmaddss3_maskz, "V4fV4fV4fV4fUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_vfmaddss3_mask3, "V4fV4fV4fV4fUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_vfmaddsd3_mask, "V2dV2dV2dV2dUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_vfmaddsd3_maskz, "V2dV2dV2dV2dUcIi", "", "avx512f") +TARGET_BUILTIN(__builtin_ia32_vfmaddsd3_mask3, "V2dV2dV2dV2dUcIi", "", "avx512f") #undef BUILTIN #undef TARGET_BUILTIN Index: lib/Headers/avx512fintrin.h =================================================================== --- lib/Headers/avx512fintrin.h +++ lib/Headers/avx512fintrin.h @@ -931,6 +931,24 @@ (__mmask8) -1); } +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_rsqrt14_sd(__m128d __A, __m128d __B) { @@ -941,6 +959,24 @@ (__mmask8) -1); } +static __inline__ __m128d 
__DEFAULT_FN_ATTRS +_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_rcp14_pd(__m512d __A) { @@ -968,6 +1004,24 @@ (__mmask8) -1); } +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_rcp14_sd(__m128d __A, __m128d __B) { @@ -978,6 +1032,24 @@ (__mmask8) -1); } +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + static __inline __m512 __DEFAULT_FN_ATTRS _mm512_floor_ps(__m512 __A) { @@ -3918,6 +3990,42 @@ (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); } +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) 
__builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_sd( __W, __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_getexpsd128_round_mask ((__v2df) __A,\
+          (__v2df) __B,\
+          (__v2df) __W,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) _mm_setzero_pd (),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_sd( __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,\
+          (__v2df) __B,\
+          (__v2df) _mm_setzero_pd (),\
+          (__mmask8) __U,\
+          __R);\
+})
+
 #define _mm_getexp_round_ss( __A, __B, __R) __extension__ ({ \
 __builtin_ia32_getexpss128_round_mask ((__v4sf)( __A),\
 (__v4sf)( __B), (__v4sf) _mm_setzero_ps(), (__mmask8) -1,\
@@ -3931,6 +4039,42 @@
 (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
 }
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_ss( __W, __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_getexpss128_round_mask ((__v4sf) __A,\
+          (__v4sf) __B,\
+          (__v4sf) __W,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) _mm_setzero_ps (),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_ss( __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_getexpss128_round_mask 
((__v4sf) __A,\ + (__v4sf) __B,\ + (__v4sf) _mm_setzero_ps (),\ + (__mmask8) __U,\ + __R);\ +}) + #define _mm_getmant_round_sd( __A, __B, __C, __D, __R) __extension__ ({ \ __builtin_ia32_getmantsd_round_mask ((__v2df)( __A),\ (__v2df)( __B),\ @@ -3945,6 +4089,42 @@ _MM_FROUND_CUR_DIRECTION);\ }) +#define _mm_mask_getmant_sd( __W, __U, __A, __B, __C, __D) __extension__ ({\ +__builtin_ia32_getmantsd_round_mask ( (__v2df) __A,\ + (__v2df) __B,\ + (( __D) << 2) |( __C),\ + (__v2df) __W,\ + (__mmask8) __U,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_mask_getmant_round_sd( __W, __U, __A, __B, __C, __D, __R)({\ +__builtin_ia32_getmantsd_round_mask ( (__v2df) __A,\ + (__v2df) __B,\ + (( __D) << 2) |( __C),\ + (__v2df) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +#define _mm_maskz_getmant_sd( __U, __A, __B, __C, __D) __extension__ ({\ +__builtin_ia32_getmantsd_round_mask ( (__v2df) __A,\ + (__v2df) __B,\ + (( __D) << 2) |( __C),\ + (__v2df) _mm_setzero_pd (),\ + (__mmask8) __U,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_maskz_getmant_round_sd( __U, __A, __B, __C, __D, __R) __extension__ ({\ +__builtin_ia32_getmantsd_round_mask ( (__v2df) __A,\ + (__v2df) __B,\ + (( __D) << 2) |( __C),\ + (__v2df) _mm_setzero_pd (),\ + (__mmask8) __U,\ + __R);\ +}) + #define _mm_getmant_round_ss( __A, __B, __C, __D, __R) __extension__ ({ \ __builtin_ia32_getmantss_round_mask ((__v4sf)( __A),\ (__v4sf)( __B),\ @@ -3959,6 +4139,41 @@ _MM_FROUND_CUR_DIRECTION);\ }) +#define _mm_mask_getmant_ss( __W, __U, __A, __B, __C, __D) __extension__ ({\ +__builtin_ia32_getmantss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (( __D) << 2) |( __C),\ + (__v4sf) __W,\ + (__mmask8) __U,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_mask_getmant_round_ss( __W, __U, __A, __B, __C, __D, __R)({\ +__builtin_ia32_getmantss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (( __D) << 2) |( __C),\ + (__v4sf) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +#define _mm_maskz_getmant_ss( __U, __A, __B, __C, __D) 
__extension__ ({\ +__builtin_ia32_getmantss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (( __D) << 2) |( __C),\ + (__v4sf) _mm_setzero_pd (),\ + (__mmask8) __U,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_maskz_getmant_round_ss( __U, __A, __B, __C, __D, __R) __extension__ ({\ +__builtin_ia32_getmantss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (( __D) << 2) |( __C),\ + (__v4sf) _mm_setzero_ps (),\ + (__mmask8) __U,\ + __R);\ +}) static __inline__ __mmask16 __DEFAULT_FN_ATTRS _mm512_kmov (__mmask16 __A) @@ -3974,6 +4189,524 @@ __builtin_ia32_vcomiss ((__v4sf) (__A), (__v4sf) (__B), ( __P), ( __R));\ }) +#define _mm_sqrt_round_ss( __A, __B, __R) __extension__ ({\ +__builtin_ia32_sqrtss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (__v4sf) _mm_setzero_ps (),\ + (__mmask8) -1,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_ss( __W, __U, __A, __B, __R) __extension__ ({\ +__builtin_ia32_sqrtss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (__v4sf) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ +return (__m128) __builtin_ia32_sqrtss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_ss( __U, __A, __B, __R) __extension__ ({\ +__builtin_ia32_sqrtss_round_mask ((__v4sf) __A,\ + (__v4sf) __B,\ + (__v4sf) _mm_setzero_ps(),\ + (__mmask8) __U,\ + __R);\ +}) + +#define _mm_sqrt_round_sd( __A, __B, __R) __extension__ ({\ +__builtin_ia32_sqrtsd_round_mask ((__v2df) __A,\ + (__v2df) __B,\ + (__v2df) _mm_setzero_pd (),\ + (__mmask8) -1,\ + __R);\ +}) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_sqrt_sd 
(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_sqrt_round_sd( __W, __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,\
+          (__v2df) __B,\
+          (__v2df) __W,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) _mm_setzero_pd (),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_sqrt_round_sd( __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,\
+          (__v2df) __B,\
+          (__v2df) _mm_setzero_pd (),\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmadd_round_ss( __W, __U, __A, __B, __R) __extension__({\
+__builtin_ia32_vfmaddss3_mask ((__v4sf) __A,\
+          (__v4sf) __B,\
+          (__v4sf) __W,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,
+          (__v4sf) __B,
+          (__v4sf) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_ss( __U, __A, __B, __C, __R) __extension__ ({\
+__builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,\
+          (__v4sf) __B,\
+          (__v4sf) __C,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) 
__W, + (__v4sf) __X, + (__v4sf) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_ss( __W, __X, __Y, __U, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,\ + (__v4sf) __X,\ + (__v4sf) __Y,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A, + (__v4sf) (-(__B)), + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fmsub_round_ss( __W, __U, __A, __B, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_mask ((__v4sf) __A,\ + (__v4sf) (-(__B)),\ + (__v4sf) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_ss( __U, __A, __B, __C, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_maskz ((__v4sf) __A,\ + (__v4sf) __B,\ + (__v4sf) __C,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W, + (__v4sf) __X, + (__v4sf) (-__Y), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_ss( __W, __X, __Y, __U, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,\ + (__v4sf) __X,\ + (__v4sf) - (__Y),\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) -(__A), + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmadd_round_ss( __W, __U, __A, 
__B, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_mask ((__v4sf) -(__A),\ + (__v4sf) __B,\ + (__v4sf) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) -__A, + (__v4sf) __B, + (__v4sf) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_ss( __U, __A, __B, __C, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_maskz ((__v4sf) -(__A),\ + (__v4sf) __B,\ + (__v4sf) __C,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) -(__W), + (__v4sf) __X, + (__v4sf) __Y, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_ss( __W, __X, __Y, __U, __R) __extension__({\ +__builtin_ia32_vfmaddss3_mask3 ((__v4sf) -(__W),\ + (__v4sf) __X,\ + (__v4sf) __Y,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) -(__A), + (__v4sf) __B, + (__v4sf) -(__W), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmsub_round_ss( __W, __U, __A, __B, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_mask ((__v4sf) -(__A),\ + (__v4sf) __B,\ + (__v4sf) -(__W),\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddss3_maskz ((__v4sf) -(__A), + (__v4sf) __B, + (__v4sf) -(__C), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_ss( __U, __A, __B, __C, __R) __extension__ ({\ +__builtin_ia32_vfmaddss3_maskz((__v4sf) -(__A),\ + (__v4sf) __B,\ + (__v4sf) -(__C),\ + 
(__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
+{
+  return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+          (__v4sf) __X,
+          (__v4sf) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmsub_round_ss( __W, __X, __Y, __U, __R) __extension__({\
+__builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,\
+          (__v4sf) __X,\
+          (__v4sf) __Y,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __W,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fmadd_round_sd( __W, __U, __A, __B, __R) __extension__({\
+__builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,\
+          (__v2df) __B,\
+          (__v2df) __W,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,
+          (__v2df) __B,
+          (__v2df) __C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_sd( __U, __A, __B, __C, __R) __extension__ ({\
+__builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,\
+          (__v2df) __B,\
+          (__v2df) __C,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+          (__v2df) __X,
+          (__v2df) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmadd_round_sd( __W, __X, __Y, __U, __R) __extension__ ({\
+__builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,\
+          (__v2df) __X,\
+          (__v2df) __Y,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_fmsub_sd (__m128 __W, 
__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A, + (__v2df) (-(__B)), + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fmsub_round_sd( __W, __U, __A, __B, __R) __extension__ ({\ +__builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,\ + (__v2df) (-(__B)),\ + (__v2df) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fmsub_sd (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A, + (__v2df) __B, + (__v2df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_sd( __U, __A, __B, __C, __R) __extension__ ({\ +__builtin_ia32_vfmaddsd3_maskz ( (__v2df) __A,\ + (__v2df) __B,\ + (__v2df) __C,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask3_fmsub_sd (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return (__m128) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W, + (__v2df) __X, + (__v2df) (-__Y), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_sd( __W, __X, __Y, __U, __R) __extension__ ({\ +__builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,\ + (__v2df) __X,\ + (__v2df) -(__Y),\ + (__mmask8) __U, __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_fnmadd_sd (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_vfmaddsd3_mask ( (__v2df) -(__A), + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_fnmadd_round_sd( __W, __U, __A, __B, __R) __extension__ ({\ +__builtin_ia32_vfmaddsd3_mask ( (__v2df) -(__A),\ + (__v2df) __B,\ + (__v2df) __W,\ + (__mmask8) __U,\ + __R);\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_fnmadd_sd (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) -__A, + (__v2df) __B, + (__v2df) 
__C,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmadd_round_sd( __U, __A, __B, __C, __R) __extension__ ({\
+__builtin_ia32_vfmaddsd3_maskz ( (__v2df) -(__A),\
+          (__v2df) __B,\
+          (__v2df) __C,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) -(__W),
+          (__v2df) __X,
+          (__v2df) __Y,
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmadd_round_sd( __W, __X, __Y, __U, __R) __extension__({\
+__builtin_ia32_vfmaddsd3_mask3 ((__v2df) -(__W),\
+          (__v2df) __X,\
+          (__v2df) __Y,\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) -(__A),
+          (__v2df) __B,
+          (__v2df) -(__W),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_fnmsub_round_sd( __W, __U, __A, __B, __R) __extension__ ({\
+__builtin_ia32_vfmaddsd3_mask ( (__v2df) -(__A),\
+          (__v2df) __B,\
+          (__v2df) -(__W),\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_maskz ( (__v2df) -(__A),
+          (__v2df) __B,
+          (__v2df) -(__C),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmsub_round_sd( __U, __A, __B, __C, __R) __extension__ ({\
+__builtin_ia32_vfmaddsd3_maskz( (__v2df) -(__A),\
+          (__v2df) __B,\
+          (__v2df) -(__C),\
+          (__mmask8) __U,\
+          __R);\
+})
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
+{
+  return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) -(__W),
+          (__v2df) __X,
+          (__v2df) -(__Y),
+          (__mmask8) __U,
+          _MM_FROUND_CUR_DIRECTION);
+}
+
+#define 
_mm_mask3_fnmsub_round_sd( __W, __X, __Y, __U, __R) __extension__({\ +__builtin_ia32_vfmaddsd3_mask3 ((__v2df) -(__W),\ + (__v2df) __X,\ + (__v2df) -(__Y),\ + (__mmask8) __U,\ + __R);\ +}) #undef __DEFAULT_FN_ATTRS Index: test/CodeGen/avx512f-builtins.c =================================================================== --- test/CodeGen/avx512f-builtins.c +++ test/CodeGen/avx512f-builtins.c @@ -2576,5 +2576,494 @@ return _mm512_kmov(__A); } +__m128 test_mm_sqrt_round_ss(__m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_sqrt_round_ss + // CHECK: @llvm.x86.avx512.mask.sqrt.ss + return _mm_sqrt_round_ss(__A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_sqrt_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_sqrt_ss + // CHECK: @llvm.x86.avx512.mask.sqrt.ss + return _mm_mask_sqrt_ss(__W, __U, __A, __B); +} + +__m128 test_mm_mask_sqrt_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_sqrt_round_ss + // CHECK: @llvm.x86.avx512.mask.sqrt.ss + return _mm_mask_sqrt_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_sqrt_ss + // CHECK: @llvm.x86.avx512.mask.sqrt.ss + return _mm_maskz_sqrt_ss(__U, __A, __B); +} + +__m128 test_mm_maskz_sqrt_round_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_sqrt_round_ss + // CHECK: @llvm.x86.avx512.mask.sqrt.ss + return _mm_maskz_sqrt_round_ss(__U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} +__m128d test_mm_sqrt_round_sd(__m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_sqrt_round_sd + // CHECK: @llvm.x86.avx512.mask.sqrt.sd + return _mm_sqrt_round_sd(__A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask_sqrt_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_sqrt_sd + // CHECK: @llvm.x86.avx512.mask.sqrt.sd + return _mm_mask_sqrt_sd(__W, __U, __A, __B); +} 
+__m128d test_mm_mask_sqrt_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_sqrt_round_sd + // CHECK: @llvm.x86.avx512.mask.sqrt.sd + return _mm_mask_sqrt_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_sqrt_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_sqrt_sd + // CHECK: @llvm.x86.avx512.mask.sqrt.sd + return _mm_maskz_sqrt_sd(__U, __A, __B); +} + +__m128d test_mm_maskz_sqrt_round_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_sqrt_round_sd + // CHECK: @llvm.x86.avx512.mask.sqrt.sd + return _mm_maskz_sqrt_round_sd(__U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_fmadd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fmadd_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fmadd_ss(__W, __U, __A, __B); +} + +__m128 test_mm_mask_fmadd_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fmadd_round_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fmadd_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_fmadd_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fmadd_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fmadd_ss(__U, __A, __B, __C); +} + +__m128 test_mm_maskz_fmadd_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fmadd_round_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fmadd_round_ss(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask3_fmadd_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmadd_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fmadd_ss(__W, __X, __Y, __U); +} + +__m128 test_mm_mask3_fmadd_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // 
CHECK-LABEL: @test_mm_mask3_fmadd_round_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fmadd_round_ss(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_fmsub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fmsub_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fmsub_ss(__W, __U, __A, __B); +} + +__m128 test_mm_mask_fmsub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fmsub_round_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fmsub_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_fmsub_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fmsub_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fmsub_ss(__U, __A, __B, __C); +} + +__m128 test_mm_maskz_fmsub_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fmsub_round_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fmsub_round_ss(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask3_fmsub_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmsub_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fmsub_ss(__W, __X, __Y, __U); +} + +__m128 test_mm_mask3_fmsub_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmsub_round_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fmsub_round_ss(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_fnmadd_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fnmadd_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fnmadd_ss(__W, __U, __A, __B); +} + +__m128 test_mm_mask_fnmadd_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fnmadd_round_ss + // 
CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fnmadd_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_fnmadd_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fnmadd_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fnmadd_ss(__U, __A, __B, __C); +} + +__m128 test_mm_maskz_fnmadd_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fnmadd_round_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fnmadd_round_ss(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask3_fnmadd_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmadd_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fnmadd_ss(__W, __X, __Y, __U); +} + +__m128 test_mm_mask3_fnmadd_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmadd_round_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fnmadd_round_ss(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_fnmsub_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fnmsub_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fnmsub_ss(__W, __U, __A, __B); +} + +__m128 test_mm_mask_fnmsub_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_fnmsub_round_ss + // CHECK: @llvm.x86.avx512.mask.vfmadd.ss + return _mm_mask_fnmsub_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_fnmsub_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fnmsub_ss + // CHECK: @llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fnmsub_ss(__U, __A, __B, __C); +} + +__m128 test_mm_maskz_fnmsub_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C){ + // CHECK-LABEL: @test_mm_maskz_fnmsub_round_ss + // CHECK: 
@llvm.x86.avx512.maskz.vfmadd.ss + return _mm_maskz_fnmsub_round_ss(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask3_fnmsub_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmsub_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fnmsub_ss(__W, __X, __Y, __U); +} + +__m128 test_mm_mask3_fnmsub_round_ss(__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmsub_round_ss + // CHECK: @llvm.x86.avx512.mask3.vfmadd.ss + return _mm_mask3_fnmsub_round_ss(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +// Scalar double-precision FMA tests: each fmadd/fmsub/fnmadd/fnmsub variant below is expected to lower to the corresponding mask/maskz/mask3 vfmadd.sd intrinsic (see the CHECK lines). +__m128d test_mm_mask_fmadd_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fmadd_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fmadd_sd(__W, __U, __A, __B); +} + +__m128d test_mm_mask_fmadd_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fmadd_round_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fmadd_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_fmadd_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fmadd_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fmadd_sd(__U, __A, __B, __C); +} + +__m128d test_mm_maskz_fmadd_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fmadd_round_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fmadd_round_sd(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask3_fmadd_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmadd_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fmadd_sd(__W, __X, __Y, __U); +} + +__m128d test_mm_mask3_fmadd_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmadd_round_sd + // CHECK: 
@llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fmadd_round_sd(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask_fmsub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fmsub_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fmsub_sd(__W, __U, __A, __B); +} + +__m128d test_mm_mask_fmsub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fmsub_round_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fmsub_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_fmsub_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fmsub_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fmsub_sd(__U, __A, __B, __C); +} + +__m128d test_mm_maskz_fmsub_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fmsub_round_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fmsub_round_sd(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask3_fmsub_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmsub_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fmsub_sd(__W, __X, __Y, __U); +} + +__m128d test_mm_mask3_fmsub_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fmsub_round_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fmsub_round_sd(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask_fnmadd_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fnmadd_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fnmadd_sd(__W, __U, __A, __B); +} + +__m128d test_mm_mask_fnmadd_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fnmadd_round_sd + // CHECK: 
@llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fnmadd_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_fnmadd_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fnmadd_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fnmadd_sd(__U, __A, __B, __C); +} + +__m128d test_mm_maskz_fnmadd_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fnmadd_round_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fnmadd_round_sd(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask3_fnmadd_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmadd_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fnmadd_sd(__W, __X, __Y, __U); +} + +__m128d test_mm_mask3_fnmadd_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmadd_round_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fnmadd_round_sd(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask_fnmsub_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fnmsub_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fnmsub_sd(__W, __U, __A, __B); +} + +__m128d test_mm_mask_fnmsub_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_fnmsub_round_sd + // CHECK: @llvm.x86.avx512.mask.vfmadd.sd + return _mm_mask_fnmsub_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_fnmsub_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fnmsub_sd + // CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fnmsub_sd(__U, __A, __B, __C); +} + +__m128d test_mm_maskz_fnmsub_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C){ + // CHECK-LABEL: @test_mm_maskz_fnmsub_round_sd + // 
CHECK: @llvm.x86.avx512.maskz.vfmadd.sd + return _mm_maskz_fnmsub_round_sd(__U, __A, __B, __C, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_mask3_fnmsub_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmsub_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fnmsub_sd(__W, __X, __Y, __U); +} + +__m128d test_mm_mask3_fnmsub_round_sd(__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U){ + // CHECK-LABEL: @test_mm_mask3_fnmsub_round_sd + // CHECK: @llvm.x86.avx512.mask3.vfmadd.sd + return _mm_mask3_fnmsub_round_sd(__W, __X, __Y, __U, _MM_FROUND_CUR_DIRECTION); +} + +// Masked and zero-masked scalar rsqrt14 tests (sd and ss forms). +__m128d test_mm_mask_rsqrt14_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_rsqrt14_sd + // CHECK: @llvm.x86.avx512.rsqrt14.sd + return _mm_mask_rsqrt14_sd(__W, __U, __A, __B); +} + +__m128d test_mm_maskz_rsqrt14_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_rsqrt14_sd + // CHECK: @llvm.x86.avx512.rsqrt14.sd + return _mm_maskz_rsqrt14_sd(__U, __A, __B); +} + +__m128 test_mm_mask_rsqrt14_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_rsqrt14_ss + // CHECK: @llvm.x86.avx512.rsqrt14.ss + return _mm_mask_rsqrt14_ss(__W, __U, __A, __B); +} + +__m128 test_mm_maskz_rsqrt14_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_rsqrt14_ss + // CHECK: @llvm.x86.avx512.rsqrt14.ss + return _mm_maskz_rsqrt14_ss(__U, __A, __B); +} + +// Masked and zero-masked scalar rcp14 tests (sd and ss forms). +__m128d test_mm_mask_rcp14_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_rcp14_sd + // CHECK: @llvm.x86.avx512.rcp14.sd + return _mm_mask_rcp14_sd(__W, __U, __A, __B); +} + +__m128d test_mm_maskz_rcp14_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_rcp14_sd + // CHECK: @llvm.x86.avx512.rcp14.sd + return _mm_maskz_rcp14_sd(__U, __A, __B); +} + +__m128 test_mm_mask_rcp14_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // 
CHECK-LABEL: @test_mm_mask_rcp14_ss + // CHECK: @llvm.x86.avx512.rcp14.ss + return _mm_mask_rcp14_ss(__W, __U, __A, __B); +} + +__m128 test_mm_maskz_rcp14_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_rcp14_ss + // CHECK: @llvm.x86.avx512.rcp14.ss + return _mm_maskz_rcp14_ss(__U, __A, __B); +} + +// Masked/zero-masked scalar getexp tests, with and without an explicit rounding-mode argument. +__m128d test_mm_mask_getexp_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_getexp_sd + // CHECK: @llvm.x86.avx512.mask.getexp.sd + return _mm_mask_getexp_sd(__W, __U, __A, __B); +} + +__m128d test_mm_mask_getexp_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_getexp_round_sd + // CHECK: @llvm.x86.avx512.mask.getexp.sd + return _mm_mask_getexp_round_sd(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_getexp_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_getexp_sd + // CHECK: @llvm.x86.avx512.mask.getexp.sd + return _mm_maskz_getexp_sd(__U, __A, __B); +} + +__m128d test_mm_maskz_getexp_round_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_getexp_round_sd + // CHECK: @llvm.x86.avx512.mask.getexp.sd + return _mm_maskz_getexp_round_sd(__U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_getexp_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_getexp_ss + // CHECK: @llvm.x86.avx512.mask.getexp.ss + return _mm_mask_getexp_ss(__W, __U, __A, __B); +} + +__m128 test_mm_mask_getexp_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_getexp_round_ss + // CHECK: @llvm.x86.avx512.mask.getexp.ss + return _mm_mask_getexp_round_ss(__W, __U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_getexp_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_getexp_ss + // CHECK: @llvm.x86.avx512.mask.getexp.ss + return _mm_maskz_getexp_ss(__U, __A, __B); +} + +__m128 
test_mm_maskz_getexp_round_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_getexp_round_ss + // CHECK: @llvm.x86.avx512.mask.getexp.ss + return _mm_maskz_getexp_round_ss(__U, __A, __B, _MM_FROUND_CUR_DIRECTION); +} + +// Masked/zero-masked scalar getmant tests; 1 and 2 are the required immediate operands (NOTE(review): presumably the getmant interval/sign-control selectors — confirm against the intrinsic definition). +__m128d test_mm_mask_getmant_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_getmant_sd + // CHECK: @llvm.x86.avx512.mask.getmant.sd + return _mm_mask_getmant_sd(__W, __U, __A, __B, 1, 2); +} + +__m128d test_mm_mask_getmant_round_sd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_mask_getmant_round_sd + // CHECK: @llvm.x86.avx512.mask.getmant.sd + return _mm_mask_getmant_round_sd(__W, __U, __A, __B, 1, 2, _MM_FROUND_CUR_DIRECTION); +} + +__m128d test_mm_maskz_getmant_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_getmant_sd + // CHECK: @llvm.x86.avx512.mask.getmant.sd + return _mm_maskz_getmant_sd(__U, __A, __B, 1, 2); +} + +__m128d test_mm_maskz_getmant_round_sd(__mmask8 __U, __m128d __A, __m128d __B){ + // CHECK-LABEL: @test_mm_maskz_getmant_round_sd + // CHECK: @llvm.x86.avx512.mask.getmant.sd + return _mm_maskz_getmant_round_sd(__U, __A, __B, 1, 2, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_mask_getmant_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_getmant_ss + // CHECK: @llvm.x86.avx512.mask.getmant.ss + return _mm_mask_getmant_ss(__W, __U, __A, __B, 1, 2); +} + +__m128 test_mm_mask_getmant_round_ss(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_mask_getmant_round_ss + // CHECK: @llvm.x86.avx512.mask.getmant.ss + return _mm_mask_getmant_round_ss(__W, __U, __A, __B, 1, 2, _MM_FROUND_CUR_DIRECTION); +} + +__m128 test_mm_maskz_getmant_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_getmant_ss + // CHECK: @llvm.x86.avx512.mask.getmant.ss + return _mm_maskz_getmant_ss(__U, __A, __B, 1, 2); +} + +__m128 
test_mm_maskz_getmant_round_ss(__mmask8 __U, __m128 __A, __m128 __B){ + // CHECK-LABEL: @test_mm_maskz_getmant_round_ss + // CHECK: @llvm.x86.avx512.mask.getmant.ss + return _mm_maskz_getmant_round_ss(__U, __A, __B, 1, 2, _MM_FROUND_CUR_DIRECTION); +}