Index: include/clang/Basic/BuiltinsX86.def
===================================================================
--- include/clang/Basic/BuiltinsX86.def
+++ include/clang/Basic/BuiltinsX86.def
@@ -1749,6 +1749,51 @@
 TARGET_BUILTIN(__builtin_ia32_vpermt2varqi256_maskz, "V32cV32cV32cV32cUi","","avx512vbmi,avx512vl")
 TARGET_BUILTIN(__builtin_ia32_vcomisd, "iV2dV2dIiIi","","avx512f")
 TARGET_BUILTIN(__builtin_ia32_vcomiss, "iV4fV4fIiIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_kunpckdi, "ULLiULLiULLi","","avx512bw")
+TARGET_BUILTIN(__builtin_ia32_kunpcksi, "UiUiUi","","avx512bw")
+TARGET_BUILTIN(__builtin_ia32_loaddquhi512_mask, "V32sV32s*V32sUi","","avx512bw")
+TARGET_BUILTIN(__builtin_ia32_loaddquqi512_mask, "V64cV64c*V64cULLi","","avx512bw")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd512_mask, "V8dV8dV8dV8LLiIiUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd512_maskz, "V8dV8dV8dV8LLiIiUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps512_mask, "V16fV16fV16fV16iIiUsIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps512_maskz, "V16fV16fV16fV16iIiUsIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmsd_mask, "V2dV2dV2dV2LLiIiUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmsd_maskz, "V2dV2dV2dV2LLiIiUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmss_mask, "V4fV4fV4fV4iIiUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_fixupimmss_maskz, "V4fV4fV4fV4iIiUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_getexpsd128_round_mask, "V2dV2dV2dV2dUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_getexpss128_round_mask, "V4fV4fV4fV4fUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_getmantsd_round_mask, "V2dV2dV2dIiV2dUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_getmantss_round_mask, "V4fV4fV4fIiV4fUcIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_kmov16, "UsUs","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_maxsd_round, "V2dV2dV2dIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_maxss_round, "V4fV4fV4fIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_minsd_round, "V2dV2dV2dIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_minss_round, "V4fV4fV4fIi","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_loaddquhi128_mask, "V8sV8s*V8sUc","","avx512bw,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loaddquhi256_mask, "V16sV16s*V16sUs","","avx512bw,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loaddquqi128_mask, "V16cV16c*V16cUs","","avx512bw,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loaddquqi256_mask, "V32cV32c*V32cUi","","avx512bw,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd128_mask, "V2dV2dV2dV2LLiIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd128_maskz, "V2dV2dV2dV2LLiIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd256_mask, "V4dV4dV4dV4LLiIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd256_maskz, "V4dV4dV4dV4LLiIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps128_mask, "V4fV4fV4fV4iIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps128_maskz, "V4fV4fV4fV4iIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps256_mask, "V8fV8fV8fV8iIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps256_maskz, "V8fV8fV8fV8iIiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loadapd128_mask, "V2dV2d*V2dUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loadapd256_mask, "V4dV4d*V4dUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loadaps128_mask, "V4fV4f*V4fUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loadaps256_mask, "V8fV8f*V8fUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_loaddqudi128_mask, "V2LLiV2LLi*V2LLiUc","","avx512vl") +TARGET_BUILTIN(__builtin_ia32_loaddqudi256_mask, "V4LLiV4LLi*V4LLiUc","","avx512vl") +TARGET_BUILTIN(__builtin_ia32_loaddqusi128_mask, "V4iV4i*V4iUc","","avx512f") +TARGET_BUILTIN(__builtin_ia32_loaddqusi256_mask, "V8iV8i*V8iUc","","avx512f") +TARGET_BUILTIN(__builtin_ia32_loadupd128_mask, "V2dV2d*V2dUc","","avx512vl") +TARGET_BUILTIN(__builtin_ia32_loadupd256_mask, "V4dV4d*V4dUc","","avx512vl") +TARGET_BUILTIN(__builtin_ia32_loadups128_mask, "V4fV4f*V4fUc","","avx512vl") +TARGET_BUILTIN(__builtin_ia32_loadups256_mask, "V8fV8f*V8fUc","","avx512vl") #undef BUILTIN #undef TARGET_BUILTIN Index: lib/Headers/avx512bwintrin.h =================================================================== --- lib/Headers/avx512bwintrin.h +++ lib/Headers/avx512bwintrin.h @@ -1934,6 +1934,53 @@ __M); } +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_kunpackd (__mmask64 __A, __mmask64 __B) +{ + return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A, + (__mmask64) __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_kunpackw (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A, + (__mmask32) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((__v32hi *) __P, + (__v32hi) + _mm512_setzero_hi (), + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P, + (__v64qi) __W, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((__v64qi *) __P, + (__v64qi) + _mm512_setzero_hi (), + (__mmask64) __U); +} #undef __DEFAULT_FN_ATTRS #endif Index: lib/Headers/avx512fintrin.h =================================================================== --- lib/Headers/avx512fintrin.h +++ lib/Headers/avx512fintrin.h @@ -51,12 +51,40 @@ /* Create vectors with repeated elements */ +typedef enum +{ + _MM_MANT_NORM_1_2, /* interval [1, 2) */ + _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */ + _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */ + _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */ +} _MM_MANTISSA_NORM_ENUM; + +typedef enum +{ + _MM_MANT_SIGN_src, /* sign = sign(SRC) */ + _MM_MANT_SIGN_zero, /* sign = 0 */ + _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */ +} _MM_MANTISSA_SIGN_ENUM; + static __inline __m512i __DEFAULT_FN_ATTRS _mm512_setzero_si512(void) { return (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 }; } +static __inline __m128i __DEFAULT_FN_ATTRS +_mm512_setzero_sd128(void) +{ + return (__m128i)(__v2df){ 0.0, 0.0 }; +} + +static __inline __m128i __DEFAULT_FN_ATTRS +_mm512_setzero_ss128(void) +{ + return (__m128i)(__v4sf){ 0.0, 0.0, 0.0, 0.0}; +} + + static __inline__ __m512d __DEFAULT_FN_ATTRS _mm512_undefined_pd() { @@ -3695,6 +3723,262 @@ (__mmask8) __U); } +#define _mm512_fixupimm_round_pd( __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmpd512_mask ((__v8df)( __A),\ + (__v8df)( __B),\ + (__v8di)( __C),\ + (__imm),\ + (__mmask8) 
-1, (__R));\ +}) + +#define _mm512_mask_fixupimm_round_pd( __A, __U, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmpd512_mask ((__v8df)( __A),\ + (__v8df)( __B),\ + (__v8di)( __C),\ + (__imm),\ + (__mmask8)( __U), (__R));\ +}) + +#define _mm512_fixupimm_pd( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd512_mask ((__v8df)( __A),\ + (__v8df)( __B),\ + (__v8di)( __C),\ + ( __imm),\ + (__mmask8) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm512_mask_fixupimm_pd( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd512_mask ((__v8df)( __A),\ + (__v8df)( __B),\ + (__v8di)( __C),\ + ( __imm),\ + (__mmask8)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm512_maskz_fixupimm_round_pd( __U, __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmpd512_maskz ((__v8df)( __A),\ + (__v8df)( __B),\ + (__v8di)( __C),\ + (__imm),\ + (__mmask8)( __U), (__R));\ +}) + +#define _mm512_maskz_fixupimm_pd( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd512_maskz ((__v8df)( __A),\ + (__v8df)( __B),\ + (__v8di)( __C),\ + ( __imm),\ + (__mmask8)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm512_fixupimm_round_ps( __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmps512_mask ((__v16sf)( __A),\ + (__v16sf)( __B),\ + (__v16si)( __C),\ + (__imm),\ + (__mmask16) -1, (__R));\ +}) + +#define _mm512_mask_fixupimm_round_ps( __A, __U, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmps512_mask ((__v16sf)( __A),\ + (__v16sf)( __B),\ + (__v16si)( __C),\ + (__imm),\ + (__mmask16)( __U), (__R));\ +}) + +#define _mm512_fixupimm_ps( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps512_mask ((__v16sf)( __A),\ + (__v16sf)( __B),\ + (__v16si)( __C),\ + ( __imm),\ + (__mmask16) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm512_mask_fixupimm_ps( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps512_mask ((__v16sf)( __A),\ + (__v16sf)( __B),\ + (__v16si)( __C),\ + ( __imm),\ + (__mmask16)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm512_maskz_fixupimm_round_ps( __U, __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmps512_maskz ((__v16sf)( __A),\ + (__v16sf)( __B),\ + (__v16si)( __C),\ + (__imm),\ + (__mmask16)( __U), (__R));\ +}) + +#define _mm512_maskz_fixupimm_ps( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps512_maskz ((__v16sf)( __A),\ + (__v16sf)( __B),\ + (__v16si)( __C),\ + ( __imm),\ + (__mmask16)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_fixupimm_round_sd( __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmsd_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C), __imm,\ + (__mmask8) -1, (__R));\ +}) + +#define _mm_mask_fixupimm_round_sd( __A, __U, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmsd_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C), __imm,\ + (__mmask8)( __U), (__R));\ +}) + +#define _mm_fixupimm_sd( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmsd_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),( __imm),\ + (__mmask8) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_mask_fixupimm_sd( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmsd_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),( __imm),\ + (__mmask8)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_maskz_fixupimm_round_sd( __U, __A, __B, __C, __imm, __R) 
__extension__ ({ \ +__builtin_ia32_fixupimmsd_maskz ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),\ + __imm,\ + (__mmask8)( __U), (__R));\ +}) + +#define _mm_maskz_fixupimm_sd( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmsd_maskz ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),\ + ( __imm),\ + (__mmask8)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_fixupimm_round_ss( __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmss_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C), (__imm),\ + (__mmask8) -1, (__R));\ +}) + +#define _mm_mask_fixupimm_round_ss( __A, __U, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmss_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C), (__imm),\ + (__mmask8)( __U), (__R));\ +}) + +#define _mm_fixupimm_ss( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmss_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C),( __imm),\ + (__mmask8) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_mask_fixupimm_ss( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmss_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C),( __imm),\ + (__mmask8)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_maskz_fixupimm_round_ss( __U, __A, __B, __C, __imm, __R) __extension__ ({ \ +__builtin_ia32_fixupimmss_maskz ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C), (__imm),\ + (__mmask8)( __U), (__R));\ +}) + +#define _mm_maskz_fixupimm_ss( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmss_maskz ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C),( __imm),\ + (__mmask8)( __U),\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_getexp_round_sd( __A, __B ,__R) __extension__ ({ \ +__builtin_ia32_getexpsd128_round_mask ((__v2df)(__A),\ + (__v2df)( __B), (__v2df) _mm512_setzero_sd128(), (__mmask8) -1,\ + ( __R));\ +}) + + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_getexp_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A, + (__v2df) __B, (__v2df) _mm512_setzero_sd128(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_getexp_round_ss( __A, __B, __R) __extension__ ({ \ +__builtin_ia32_getexpss128_round_mask ((__v4sf)( __A),\ + (__v4sf)( __B), (__v4sf) _mm512_setzero_ss128(), (__mmask8) -1,\ + ( __R));\ +}) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_getexp_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, (__v4sf) _mm512_setzero_ss128(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_getmant_round_sd( __A, __B, __C, __D, __R) __extension__ ({ \ +__builtin_ia32_getmantsd_round_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (( __D) << 2) |( __C), (__v2df) _mm512_setzero_sd128(), (__mmask8) -1,\ + ( __R));\ +}) + +#define _mm_getmant_sd( __A, __B, __C, __D) __extension__ ({ \ +__builtin_ia32_getmantsd_round_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (( __D) << 2) |( __C), (__v2df) _mm512_setzero_sd128(), (__mmask8) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + +#define _mm_getmant_round_ss( __A, __B, __C, __D, __R) __extension__ ({ \ +__builtin_ia32_getmantss_round_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + ((__D) << 2) |( __C), (__v4sf) _mm512_setzero_ss128(), (__mmask8) -1,\ + ( __R));\ +}) + +#define _mm_getmant_ss(__A, __B, __C, __D) __extension__ ({ \ +__builtin_ia32_getmantss_round_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + ((__D) << 2) |( __C), (__v4sf) 
_mm512_setzero_ss128(), (__mmask8) -1,\ + _MM_FROUND_CUR_DIRECTION);\ +}) + + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kmov (__mmask16 __A) +{ + return __A; +} + #define _mm_comi_round_sd(__A, __B, __P, __R) __extension__ ({\ __builtin_ia32_vcomisd ((__v2df) (__A), (__v2df) (__B), ( __P), ( __R));\ }) Index: lib/Headers/avx512vlbwintrin.h =================================================================== --- lib/Headers/avx512vlbwintrin.h +++ lib/Headers/avx512vlbwintrin.h @@ -2952,6 +2952,74 @@ _mm256_setzero_si256 (), __M); } + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((__v8hi *) __P, + (__v8hi) + _mm_setzero_hi (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((__v16hi *) __P, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P, + (__v16qi) __W, + (__mmask16) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((__v16qi *) __P, + (__v16qi) + _mm_setzero_hi (), + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P, + (__v32qi) __W, + (__mmask32) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((__v32qi *) __P, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} #undef __DEFAULT_FN_ATTRS #endif /* __AVX512VLBWINTRIN_H */ Index: lib/Headers/avx512vlintrin.h =================================================================== --- lib/Headers/avx512vlintrin.h +++ lib/Headers/avx512vlintrin.h @@ -6068,6 +6068,306 @@ _mm256_setzero_si256 (), __M); } + +#define _mm_fixupimm_pd( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd128_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),\ + ( __imm),\ + (__mmask8) -1);\ +}) + +#define _mm_mask_fixupimm_pd( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd128_mask ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm_maskz_fixupimm_pd( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd128_maskz ((__v2df)( __A),\ + (__v2df)( __B),\ + (__v2di)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm256_fixupimm_pd( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd256_mask ((__v4df)( __A),\ + (__v4df)( __B),\ + (__v4di)( __C),\ + ( __imm),\ + (__mmask8) -1);\ +}) + +#define _mm256_mask_fixupimm_pd( __A, 
__U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd256_mask ((__v4df)( __A),\ + (__v4df)( __B),\ + (__v4di)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm256_maskz_fixupimm_pd( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmpd256_maskz ((__v4df)( __A),\ + (__v4df)( __B),\ + (__v4di)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm_fixupimm_ps( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps128_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C),\ + ( __imm),\ + (__mmask8) -1);\ +}) + +#define _mm_mask_fixupimm_ps( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps128_mask ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm_maskz_fixupimm_ps( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps128_maskz ((__v4sf)( __A),\ + (__v4sf)( __B),\ + (__v4si)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm256_fixupimm_ps( __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps256_mask ((__v8sf)( __A),\ + (__v8sf)( __B),\ + (__v8si)( __C),\ + ( __imm),\ + (__mmask8) -1);\ +}) + +#define _mm256_mask_fixupimm_ps( __A, __U, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps256_mask ((__v8sf)( __A),\ + (__v8sf)( __B),\ + (__v8si)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +#define _mm256_maskz_fixupimm_ps( __U, __A, __B, __C, __imm) __extension__ ({ \ +__builtin_ia32_fixupimmps256_maskz ((__v8sf)( __A),\ + (__v8sf)( __B),\ + (__v8si)( __C),\ + ( __imm),\ + (__mmask8)( __U));\ +}) + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((__v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((__v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) 
__builtin_ia32_loaddqudi128_mask ((__v2di *) __P, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((__v2di *) __P, + (__v2di) + _mm_setzero_di (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((__v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((__v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((__v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((__v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((__v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((__v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((__v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} #undef __DEFAULT_FN_ATTRS #undef __DEFAULT_FN_ATTRS_BOTH Index: 
test/CodeGen/avx512bw-builtins.c =================================================================== --- test/CodeGen/avx512bw-builtins.c +++ test/CodeGen/avx512bw-builtins.c @@ -1311,4 +1311,40 @@ return _mm512_maskz_set1_epi8(__M, __A); } +__mmask64 test_mm512_kunpackd(__mmask64 __A, __mmask64 __B) { + // CHECK-LABEL: @test_mm512_kunpackd + // CHECK: @llvm.x86.avx512.kunpck.dq + return _mm512_kunpackd(__A, __B); +} + +__mmask32 test_mm512_kunpackw(__mmask32 __A, __mmask32 __B) { + // CHECK-LABEL: @test_mm512_kunpackw + // CHECK: @llvm.x86.avx512.kunpck.wd + return _mm512_kunpackw(__A, __B); +} + +__m512i test_mm512_mask_loadu_epi16(__m512i __W, __mmask32 __U, void const *__P) { + // CHECK-LABEL: @test_mm512_mask_loadu_epi16 + // CHECK: @llvm.x86.avx512.mask.loadu.w.512 + return _mm512_mask_loadu_epi16(__W, __U, __P); +} + +__m512i test_mm512_maskz_loadu_epi16(__mmask32 __U, void const *__P) { + // CHECK-LABEL: @test_mm512_maskz_loadu_epi16 + // CHECK: @llvm.x86.avx512.mask.loadu.w.512 + return _mm512_maskz_loadu_epi16(__U, __P); +} + +__m512i test_mm512_mask_loadu_epi8(__m512i __W, __mmask64 __U, void const *__P) { + // CHECK-LABEL: @test_mm512_mask_loadu_epi8 + // CHECK: @llvm.x86.avx512.mask.loadu.b.512 + return _mm512_mask_loadu_epi8(__W, __U, __P); +} + +__m512i test_mm512_maskz_loadu_epi8(__mmask64 __U, void const *__P) { + // CHECK-LABEL: @test_mm512_maskz_loadu_epi8 + // CHECK: @llvm.x86.avx512.mask.loadu.b.512 + return _mm512_maskz_loadu_epi8(__U, __P); +} + Index: test/CodeGen/avx512f-builtins.c =================================================================== --- test/CodeGen/avx512f-builtins.c +++ test/CodeGen/avx512f-builtins.c @@ -2375,3 +2375,206 @@ // CHECK: @llvm.x86.avx512.vcomi.ss return _mm_comi_round_ss(__A, __B, 5, 3); } + +__m512d test_mm512_fixupimm_round_pd(__m512d __A, __m512d __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_fixupimm_round_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.512 + return _mm512_fixupimm_round_pd(__A, __B, __C, 5, 8); +} + +__m512d test_mm512_mask_fixupimm_round_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_mask_fixupimm_round_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.512 + return _mm512_mask_fixupimm_round_pd(__A, __U, __B, __C, 5, 8); +} + +__m512d test_mm512_fixupimm_pd(__m512d __A, __m512d __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_fixupimm_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.512 + return _mm512_fixupimm_pd(__A, __B, __C, 5); +} + +__m512d test_mm512_mask_fixupimm_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_mask_fixupimm_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.512 + return _mm512_mask_fixupimm_pd(__A, __U, __B, __C, 5); +} + +__m512d test_mm512_maskz_fixupimm_round_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_maskz_fixupimm_round_pd + // CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.512 + return _mm512_maskz_fixupimm_round_pd(__U, __A, __B, __C, 5, 8); +} + +__m512d test_mm512_maskz_fixupimm_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_maskz_fixupimm_pd + // CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.512 + return _mm512_maskz_fixupimm_pd(__U, __A, __B, __C, 5); +} + +__m512 test_mm512_fixupimm_round_ps(__m512 __A, __m512 __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_fixupimm_round_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.512 + return _mm512_fixupimm_round_ps(__A, __B, __C, 5, 8); +} + +__m512 
test_mm512_mask_fixupimm_round_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_mask_fixupimm_round_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.512 + return _mm512_mask_fixupimm_round_ps(__A, __U, __B, __C, 5, 8); +} + +__m512 test_mm512_fixupimm_ps(__m512 __A, __m512 __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_fixupimm_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.512 + return _mm512_fixupimm_ps(__A, __B, __C, 5); +} + +__m512 test_mm512_mask_fixupimm_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_mask_fixupimm_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.512 + return _mm512_mask_fixupimm_ps(__A, __U, __B, __C, 5); +} + +__m512 test_mm512_maskz_fixupimm_round_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_maskz_fixupimm_round_ps + // CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.512 + return _mm512_maskz_fixupimm_round_ps(__U, __A, __B, __C, 5, 8); +} + +__m512 test_mm512_maskz_fixupimm_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512i __C) { + // CHECK-LABEL: @test_mm512_maskz_fixupimm_ps + // CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.512 + return _mm512_maskz_fixupimm_ps(__U, __A, __B, __C, 5); +} + +__m128d test_mm_fixupimm_round_sd(__m128d __A, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_fixupimm_round_sd + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_fixupimm_round_sd(__A, __B, __C, 5, 8); +} + +__m128d test_mm_mask_fixupimm_round_sd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_mask_fixupimm_round_sd + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_mask_fixupimm_round_sd(__A, __U, __B, __C, 5, 8); +} + +__m128d test_mm_fixupimm_sd(__m128d __A, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_fixupimm_sd + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_fixupimm_sd(__A, __B, __C, 5); +} + +__m128d test_mm_mask_fixupimm_sd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_mask_fixupimm_sd + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_mask_fixupimm_sd(__A, __U, __B, __C, 5); +} + +__m128d test_mm_maskz_fixupimm_round_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_maskz_fixupimm_round_sd + // CHECK: @llvm.x86.avx512.maskz.fixupimm + return _mm_maskz_fixupimm_round_sd(__U, __A, __B, __C, 5, 8); +} + +__m128d test_mm_maskz_fixupimm_sd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_maskz_fixupimm_sd + // CHECK: @llvm.x86.avx512.maskz.fixupimm + return _mm_maskz_fixupimm_sd(__U, __A, __B, __C, 5); +} + +__m128 test_mm_fixupimm_round_ss(__m128 __A, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_fixupimm_round_ss + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_fixupimm_round_ss(__A, __B, __C, 5, 8); +} + +__m128 test_mm_mask_fixupimm_round_ss(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_mask_fixupimm_round_ss + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_mask_fixupimm_round_ss(__A, __U, __B, __C, 5, 8); +} + +__m128 test_mm_fixupimm_ss(__m128 __A, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_fixupimm_ss + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_fixupimm_ss(__A, __B, __C, 5); +} + +__m128 test_mm_mask_fixupimm_ss(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_mask_fixupimm_ss + // CHECK: @llvm.x86.avx512.mask.fixupimm + return _mm_mask_fixupimm_ss(__A, __U, __B, 
__C, 5); +} + +__m128 test_mm_maskz_fixupimm_round_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_maskz_fixupimm_round_ss + // CHECK: @llvm.x86.avx512.maskz.fixupimm + return _mm_maskz_fixupimm_round_ss(__U, __A, __B, __C, 5, 8); +} + +__m128 test_mm_maskz_fixupimm_ss(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_maskz_fixupimm_ss + // CHECK: @llvm.x86.avx512.maskz.fixupimm + return _mm_maskz_fixupimm_ss(__U, __A, __B, __C, 5); +} + +__m128d test_mm_getexp_round_sd(__m128d __A, __m128d __B) { + // CHECK-LABEL: @test_mm_getexp_round_sd + // CHECK: @llvm.x86.avx512.mask.getexp.sd + return _mm_getexp_round_sd(__A, __B, 8); +} + +__m128d test_mm_getexp_sd(__m128d __A, __m128d __B) { + // CHECK-LABEL: @test_mm_getexp_sd + // CHECK: @llvm.x86.avx512.mask.getexp.sd + return _mm_getexp_sd(__A, __B); +} + +__m128 test_mm_getexp_round_ss(__m128 __A, __m128 __B) { + // CHECK-LABEL: @test_mm_getexp_round_ss + // CHECK: @llvm.x86.avx512.mask.getexp.ss + return _mm_getexp_round_ss(__A, __B, 8); +} + +__m128 test_mm_getexp_ss(__m128 __A, __m128 __B) { + // CHECK-LABEL: @test_mm_getexp_ss + // CHECK: @llvm.x86.avx512.mask.getexp.ss + return _mm_getexp_ss(__A, __B); +} + +__m128d test_mm_getmant_round_sd(__m128d __A, __m128d __B) { + // CHECK-LABEL: @test_mm_getmant_round_sd + // CHECK: @llvm.x86.avx512.mask.getmant.sd + return _mm_getmant_round_sd(__A, __B, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src, 8); +} + +__m128d test_mm_getmant_sd(__m128d __A, __m128d __B) { + // CHECK-LABEL: @test_mm_getmant_sd + // CHECK: @llvm.x86.avx512.mask.getmant.sd + return _mm_getmant_sd(__A, __B, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src); +} + + + +__m128 test_mm_getmant_round_ss(__m128 __A, __m128 __B) { + // CHECK-LABEL: @test_mm_getmant_round_ss + // CHECK: @llvm.x86.avx512.mask.getmant.ss + return _mm_getmant_round_ss(__A, __B, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src, 8); +} + +__m128 test_mm_getmant_ss(__m128 __A, __m128 __B) { + // CHECK-LABEL: @test_mm_getmant_ss + // CHECK: @llvm.x86.avx512.mask.getmant.ss + return _mm_getmant_ss(__A, __B, _MM_MANT_NORM_1_2, _MM_MANT_SIGN_src); +} + +__mmask16 test_mm512_kmov(__mmask16 __A) { + // CHECK-LABEL: @test_mm512_kmov + // CHECK: load i16, i16* %__A.addr.i, align 2 + return _mm512_kmov(__A); +} + + + Index: test/CodeGen/avx512vl-builtins.c =================================================================== --- test/CodeGen/avx512vl-builtins.c +++ test/CodeGen/avx512vl-builtins.c @@ -4080,3 +4080,219 @@ return _mm256_maskz_set1_epi64(__M, __A); } +__m128d test_mm_fixupimm_pd(__m128d __A, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_fixupimm_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128 + return _mm_fixupimm_pd(__A, __B, __C, 5); +} + +__m128d test_mm_mask_fixupimm_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_mask_fixupimm_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.128 + return _mm_mask_fixupimm_pd(__A, __U, __B, __C, 5); +} + +__m128d test_mm_maskz_fixupimm_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128i __C) { + // CHECK-LABEL: @test_mm_maskz_fixupimm_pd + // CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.128 + return _mm_maskz_fixupimm_pd(__U, __A, __B, __C, 5); +} + +__m256d test_mm256_fixupimm_pd(__m256d __A, __m256d __B, __m256i __C) { + // CHECK-LABEL: @test_mm256_fixupimm_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.256 + return _mm256_fixupimm_pd(__A, __B, __C, 5); +} + +__m256d test_mm256_mask_fixupimm_pd(__m256d __A, __mmask8 __U, __m256d __B, 
__m256i __C) { + // CHECK-LABEL: @test_mm256_mask_fixupimm_pd + // CHECK: @llvm.x86.avx512.mask.fixupimm.pd.256 + return _mm256_mask_fixupimm_pd(__A, __U, __B, __C, 5); +} + +__m256d test_mm256_maskz_fixupimm_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256i __C) { + // CHECK-LABEL: @test_mm256_maskz_fixupimm_pd + // CHECK: @llvm.x86.avx512.maskz.fixupimm.pd.256 + return _mm256_maskz_fixupimm_pd(__U, __A, __B, __C, 5); +} + +__m128 test_mm_fixupimm_ps(__m128 __A, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_fixupimm_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.128 + return _mm_fixupimm_ps(__A, __B, __C, 5); +} + +__m128 test_mm_mask_fixupimm_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_mask_fixupimm_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.128 + return _mm_mask_fixupimm_ps(__A, __U, __B, __C, 5); +} + +__m128 test_mm_maskz_fixupimm_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128i __C) { + // CHECK-LABEL: @test_mm_maskz_fixupimm_ps + // CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.128 + return _mm_maskz_fixupimm_ps(__U, __A, __B, __C, 5); +} + +__m256 test_mm256_fixupimm_ps(__m256 __A, __m256 __B, __m256i __C) { + // CHECK-LABEL: @test_mm256_fixupimm_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.256 + return _mm256_fixupimm_ps(__A, __B, __C, 5); +} + +__m256 test_mm256_mask_fixupimm_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256i __C) { + // CHECK-LABEL: @test_mm256_mask_fixupimm_ps + // CHECK: @llvm.x86.avx512.mask.fixupimm.ps.256 + return _mm256_mask_fixupimm_ps(__A, __U, __B, __C, 5); +} + +__m256 test_mm256_maskz_fixupimm_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256i __C) { + // CHECK-LABEL: @test_mm256_maskz_fixupimm_ps + // CHECK: @llvm.x86.avx512.maskz.fixupimm.ps.256 + return _mm256_maskz_fixupimm_ps(__U, __A, __B, __C, 5); +} + +__m128d test_mm_mask_load_pd(__m128d __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_load_pd + // CHECK: @llvm.x86.avx512.mask.load.pd.128 + return _mm_mask_load_pd(__W, __U, __P); +} + +__m128d test_mm_maskz_load_pd(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_load_pd + // CHECK: @llvm.x86.avx512.mask.load.pd.128 + return _mm_maskz_load_pd(__U, __P); +} + +__m256d test_mm256_mask_load_pd(__m256d __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_load_pd + // CHECK: @llvm.x86.avx512.mask.load.pd.256 + return _mm256_mask_load_pd(__W, __U, __P); +} + +__m256d test_mm256_maskz_load_pd(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_load_pd + // CHECK: @llvm.x86.avx512.mask.load.pd.256 + return _mm256_maskz_load_pd(__U, __P); +} + +__m128 test_mm_mask_load_ps(__m128 __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_load_ps + // CHECK: @llvm.x86.avx512.mask.load.ps.128 + return _mm_mask_load_ps(__W, __U, __P); +} + +__m128 test_mm_maskz_load_ps(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_load_ps + // CHECK: @llvm.x86.avx512.mask.load.ps.128 + return _mm_maskz_load_ps(__U, __P); +} + +__m256 test_mm256_mask_load_ps(__m256 __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_load_ps + // CHECK: @llvm.x86.avx512.mask.load.ps.256 + return _mm256_mask_load_ps(__W, __U, __P); +} + +__m256 test_mm256_maskz_load_ps(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_load_ps + // CHECK: @llvm.x86.avx512.mask.load.ps.256 + return _mm256_maskz_load_ps(__U, __P); +} + +__m128i test_mm_mask_loadu_epi64(__m128i __W, __mmask8 __U, void const *__P) { + 
// CHECK-LABEL: @test_mm_mask_loadu_epi64 + // CHECK: @llvm.x86.avx512.mask.loadu.q.128 + return _mm_mask_loadu_epi64(__W, __U, __P); +} + +__m128i test_mm_maskz_loadu_epi64(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_loadu_epi64 + // CHECK: @llvm.x86.avx512.mask.loadu.q.128 + return _mm_maskz_loadu_epi64(__U, __P); +} + +__m256i test_mm256_mask_loadu_epi64(__m256i __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_loadu_epi64 + // CHECK: @llvm.x86.avx512.mask.loadu.q.256 + return _mm256_mask_loadu_epi64(__W, __U, __P); +} + +__m256i test_mm256_maskz_loadu_epi64(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_loadu_epi64 + // CHECK: @llvm.x86.avx512.mask.loadu.q.256 + return _mm256_maskz_loadu_epi64(__U, __P); +} + +__m128i test_mm_mask_loadu_epi32(__m128i __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_loadu_epi32 + // CHECK: @llvm.x86.avx512.mask.loadu.d.128 + return _mm_mask_loadu_epi32(__W, __U, __P); +} + +__m128i test_mm_maskz_loadu_epi32(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_loadu_epi32 + // CHECK: @llvm.x86.avx512.mask.loadu.d.128 + return _mm_maskz_loadu_epi32(__U, __P); +} + +__m256i test_mm256_mask_loadu_epi32(__m256i __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_loadu_epi32 + // CHECK: @llvm.x86.avx512.mask.loadu.d.256 + return _mm256_mask_loadu_epi32(__W, __U, __P); +} + +__m256i test_mm256_maskz_loadu_epi32(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_loadu_epi32 + // CHECK: @llvm.x86.avx512.mask.loadu.d.256 + return _mm256_maskz_loadu_epi32(__U, __P); +} + +__m128d test_mm_mask_loadu_pd(__m128d __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_loadu_pd + // CHECK: @llvm.x86.avx512.mask.loadu.pd.128 + return _mm_mask_loadu_pd(__W, __U, __P); +} + +__m128d test_mm_maskz_loadu_pd(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_loadu_pd + // CHECK: @llvm.x86.avx512.mask.loadu.pd.128 + return _mm_maskz_loadu_pd(__U, __P); +} + +__m256d test_mm256_mask_loadu_pd(__m256d __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_loadu_pd + // CHECK: @llvm.x86.avx512.mask.loadu.pd.256 + return _mm256_mask_loadu_pd(__W, __U, __P); +} + +__m256d test_mm256_maskz_loadu_pd(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_loadu_pd + // CHECK: @llvm.x86.avx512.mask.loadu.pd.256 + return _mm256_maskz_loadu_pd(__U, __P); +} + +__m128 test_mm_mask_loadu_ps(__m128 __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_loadu_ps + // CHECK: @llvm.x86.avx512.mask.loadu.ps.128 + return _mm_mask_loadu_ps(__W, __U, __P); +} + +__m128 test_mm_maskz_loadu_ps(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_loadu_ps + // CHECK: @llvm.x86.avx512.mask.loadu.ps.128 + return _mm_maskz_loadu_ps(__U, __P); +} + +__m256 test_mm256_mask_loadu_ps(__m256 __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_loadu_ps + // CHECK: @llvm.x86.avx512.mask.loadu.ps.256 + return _mm256_mask_loadu_ps(__W, __U, __P); +} + +__m256 test_mm256_maskz_loadu_ps(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_loadu_ps + // CHECK: @llvm.x86.avx512.mask.loadu.ps.256 + return _mm256_maskz_loadu_ps(__U, __P); +} + Index: test/CodeGen/avx512vlbw-builtins.c =================================================================== --- test/CodeGen/avx512vlbw-builtins.c +++ test/CodeGen/avx512vlbw-builtins.c @@ -2005,3 +2005,51 @@ return 
_mm256_maskz_mov_epi8(__U, __A); } +__m128i test_mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_loadu_epi16 + // CHECK: @llvm.x86.avx512.mask.loadu.w.128 + return _mm_mask_loadu_epi16(__W, __U, __P); +} + +__m128i test_mm_maskz_loadu_epi16(__mmask8 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_loadu_epi16 + // CHECK: @llvm.x86.avx512.mask.loadu.w.128 + return _mm_maskz_loadu_epi16(__U, __P); +} + +__m256i test_mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_loadu_epi16 + // CHECK: @llvm.x86.avx512.mask.loadu.w.256 + return _mm256_mask_loadu_epi16(__W, __U, __P); +} + +__m256i test_mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_loadu_epi16 + // CHECK: @llvm.x86.avx512.mask.loadu.w.256 + return _mm256_maskz_loadu_epi16(__U, __P); +} + +__m128i test_mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P) { + // CHECK-LABEL: @test_mm_mask_loadu_epi8 + // CHECK: @llvm.x86.avx512.mask.loadu.b.128 + return _mm_mask_loadu_epi8(__W, __U, __P); +} + +__m128i test_mm_maskz_loadu_epi8(__mmask16 __U, void const *__P) { + // CHECK-LABEL: @test_mm_maskz_loadu_epi8 + // CHECK: @llvm.x86.avx512.mask.loadu.b.128 + return _mm_maskz_loadu_epi8(__U, __P); +} + +__m256i test_mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_mask_loadu_epi8 + // CHECK: @llvm.x86.avx512.mask.loadu.b.256 + return _mm256_mask_loadu_epi8(__W, __U, __P); +} + +__m256i test_mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P) { + // CHECK-LABEL: @test_mm256_maskz_loadu_epi8 + // CHECK: @llvm.x86.avx512.mask.loadu.b.256 + return _mm256_maskz_loadu_epi8(__U, __P); +} +
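
Usage notes for reviewers (not part of the patch). The sketch below exercises the new masked unaligned loads; it assumes the intrinsics exactly as declared above and a build with -mavx512f -mavx512vl -mavx512bw. The helper names sum_tail_ps and load_first_n_bytes are illustrative only.

#include <immintrin.h>

/* Sum the first n floats of src (0 <= n <= 8) without a scalar tail loop.
   Lanes switched off in the mask read as 0.0f with the maskz form, so they
   do not contribute to the sum.  Needs AVX512F + AVX512VL.                 */
static float sum_tail_ps(const float *src, int n)
{
  __mmask8 k = (__mmask8)((1u << n) - 1);       /* low n mask bits set  */
  __m256 v = _mm256_maskz_loadu_ps(k, src);     /* masked-off lanes = 0 */
  float buf[8];
  _mm256_storeu_ps(buf, v);
  float s = 0.0f;
  for (int i = 0; i < 8; ++i)
    s += buf[i];
  return s;
}

/* Same idea for bytes with the 512-bit form (0 <= n <= 63); needs AVX512BW. */
static __m512i load_first_n_bytes(const void *src, int n)
{
  __mmask64 k = (((__mmask64)1 << n) - 1);
  return _mm512_maskz_loadu_epi8(k, src);
}

The loadu forms place no alignment requirement on the pointer; the new _mm_mask_load_pd/_mm256_mask_load_ps family maps to the aligned VMOVAPD/VMOVAPS encodings and still expects natural 16/32-byte alignment.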
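The new scalar getexp/getmant intrinsics split a value into its exponent and a normalized mantissa, so that x == getmant(x) * 2^getexp(x) for the [1, 2) normalization. A minimal sketch, assuming the usual scalar operand convention (lower result taken from the second operand, upper element copied from the first) and a -mavx512f build:

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
  __m128d x = _mm_set_sd(24.0);

  /* Exponent returned as a double: floor(log2(|24.0|)) == 4.0 */
  double e = _mm_cvtsd_f64(_mm_getexp_sd(x, x));

  /* Mantissa normalized to [1, 2), keeping the sign of the source:
     24.0 == 1.5 * 2^4                                             */
  double m = _mm_cvtsd_f64(_mm_getmant_sd(x, x, _MM_MANT_NORM_1_2,
                                          _MM_MANT_SIGN_src));

  printf("exp = %f  mant = %f\n", e, m);  /* exp = 4.000000  mant = 1.500000 */
  return 0;
}

The _MM_MANTISSA_NORM_ENUM/_MM_MANTISSA_SIGN_ENUM values come from the enums this patch adds to avx512fintrin.h; the _round variants additionally take an explicit suppress-all-exceptions/rounding immediate such as the value 8 (_MM_FROUND_NO_EXC) used in the tests above.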
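The kunpack helpers concatenate the low halves of two mask registers into a wider mask, which is handy when two independent compares each produce half of the final mask. A hedged sketch, assuming the operand order documented for KUNPCKWD/KUNPCKDQ in the Intel intrinsics guide (second argument supplies the low half of the result); needs -mavx512bw:

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
  __mmask32 hi = 0x000F;   /* its low 16 bits become bits 31:16 of the result */
  __mmask32 lo = 0xF000;   /* its low 16 bits become bits 15:0 of the result  */

  __mmask32 k32 = _mm512_kunpackw(hi, lo);                /* 0x000FF000 expected         */
  __mmask64 k64 = _mm512_kunpackd(0x1ULL, 0x80000000ULL); /* 0x0000000180000000 expected */

  printf("k32 = 0x%08x  k64 = 0x%016llx\n",
         (unsigned)k32, (unsigned long long)k64);
  return 0;
}

If the final LLVM lowering of llvm.x86.avx512.kunpck.wd/dq swaps the operands, the two halves trade places; worth confirming against the generated KUNPCKWD/KUNPCKDQ once the backend side lands.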