Index: cfe/trunk/include/clang/Basic/BuiltinsX86.def
===================================================================
--- cfe/trunk/include/clang/Basic/BuiltinsX86.def
+++ cfe/trunk/include/clang/Basic/BuiltinsX86.def
@@ -1697,6 +1697,19 @@
 TARGET_BUILTIN(__builtin_ia32_psrlw256_mask, "V16sV16sV8sV16sUs","","avx512bw,avx512vl")
 TARGET_BUILTIN(__builtin_ia32_psrlwi128_mask, "V8sV8sIiV8sUc","","avx512bw,avx512vl")
 TARGET_BUILTIN(__builtin_ia32_psrlwi256_mask, "V16sV16sIiV16sUs","","avx512bw,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa32load512_mask, "V16ivC*V16iUs","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa32store512_mask, "vV16i*V16iUs","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa64_512_mask, "V8LLiV8LLiV8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa64load512_mask, "V8LLivC*V8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa64store512_mask, "vV8LLi*V8LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa32store128_mask, "vV4i*V4iUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa32store256_mask, "vV8i*V8iUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa64_128_mask, "V2LLiV2LLiV2LLiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa64_256_mask, "V4LLiV4LLiV4LLiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa64load128_mask, "V2LLivC*V2LLiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa64load256_mask, "V4LLivC*V4LLiUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa64store128_mask, "vV2LLi*V2LLiUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa64store256_mask, "vV4LLi*V4LLiUc","","avx512f")
 
 #undef BUILTIN
 #undef TARGET_BUILTIN
Index: cfe/trunk/lib/Headers/avx512fintrin.h
===================================================================
--- cfe/trunk/lib/Headers/avx512fintrin.h
+++ cfe/trunk/lib/Headers/avx512fintrin.h
@@ -3602,6 +3602,72 @@
                                              (__mmask8)( __U));\
 })
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
+                                                        (__v16si) __W,
+                                                        (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
+                                                        (__v16si)
+                                                        _mm512_setzero_si512 (),
+                                                        (__mmask16) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
+{
+  __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
+                                        (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdqa64_512_mask ((__v8di) __A,
+                                                     (__v8di) __W,
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdqa64_512_mask ((__v8di) __A,
+                                                     (__v8di)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
+                                                        (__v8di) __W,
+                                                        (__mmask8) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
+                                                        (__v8di)
+                                                        _mm512_setzero_si512 (),
+                                                        (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
+{
+  __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
+                                        (__mmask8) __U);
+}
+
+
 #undef __DEFAULT_FN_ATTRS
 
 #endif // __AVX512FINTRIN_H
Index: cfe/trunk/lib/Headers/avx512vlintrin.h
===================================================================
--- cfe/trunk/lib/Headers/avx512vlintrin.h
+++ cfe/trunk/lib/Headers/avx512vlintrin.h
@@ -5871,6 +5871,114 @@
                                                   _mm256_setzero_si256 (),
                                                   (__mmask8) __U);
 }
+
+
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa32store128_mask ((__v4si *) __P,
+                                        (__v4si) __A,
+                                        (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa32store256_mask ((__v8si *) __P,
+                                        (__v8si) __A,
+                                        (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A,
+                                                     (__v2di) __W,
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A,
+                                                     (__v2di)
+                                                     _mm_setzero_di (),
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A,
+                                                     (__v4di) __W,
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A,
+                                                     (__v4di)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+                                                        (__v2di) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa64load128_mask ((__v2di *) __P,
+                                                        (__v2di)
+                                                        _mm_setzero_di (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+                                                        (__v4di) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa64load256_mask ((__v4di *) __P,
+                                                        (__v4di)
+                                                        _mm256_setzero_si256 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A)
+{
+  __builtin_ia32_movdqa64store128_mask ((__v2di *) __P,
+                                        (__v2di) __A,
+                                        (__mmask8) __U);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A)
+{
+  __builtin_ia32_movdqa64store256_mask ((__v4di *) __P,
+                                        (__v4di) __A,
+                                        (__mmask8) __U);
+}
+
+
 #undef __DEFAULT_FN_ATTRS
 #undef __DEFAULT_FN_ATTRS_BOTH
Index: cfe/trunk/test/CodeGen/avx512f-builtins.c
===================================================================
--- cfe/trunk/test/CodeGen/avx512f-builtins.c
+++ cfe/trunk/test/CodeGen/avx512f-builtins.c
@@ -2298,3 +2298,51 @@
   return _mm512_maskz_srli_epi64(__U, __A, 5);
 }
 
+__m512i test_mm512_mask_load_epi32(__m512i __W, __mmask16 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm512_mask_load_epi32
+  // CHECK: @llvm.x86.avx512.mask.load.d.512
+  return _mm512_mask_load_epi32(__W, __U, __P);
+}
+
+__m512i test_mm512_maskz_load_epi32(__mmask16 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm512_maskz_load_epi32
+  // CHECK: @llvm.x86.avx512.mask.load.d.512
+  return _mm512_maskz_load_epi32(__U, __P);
+}
+
+__m512i test_mm512_mask_mov_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
+  // CHECK-LABEL: @test_mm512_mask_mov_epi64
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm512_mask_mov_epi64(__W, __U, __A);
+}
+
+__m512i test_mm512_maskz_mov_epi64(__mmask8 __U, __m512i __A) {
+  // CHECK-LABEL: @test_mm512_maskz_mov_epi64
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm512_maskz_mov_epi64(__U, __A);
+}
+
+__m512i test_mm512_mask_load_epi64(__m512i __W, __mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm512_mask_load_epi64
+  // CHECK: @llvm.x86.avx512.mask.load.q.512
+  return _mm512_mask_load_epi64(__W, __U, __P);
+}
+
+__m512i test_mm512_maskz_load_epi64(__mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm512_maskz_load_epi64
+  // CHECK: @llvm.x86.avx512.mask.load.q.512
+  return _mm512_maskz_load_epi64(__U, __P);
+}
+
+void test_mm512_mask_store_epi32(void *__P, __mmask16 __U, __m512i __A) {
+  // CHECK-LABEL: @test_mm512_mask_store_epi32
+  // CHECK: @llvm.x86.avx512.mask.store.d.512
+  return _mm512_mask_store_epi32(__P, __U, __A);
+}
+
+void test_mm512_mask_store_epi64(void *__P, __mmask8 __U, __m512i __A) {
+  // CHECK-LABEL: @test_mm512_mask_store_epi64
+  // CHECK: @llvm.x86.avx512.mask.store.q.512
+  return _mm512_mask_store_epi64(__P, __U, __A);
+}
+
Index: cfe/trunk/test/CodeGen/avx512vl-builtins.c
===================================================================
--- cfe/trunk/test/CodeGen/avx512vl-builtins.c
+++ cfe/trunk/test/CodeGen/avx512vl-builtins.c
@@ -3936,3 +3936,75 @@
   return _mm256_maskz_srav_epi64(__U, __X, __Y);
 }
 
+void test_mm_mask_store_epi32(void *__P, __mmask8 __U, __m128i __A) {
+  // CHECK-LABEL: @test_mm_mask_store_epi32
+  // CHECK: @llvm.x86.avx512.mask.store.d.128
+  return _mm_mask_store_epi32(__P, __U, __A);
+}
+
+void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
+  // CHECK-LABEL: @test_mm256_mask_store_epi32
+  // CHECK: @llvm.x86.avx512.mask.store.d.256
+  return _mm256_mask_store_epi32(__P, __U, __A);
+}
+
+__m128i test_mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
+  // CHECK-LABEL: @test_mm_mask_mov_epi64
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm_mask_mov_epi64(__W, __U, __A);
+}
+
+__m128i test_mm_maskz_mov_epi64(__mmask8 __U, __m128i __A) {
+  // CHECK-LABEL: @test_mm_maskz_mov_epi64
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm_maskz_mov_epi64(__U, __A);
+}
+
+__m256i test_mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
+  // CHECK-LABEL: @test_mm256_mask_mov_epi64
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm256_mask_mov_epi64(__W, __U, __A);
+}
+
+__m256i test_mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A) {
+  // CHECK-LABEL: @test_mm256_maskz_mov_epi64
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm256_maskz_mov_epi64(__U, __A);
+}
+
+__m128i test_mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm_mask_load_epi64
+  // CHECK: @llvm.x86.avx512.mask.load.q.128
+  return _mm_mask_load_epi64(__W, __U, __P);
+}
+
+__m128i test_mm_maskz_load_epi64(__mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm_maskz_load_epi64
+  // CHECK: @llvm.x86.avx512.mask.load.q.128
+  return _mm_maskz_load_epi64(__U, __P);
+}
+
+__m256i test_mm256_mask_load_epi64(__m256i __W, __mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm256_mask_load_epi64
+  // CHECK: @llvm.x86.avx512.mask.load.q.256
+  return _mm256_mask_load_epi64(__W, __U, __P);
+}
+
+__m256i test_mm256_maskz_load_epi64(__mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm256_maskz_load_epi64
+  // CHECK: @llvm.x86.avx512.mask.load.q.256
+  return _mm256_maskz_load_epi64(__U, __P);
+}
+
+void test_mm_mask_store_epi64(void *__P, __mmask8 __U, __m128i __A) {
+  // CHECK-LABEL: @test_mm_mask_store_epi64
+  // CHECK: @llvm.x86.avx512.mask.store.q.128
+  return _mm_mask_store_epi64(__P, __U, __A);
+}
+
+void test_mm256_mask_store_epi64(void *__P, __mmask8 __U, __m256i __A) {
+  // CHECK-LABEL: @test_mm256_mask_store_epi64
+  // CHECK: @llvm.x86.avx512.mask.store.q.256
+  return _mm256_mask_store_epi64(__P, __U, __A);
+}
+
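For reference, a minimal sketch of how the new masked load/store intrinsics might be exercised from user code, assuming a compiler carrying this patch and `-mavx512f`; the buffer names, the mask value, and the helper function are illustrative only and not part of the patch:

```c
/* Illustrative only: uses the masked 512-bit integer load/store intrinsics
 * added in this patch.  vmovdqa32 is an aligned move, so the buffers are
 * 64-byte aligned to match the 512-bit forms. */
#include <immintrin.h>

__attribute__((aligned(64))) static int src[16] = {0, 1, 2,  3,  4,  5,  6,  7,
                                                   8, 9, 10, 11, 12, 13, 14, 15};
__attribute__((aligned(64))) static int dst[16];

void copy_even_lanes(void) {
  const __mmask16 even = 0x5555;                 /* select lanes 0, 2, 4, ... */
  /* Load only the even lanes; masked-off lanes are zeroed (maskz form). */
  __m512i v = _mm512_maskz_load_epi32(even, src);
  /* Store only the even lanes; the odd elements of dst are left untouched. */
  _mm512_mask_store_epi32(dst, even, v);
}
```

The 128/256-bit variants added to avx512vlintrin.h follow the same pattern but additionally require `-mavx512vl` and the correspondingly smaller alignment.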