Index: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
===================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsX86.td
+++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td
@@ -2830,12 +2830,38 @@
   def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
         Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
                   [IntrReadArgMem]>;
-  def int_x86_avx512_mask_loadu_d_512 : GCCBuiltin<"__builtin_ia32_loaddqusi512_mask">,
+  def int_x86_avx512_mask_loadu_d_512 : GCCBuiltin<"__builtin_ia32_loaddqusi512_mask">,
         Intrinsic<[llvm_v16i32_ty], [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                   [IntrReadArgMem]>;
   def int_x86_avx512_mask_loadu_q_512 : GCCBuiltin<"__builtin_ia32_loaddqudi512_mask">,
         Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                   [IntrReadArgMem]>;
+
+  def int_x86_avx512_mask_load_d_128 :
+        GCCBuiltin<"__builtin_ia32_movdqa32load128_mask">,
+          Intrinsic<[llvm_v4i32_ty],
+                    [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrReadArgMem]>;
+  def int_x86_avx512_mask_load_d_256 :
+        GCCBuiltin<"__builtin_ia32_movdqa32load256_mask">,
+          Intrinsic<[llvm_v8i32_ty],
+                    [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrReadArgMem]>;
+  def int_x86_avx512_mask_load_d_512 :
+        GCCBuiltin<"__builtin_ia32_movdqa32load512_mask">,
+          Intrinsic<[llvm_v16i32_ty],
+                    [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrReadArgMem]>;
+
+  def int_x86_avx512_mask_load_q_128 :
+        GCCBuiltin<"__builtin_ia32_movdqa64load128_mask">,
+          Intrinsic<[llvm_v2i64_ty],
+                    [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrReadArgMem]>;
+  def int_x86_avx512_mask_load_q_256 :
+        GCCBuiltin<"__builtin_ia32_movdqa64load256_mask">,
+          Intrinsic<[llvm_v4i64_ty],
+                    [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrReadArgMem]>;
+  def int_x86_avx512_mask_load_q_512 :
+        GCCBuiltin<"__builtin_ia32_movdqa64load512_mask">,
+          Intrinsic<[llvm_v8i64_ty],
+                    [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrReadArgMem]>;
 }

 // Conditional store ops
Index: llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
===================================================================
--- llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
+++ llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
@@ -143,12 +143,18 @@
                      EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
   X86_INTRINSIC_DATA(avx512_mask_expand_load_q_512,
                      EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
+  X86_INTRINSIC_DATA(avx512_mask_load_d_128, LOADA, ISD::DELETED_NODE, 0),
+  X86_INTRINSIC_DATA(avx512_mask_load_d_256, LOADA, ISD::DELETED_NODE, 0),
+  X86_INTRINSIC_DATA(avx512_mask_load_d_512, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_load_pd_128, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_load_pd_256, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_load_pd_512, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_load_ps_128, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_load_ps_256, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_load_ps_512, LOADA, ISD::DELETED_NODE, 0),
+  X86_INTRINSIC_DATA(avx512_mask_load_q_128, LOADA, ISD::DELETED_NODE, 0),
+  X86_INTRINSIC_DATA(avx512_mask_load_q_256, LOADA, ISD::DELETED_NODE, 0),
+  X86_INTRINSIC_DATA(avx512_mask_load_q_512, LOADA, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_loadu_pd_128, LOADU, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_loadu_pd_256, LOADU, ISD::DELETED_NODE, 0),
   X86_INTRINSIC_DATA(avx512_mask_loadu_pd_512, LOADU, ISD::DELETED_NODE, 0),
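As the GCCBuiltin mappings above show, front ends reach these intrinsics through the
movdqa32/movdqa64 load builtins. A minimal C sketch of the 512-bit dword form follows,
assuming the builtin's C-level signature mirrors the Intrinsic declaration above
(pointer, pass-through vector, mask); the wrapper name and vector typedef are
illustrative only and not part of this patch:

  /* Compile with -mavx512f. */
  #include <stdint.h>

  typedef int32_t v16si __attribute__((vector_size(64)));

  /* Masked aligned 512-bit dword load: lanes whose mask bit is set are
     read from p (assumed 64-byte aligned); clear lanes keep the matching
     element of passthru. */
  static inline v16si mask_load_epi32(const void *p, v16si passthru,
                                      uint16_t mask) {
    return __builtin_ia32_movdqa32load512_mask((const v16si *)p, passthru,
                                               mask);
  }
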
Index: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
@@ -6674,7 +6674,42 @@
   ret <8 x i64> %res4
 }

-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+declare <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8*, <16 x i32>, i16)
+
+define <16 x i32> @test_mask_load_aligned_d(<16 x i32> %data, i8* %ptr, i16 %mask) {
+; CHECK-LABEL: test_mask_load_aligned_d:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    kmovw %esi, %k1
+; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0
+; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1}
+; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm1 {%k1} {z}
+; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8* %ptr, <16 x i32> zeroinitializer, i16 -1)
+  %res1 = call <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8* %ptr, <16 x i32> %res, i16 %mask)
+  %res2 = call <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8* %ptr, <16 x i32> zeroinitializer, i16 %mask)
+  %res4 = add <16 x i32> %res2, %res1
+  ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8*, <8 x i64>, i8)
+
+define <8 x i64> @test_mask_load_aligned_q(<8 x i64> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_mask_load_aligned_q:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
+; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1}
+; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm1 {%k1} {z}
+; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8* %ptr, <8 x i64> zeroinitializer, i8 -1)
+  %res1 = call <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8* %ptr, <8 x i64> %res, i8 %mask)
+  %res2 = call <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8* %ptr, <8 x i64> zeroinitializer, i8 %mask)
+  %res4 = add <8 x i64> %res2, %res1
+  ret <8 x i64> %res4
+}

 declare <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8>, <16 x i32>, i16)

@@ -6883,5 +6918,3 @@
   %res4 = add <8 x i64> %res3, %res2
   ret <8 x i64> %res4
 }
-
-
Index: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -6875,6 +6875,82 @@
   ret <4 x i64> %res4
 }

+declare <4 x i32> @llvm.x86.avx512.mask.load.d.128(i8*, <4 x i32>, i8)
+
+define <4 x i32> @test_mask_load_aligned_d_128(<4 x i32> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_mask_load_aligned_d_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vmovdqa32 (%rdi), %xmm0
+; CHECK-NEXT:    vmovdqa32 (%rdi), %xmm0 {%k1}
+; CHECK-NEXT:    vmovdqa32 (%rdi), %xmm1 {%k1} {z}
+; CHECK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i32> @llvm.x86.avx512.mask.load.d.128(i8* %ptr, <4 x i32> zeroinitializer, i8 -1)
+  %res1 = call <4 x i32> @llvm.x86.avx512.mask.load.d.128(i8* %ptr, <4 x i32> %res, i8 %mask)
+  %res2 = call <4 x i32> @llvm.x86.avx512.mask.load.d.128(i8* %ptr, <4 x i32> zeroinitializer, i8 %mask)
+  %res4 = add <4 x i32> %res2, %res1
+  ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.load.d.256(i8*, <8 x i32>, i8)
+
+define <8 x i32> @test_mask_load_aligned_d_256(<8 x i32> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_mask_load_aligned_d_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm0
+; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm0 {%k1}
+; CHECK-NEXT:    vmovdqa32 (%rdi), %ymm1 {%k1} {z}
+; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <8 x i32> @llvm.x86.avx512.mask.load.d.256(i8* %ptr, <8 x i32> zeroinitializer, i8 -1)
+  %res1 = call <8 x i32> @llvm.x86.avx512.mask.load.d.256(i8* %ptr, <8 x i32> %res, i8 %mask)
+  %res2 = call <8 x i32> @llvm.x86.avx512.mask.load.d.256(i8* %ptr, <8 x i32> zeroinitializer, i8 %mask)
+  %res4 = add <8 x i32> %res2, %res1
+  ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.load.q.128(i8*, <2 x i64>, i8)
+
+define <2 x i64> @test_mask_load_aligned_q_128(<2 x i64> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_mask_load_aligned_q_128:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vmovdqa64 (%rdi), %xmm0
+; CHECK-NEXT:    vmovdqa64 (%rdi), %xmm0 {%k1}
+; CHECK-NEXT:    vmovdqa64 (%rdi), %xmm1 {%k1} {z}
+; CHECK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x i64> @llvm.x86.avx512.mask.load.q.128(i8* %ptr, <2 x i64> zeroinitializer, i8 -1)
+  %res1 = call <2 x i64> @llvm.x86.avx512.mask.load.q.128(i8* %ptr, <2 x i64> %res, i8 %mask)
+  %res2 = call <2 x i64> @llvm.x86.avx512.mask.load.q.128(i8* %ptr, <2 x i64> zeroinitializer, i8 %mask)
+  %res4 = add <2 x i64> %res2, %res1
+  ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.load.q.256(i8*, <4 x i64>, i8)
+
+define <4 x i64> @test_mask_load_aligned_q_256(<4 x i64> %data, i8* %ptr, i8 %mask) {
+; CHECK-LABEL: test_mask_load_aligned_q_256:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    movzbl %sil, %eax
+; CHECK-NEXT:    kmovw %eax, %k1
+; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm0
+; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm0 {%k1}
+; CHECK-NEXT:    vmovdqa64 (%rdi), %ymm1 {%k1} {z}
+; CHECK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %res = call <4 x i64> @llvm.x86.avx512.mask.load.q.256(i8* %ptr, <4 x i64> zeroinitializer, i8 -1)
+  %res1 = call <4 x i64> @llvm.x86.avx512.mask.load.q.256(i8* %ptr, <4 x i64> %res, i8 %mask)
+  %res2 = call <4 x i64> @llvm.x86.avx512.mask.load.q.256(i8* %ptr, <4 x i64> zeroinitializer, i8 %mask)
+  %res4 = add <4 x i64> %res2, %res1
+  ret <4 x i64> %res4
+}
+
 declare <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)

 define <4 x i32>@test_int_x86_avx512_mask_prolv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
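The CHECK lines in these tests encode the three masking modes: an all-ones mask folds to
a plain vmovdqa32/vmovdqa64, a merge mask ({%k1}) keeps pass-through lanes, and
zero-masking ({%k1} {z}) clears them. A scalar C reference model of that semantics,
not part of the patch; the helper name and its shape are illustrative:

  #include <stdint.h>

  /* Reference semantics for the masked aligned dword loads tested above:
     lane i is read from mem when bit i of mask is set, and taken from
     passthru otherwise.  An all-zero passthru models the {z}
     (zero-masking) form; an all-ones mask is a plain aligned load. */
  static void mask_load_d_ref(const int32_t *mem, const int32_t *passthru,
                              uint16_t mask, int32_t *out, int lanes) {
    for (int i = 0; i < lanes; ++i)
      out[i] = ((mask >> i) & 1) ? mem[i] : passthru[i];
  }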