Index: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
===================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsX86.td
+++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td
@@ -1760,16 +1760,16 @@
 // Conditional load ops
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
-        Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty],
+        Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
                   [IntrReadArgMem]>;
   def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
-        Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty],
+        Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
                   [IntrReadArgMem]>;
   def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">,
-        Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty],
+        Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
                   [IntrReadArgMem]>;
   def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
-        Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty],
+        Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
                   [IntrReadArgMem]>;
   def int_x86_avx512_mask_loadu_ps_512 : GCCBuiltin<"__builtin_ia32_loadups512_mask">,
     Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
@@ -1789,18 +1789,18 @@
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
         Intrinsic<[], [llvm_ptr_ty,
-                  llvm_v2f64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
+                  llvm_v2i64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
   def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
         Intrinsic<[], [llvm_ptr_ty,
-                  llvm_v4f32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+                  llvm_v4i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
   def int_x86_avx_maskstore_pd_256 :
         GCCBuiltin<"__builtin_ia32_maskstorepd256">,
         Intrinsic<[], [llvm_ptr_ty,
-                  llvm_v4f64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
+                  llvm_v4i64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
   def int_x86_avx_maskstore_ps_256 :
         GCCBuiltin<"__builtin_ia32_maskstoreps256">,
         Intrinsic<[], [llvm_ptr_ty,
-                  llvm_v8f32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
+                  llvm_v8i32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
   def int_x86_avx512_mask_storeu_ps_512 :
         GCCBuiltin<"__builtin_ia32_storeups512_mask">,
     Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
Index: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -2536,102 +2536,102 @@
 declare <32 x i8> @llvm.x86.avx.ldu.dq.256(i8*) nounwind readonly
 
 
-define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x double> %a1) {
+define <2 x double> @test_x86_avx_maskload_pd(i8* %a0, <2 x i64> %mask) {
 ; CHECK-LABEL: test_x86_avx_maskload_pd:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovpd (%eax), %xmm0, %xmm0
 ; CHECK-NEXT:    retl
-  %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
+  %res = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %a0, <2 x i64> %mask) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
-declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x double>) nounwind readonly
+declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>) nounwind readonly
 
 
-define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x double> %a1) {
+define <4 x double> @test_x86_avx_maskload_pd_256(i8* %a0, <4 x i64> %mask) {
 ; CHECK-LABEL: test_x86_avx_maskload_pd_256:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovpd (%eax), %ymm0, %ymm0
 ; CHECK-NEXT:    retl
-  %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x double> %a1) ; <<4 x double>> [#uses=1]
+  %res = call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %a0, <4 x i64> %mask) ; <<4 x double>> [#uses=1]
   ret <4 x double> %res
 }
-declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x double>) nounwind readonly
+declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>) nounwind readonly
 
 
-define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x float> %a1) {
+define <4 x float> @test_x86_avx_maskload_ps(i8* %a0, <4 x i32> %mask) {
 ; CHECK-LABEL: test_x86_avx_maskload_ps:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovps (%eax), %xmm0, %xmm0
 ; CHECK-NEXT:    retl
-  %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
+  %res = call <4 x float> @llvm.x86.avx.maskload.ps(i8* %a0, <4 x i32> %mask) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
-declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x float>) nounwind readonly
+declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>) nounwind readonly
 
 
-define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x float> %a1) {
+define <8 x float> @test_x86_avx_maskload_ps_256(i8* %a0, <8 x i32> %mask) {
 ; CHECK-LABEL: test_x86_avx_maskload_ps_256:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovps (%eax), %ymm0, %ymm0
 ; CHECK-NEXT:    retl
-  %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
+  %res = call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %a0, <8 x i32> %mask) ; <<8 x float>> [#uses=1]
   ret <8 x float> %res
 }
-declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x float>) nounwind readonly
+declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>) nounwind readonly
 
 
-define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x double> %a1, <2 x double> %a2) {
+define void @test_x86_avx_maskstore_pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2) {
 ; CHECK-LABEL: test_x86_avx_maskstore_pd:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovpd %xmm1, %xmm0, (%eax)
 ; CHECK-NEXT:    retl
-  call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x double> %a1, <2 x double> %a2)
+  call void @llvm.x86.avx.maskstore.pd(i8* %a0, <2 x i64> %mask, <2 x double> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x double>, <2 x double>) nounwind
+declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>) nounwind
 
 
-define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x double> %a1, <4 x double> %a2) {
+define void @test_x86_avx_maskstore_pd_256(i8* %a0, <4 x i64> %mask, <4 x double> %a2) {
 ; CHECK-LABEL: test_x86_avx_maskstore_pd_256:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovpd %ymm1, %ymm0, (%eax)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retl
-  call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x double> %a1, <4 x double> %a2)
+  call void @llvm.x86.avx.maskstore.pd.256(i8* %a0, <4 x i64> %mask, <4 x double> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x double>, <4 x double>) nounwind
+declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>) nounwind
 
 
-define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x float> %a1, <4 x float> %a2) {
+define void @test_x86_avx_maskstore_ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2) {
 ; CHECK-LABEL: test_x86_avx_maskstore_ps:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovps %xmm1, %xmm0, (%eax)
 ; CHECK-NEXT:    retl
-  call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x float> %a1, <4 x float> %a2)
+  call void @llvm.x86.avx.maskstore.ps(i8* %a0, <4 x i32> %mask, <4 x float> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x float>, <4 x float>) nounwind
+declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>) nounwind
 
 
-define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x float> %a1, <8 x float> %a2) {
+define void @test_x86_avx_maskstore_ps_256(i8* %a0, <8 x i32> %mask, <8 x float> %a2) {
 ; CHECK-LABEL: test_x86_avx_maskstore_ps_256:
 ; CHECK:       ## BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    vmaskmovps %ymm1, %ymm0, (%eax)
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retl
-  call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x float> %a1, <8 x float> %a2)
+  call void @llvm.x86.avx.maskstore.ps.256(i8* %a0, <8 x i32> %mask, <8 x float> %a2)
   ret void
 }
-declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind
+declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
 
 
 define <4 x double> @test_x86_avx_max_pd_256(<4 x double> %a0, <4 x double> %a1) {
Index: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx-load-store.ll
+++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll
@@ -88,7 +88,7 @@
   ret void
 }
 
-declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind
+declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>) nounwind
 
 ; CHECK_O0: _f_f
 ; CHECK-O0: vmovss LCPI
@@ -105,7 +105,7 @@
   br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check
 
 cif_mixed_test_all:                               ; preds = %cif_mask_mixed
-  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x float> , <8 x float> undef) nounwind
+  call void @llvm.x86.avx.maskstore.ps.256(i8* undef, <8 x i32> , <8 x float> undef) nounwind
   unreachable
 
 cif_mixed_test_any_check:                         ; preds = %cif_mask_mixed
Index: llvm/trunk/test/CodeGen/X86/avx-win64.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/avx-win64.ll
+++ llvm/trunk/test/CodeGen/X86/avx-win64.ll
@@ -42,6 +42,4 @@
 }
 
 declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) nounwind readnone
-declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x float>) nounwind readonly
-declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x float>, <8 x float>) nounwind
 declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>) nounwind readnone