Index: lib/Target/X86/X86InstrSSE.td
===================================================================
--- lib/Target/X86/X86InstrSSE.td
+++ lib/Target/X86/X86InstrSSE.td
@@ -8517,6 +8517,13 @@
                     (VCVTPH2PSrm addr:$src)>;
   def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
             (VCVTPH2PSrm addr:$src)>;
+
+  def : Pat<(store (v8i16 (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2)),
+                   addr:$dst),
+            (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
+  def : Pat<(store (v8i16 (int_x86_vcvtps2ph_256 VR256:$src1, i32:$src2)),
+                   addr:$dst),
+            (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>;
 }
 
 // Patterns for matching conversions from float to half-float and vice versa.
Index: test/CodeGen/X86/f16c-intrinsics.ll
===================================================================
--- test/CodeGen/X86/f16c-intrinsics.ll
+++ test/CodeGen/X86/f16c-intrinsics.ll
@@ -39,6 +39,15 @@
 }
 declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
 
+define void @test_x86_vcvtps2ph_128_m(<8 x i16>* nocapture %d, <4 x float> %a) nounwind {
+entry:
+  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m:
+  ; CHECK-NOT: vmov
+  ; CHECK: vcvtps2ph $3, %xmm0, (%
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3)
+  store <8 x i16> %0, <8 x i16>* %d, align 16
+  ret void
+}
 
 define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
   ; CHECK-LABEL: test_x86_vcvtps2ph_256
@@ -61,3 +70,13 @@
   %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc) #2
   ret <4 x float> %res
 }
+
+define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind {
+entry:
+  ; CHECK-LABEL: test_x86_vcvtps2ph_256_m:
+  ; CHECK-NOT: vmov
+  ; CHECK: vcvtps2ph $3, %ymm0, (%
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3)
+  store <8 x i16> %0, <8 x i16>* %d, align 16
+  ret void
+}