Index: llvm/trunk/lib/Target/X86/X86InstrSSE.td
===================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td
@@ -8243,6 +8243,18 @@
             (VCVTPH2PSrm addr:$src)>;
   def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
             (VCVTPH2PSrm addr:$src)>;
+
+  def : Pat<(store (f64 (vector_extract (bc_v2f64 (v8i16
+                  (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
+                   addr:$dst),
+                   (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
+  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v8i16
+                  (int_x86_vcvtps2ph_128 VR128:$src1, i32:$src2))), (iPTR 0))),
+                   addr:$dst),
+                   (VCVTPS2PHmr addr:$dst, VR128:$src1, imm:$src2)>;
+  def : Pat<(store (v8i16 (int_x86_vcvtps2ph_256 VR256:$src1, i32:$src2)),
+                   addr:$dst),
+                   (VCVTPS2PHYmr addr:$dst, VR256:$src1, imm:$src2)>;
 }
 
 // Patterns for matching conversions from float to half-float and vice versa.
Index: llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll
+++ llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -march=x86 -mattr=+avx,+f16c | FileCheck %s
-; RUN: llc < %s -march=x86-64 -mattr=+avx,+f16c | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx,+f16c | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s
 
 define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtph2ps_128
+  ; CHECK-LABEL: test_x86_vcvtph2ps_128:
   ; CHECK-NOT: vmov
   ; CHECK: vcvtph2ps
   %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
@@ -12,7 +12,7 @@
 
 
 define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtph2ps_256
+  ; CHECK-LABEL: test_x86_vcvtph2ps_256:
   ; CHECK-NOT: vmov
   ; CHECK: vcvtph2ps
   %res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
@@ -31,7 +31,7 @@
 }
 
 define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtps2ph_128
+  ; CHECK-LABEL: test_x86_vcvtps2ph_128:
   ; CHECK-NOT: vmov
   ; CHECK: vcvtps2ph
   %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
@@ -39,9 +39,8 @@
 }
 declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
 
-
 define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) {
-  ; CHECK-LABEL: test_x86_vcvtps2ph_256
+  ; CHECK-LABEL: test_x86_vcvtps2ph_256:
   ; CHECK-NOT: vmov
   ; CHECK: vcvtps2ph
   %res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
@@ -50,7 +49,7 @@
 declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
 
 define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
-; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar
+; CHECK-LABEL: test_x86_vcvtps2ph_128_scalar:
 ; CHECK-NOT: vmov
 ; CHECK: vcvtph2ps (%
 
@@ -61,3 +60,48 @@
   %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc) #2
   ret <4 x float> %res
 }
+
+define void @test_x86_vcvtps2ph_256_m(<8 x i16>* nocapture %d, <8 x float> %a) nounwind {
+entry:
+  ; CHECK-LABEL: test_x86_vcvtps2ph_256_m:
+  ; CHECK-NOT: vmov
+  ; CHECK: vcvtps2ph $3, %ymm0, (%
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3)
+  store <8 x i16> %0, <8 x i16>* %d, align 16
+  ret void
+}
+
+define void @test_x86_vcvtps2ph_128_m(<4 x i16>* nocapture %d, <4 x float> %a) nounwind {
+entry:
+  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m:
+  ; CHECK-NOT: vmov
+  ; CHECK: vcvtps2ph $3, %xmm0, (%
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3)
+  %1 = shufflevector <8 x i16> %0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  store <4 x i16> %1, <4 x i16>* %d, align 8
+  ret void
+}
+
+define void @test_x86_vcvtps2ph_128_m2(double* nocapture %hf4x16, <4 x float> %f4x32) #0 {
+entry:
+  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m2:
+  ; CHECK-NOT: vmov
+  ; CHECK: vcvtps2ph $3, %xmm0, (%
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3)
+  %1 = bitcast <8 x i16> %0 to <2 x double>
+  %vecext = extractelement <2 x double> %1, i32 0
+  store double %vecext, double* %hf4x16, align 8
+  ret void
+}
+
+define void @test_x86_vcvtps2ph_128_m3(i64* nocapture %hf4x16, <4 x float> %f4x32) #0 {
+entry:
+  ; CHECK-LABEL: test_x86_vcvtps2ph_128_m3:
+  ; CHECK-NOT: vmov
+  ; CHECK: vcvtps2ph $3, %xmm0, (%
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4x32, i32 3)
+  %1 = bitcast <8 x i16> %0 to <2 x i64>
+  %vecext = extractelement <2 x i64> %1, i32 0
+  store i64 %vecext, i64* %hf4x16, align 8
+  ret void
+}
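
Note (not part of the patch): for reference, a minimal C-level sketch of the source code these new store patterns are aimed at, assuming the standard F16C/SSE2 intrinsics from <immintrin.h> (_mm_cvtps_ph, _mm256_cvtps_ph, _mm_storel_epi64, _mm_storeu_si128) and a build with AVX and F16C enabled (e.g. -mavx -mf16c). The helper names and the rounding immediate 3 are illustrative only, chosen to mirror the new tests. With the patterns above, storing the converted half floats should select the memory form of vcvtps2ph rather than a register-form vcvtps2ph followed by a separate vmov store, which is what the CHECK-NOT: vmov lines verify.

  #include <immintrin.h>
  #include <stdint.h>

  /* 4 floats -> 4 half floats; only the low 64 bits of the vcvtps2ph result
     are meaningful, so store just those (compare test_x86_vcvtps2ph_128_m3). */
  void store_4xf32_as_f16(uint64_t *dst, __m128 f4x32) {
    __m128i h = _mm_cvtps_ph(f4x32, 3);   /* immediate 3 matches the tests  */
    _mm_storel_epi64((__m128i *)dst, h);  /* 64-bit store of the low lanes  */
  }

  /* 8 floats -> 8 half floats; the full 128-bit result is stored
     (compare test_x86_vcvtps2ph_256_m). */
  void store_8xf32_as_f16(__m128i *dst, __m256 f8x32) {
    _mm_storeu_si128(dst, _mm256_cvtps_ph(f8x32, 3));
  }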