diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1520,7 +1520,7 @@
     // Extract subvector is special because the value type
     // (result) is 128-bit but the source is 256-bit wide.
     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
-                     MVT::v4f32, MVT::v2f64 }) {
+                     MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
     }
 
@@ -1860,7 +1860,7 @@
     // (result) is 256-bit but the source is 512-bit wide.
     // 128-bit was made Legal under AVX1.
     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
-                     MVT::v8f32, MVT::v4f64 })
+                     MVT::v16f16, MVT::v8f32, MVT::v4f64 })
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
 
     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
diff --git a/llvm/test/CodeGen/X86/avx512-f16c-v16f16-fadd.ll b/llvm/test/CodeGen/X86/avx512-f16c-v16f16-fadd.ll
--- a/llvm/test/CodeGen/X86/avx512-f16c-v16f16-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512-f16c-v16f16-fadd.ll
@@ -4,23 +4,17 @@
 define <16 x half> @foo(<16 x half> %a, <16 x half> %b) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushq %rbp
-; CHECK-NEXT:    movq %rsp, %rbp
-; CHECK-NEXT:    andq $-32, %rsp
-; CHECK-NEXT:    subq $96, %rsp
-; CHECK-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovaps %ymm0, (%rsp)
-; CHECK-NEXT:    vcvtph2ps {{[0-9]+}}(%rsp), %ymm0
-; CHECK-NEXT:    vcvtph2ps (%rsp), %ymm1
-; CHECK-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    vcvtph2ps %xmm1, %ymm2
+; CHECK-NEXT:    vcvtph2ps %xmm0, %ymm3
+; CHECK-NEXT:    vaddps %ymm2, %ymm3, %ymm2
+; CHECK-NEXT:    vcvtps2ph $4, %ymm2, %xmm2
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT:    vcvtph2ps %xmm1, %ymm1
+; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vcvtph2ps %xmm0, %ymm0
+; CHECK-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vcvtps2ph $4, %ymm0, %xmm0
-; CHECK-NEXT:    vcvtph2ps {{[0-9]+}}(%rsp), %ymm1
-; CHECK-NEXT:    vcvtph2ps {{[0-9]+}}(%rsp), %ymm2
-; CHECK-NEXT:    vaddps %ymm1, %ymm2, %ymm1
-; CHECK-NEXT:    vcvtps2ph $4, %ymm1, %xmm1
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    movq %rbp, %rsp
-; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; CHECK-NEXT:    retq
   %1 = fadd <16 x half> %a, %b
   ret <16 x half> %1
diff --git a/llvm/test/CodeGen/X86/avx512-skx-v32f16-fadd.ll b/llvm/test/CodeGen/X86/avx512-skx-v32f16-fadd.ll
--- a/llvm/test/CodeGen/X86/avx512-skx-v32f16-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512-skx-v32f16-fadd.ll
@@ -4,23 +4,17 @@
 define <32 x half> @foo(<32 x half> %a, <32 x half> %b) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushq %rbp
-; CHECK-NEXT:    movq %rsp, %rbp
-; CHECK-NEXT:    andq $-64, %rsp
-; CHECK-NEXT:    subq $192, %rsp
-; CHECK-NEXT:    vmovaps %zmm1, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovaps %zmm0, (%rsp)
-; CHECK-NEXT:    vcvtph2ps {{[0-9]+}}(%rsp), %zmm0
-; CHECK-NEXT:    vcvtph2ps (%rsp), %zmm1
-; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    vcvtph2ps %ymm1, %zmm2
+; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm3
+; CHECK-NEXT:    vaddps %zmm2, %zmm3, %zmm2
+; CHECK-NEXT:    vcvtps2ph $4, %zmm2, %ymm2
+; CHECK-NEXT:    vextractf64x4 $1, %zmm1, %ymm1
+; CHECK-NEXT:    vcvtph2ps %ymm1, %zmm1
+; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm0
+; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm0
+; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    vcvtps2ph $4, %zmm0, %ymm0
-; CHECK-NEXT:    vcvtph2ps {{[0-9]+}}(%rsp), %zmm1
-; CHECK-NEXT:    vcvtph2ps {{[0-9]+}}(%rsp), %zmm2
-; CHECK-NEXT:    vaddps %zmm1, %zmm2, %zmm1
-; CHECK-NEXT:    vcvtps2ph $4, %zmm1, %ymm1
-; CHECK-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
-; CHECK-NEXT:    movq %rbp, %rsp
-; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    vinsertf64x4 $1, %ymm0, %zmm2, %zmm0
 ; CHECK-NEXT:    retq
   %1 = fadd <32 x half> %a, %b
   ret <32 x half> %1
diff --git a/llvm/test/CodeGen/X86/pr57340.ll b/llvm/test/CodeGen/X86/pr57340.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr57340.ll
@@ -0,0 +1,315 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
+
+define void @main.41() local_unnamed_addr #1 {
+; CHECK-LABEL: main.41:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpbroadcastw (%rax), %xmm0
+; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
+; CHECK-NEXT:    vmovdqu (%rax), %ymm2
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [31,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+; CHECK-NEXT:    vpermi2w %ymm0, %ymm2, %ymm1
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu (%rax), %xmm6
+; CHECK-NEXT:    vpextrw $0, %xmm6, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    vucomiss %xmm0, %xmm2
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    andl $1, %eax
+; CHECK-NEXT:    kmovw %eax, %k0
+; CHECK-NEXT:    vpsrld $16, %xmm1, %xmm0
+; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT:    vpsrld $16, %xmm6, %xmm3
+; CHECK-NEXT:    vpextrw $0, %xmm3, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm3
+; CHECK-NEXT:    vcvtph2ps %xmm3, %xmm3
+; CHECK-NEXT:    vucomiss %xmm0, %xmm3
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $14, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-5, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm4
+; CHECK-NEXT:    movzwl (%rax), %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT:    vucomiss %xmm4, %xmm0
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $13, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-9, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpsrlq $48, %xmm1, %xmm4
+; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm4
+; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm5
+; CHECK-NEXT:    vpsrlq $48, %xmm6, %xmm4
+; CHECK-NEXT:    vpextrw $0, %xmm4, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm4
+; CHECK-NEXT:    vcvtph2ps %xmm4, %xmm4
+; CHECK-NEXT:    vucomiss %xmm5, %xmm4
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $12, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-17, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,3,0,1]
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm5
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm5
+; CHECK-NEXT:    vucomiss %xmm5, %xmm0
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $11, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-33, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm5
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm7
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm5, %eax
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm5
+; CHECK-NEXT:    vcvtph2ps %xmm5, %xmm5
+; CHECK-NEXT:    vucomiss %xmm7, %xmm5
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $10, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-65, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[3,3,3,3]
+; CHECK-NEXT:    vpextrw $0, %xmm7, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm7
+; CHECK-NEXT:    vcvtph2ps %xmm7, %xmm7
+; CHECK-NEXT:    vucomiss %xmm7, %xmm0
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $9, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-129, %ax
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm7, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm7
+; CHECK-NEXT:    vcvtph2ps %xmm7, %xmm7
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm6, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm6
+; CHECK-NEXT:    vcvtph2ps %xmm6, %xmm6
+; CHECK-NEXT:    vucomiss %xmm7, %xmm6
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $8, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-257, %ax # imm = 0xFEFF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; CHECK-NEXT:    vpextrw $0, %xmm1, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm7
+; CHECK-NEXT:    vcvtph2ps %xmm7, %xmm7
+; CHECK-NEXT:    vucomiss %xmm7, %xmm2
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $7, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-513, %ax # imm = 0xFDFF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpsrld $16, %xmm1, %xmm2
+; CHECK-NEXT:    vpextrw $0, %xmm2, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    vucomiss %xmm2, %xmm3
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $6, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-1025, %ax # imm = 0xFBFF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; CHECK-NEXT:    vpextrw $0, %xmm2, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vucomiss %xmm2, %xmm0
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $5, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-2049, %ax # imm = 0xF7FF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpsrlq $48, %xmm1, %xmm2
+; CHECK-NEXT:    vpextrw $0, %xmm2, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    vucomiss %xmm2, %xmm4
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $4, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-4097, %ax # imm = 0xEFFF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; CHECK-NEXT:    vpextrw $0, %xmm2, %eax
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    vucomiss %xmm2, %xmm0
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $3, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-8193, %ax # imm = 0xDFFF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm2, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    vucomiss %xmm2, %xmm5
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    kshiftrw $2, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    movw $-16385, %ax # imm = 0xBFFF
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kandw %k1, %k0, %k0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; CHECK-NEXT:    vpextrw $0, %xmm2, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm2
+; CHECK-NEXT:    vcvtph2ps %xmm2, %xmm2
+; CHECK-NEXT:    vucomiss %xmm2, %xmm0
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $14, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k0
+; CHECK-NEXT:    kshiftlw $1, %k0, %k0
+; CHECK-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    vpextrw $0, %xmm0, %eax
+; CHECK-NEXT:    movzwl %ax, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT:    kshiftrw $1, %k0, %k0
+; CHECK-NEXT:    vucomiss %xmm0, %xmm6
+; CHECK-NEXT:    setnp %al
+; CHECK-NEXT:    sete %cl
+; CHECK-NEXT:    testb %al, %cl
+; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    kshiftlw $15, %k1, %k1
+; CHECK-NEXT:    korw %k1, %k0, %k1
+; CHECK-NEXT:    vmovdqu8 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z}
+; CHECK-NEXT:    vmovdqa %xmm0, (%rax)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+entry:
+  %.pre = load half, ptr undef, align 16
+  %vector.recur.init = insertelement <16 x half> poison, half %.pre, i64 15
+  %wide.load = load <16 x half>, ptr undef, align 2
+  %0 = shufflevector <16 x half> %vector.recur.init, <16 x half> %wide.load, <16 x i32> <i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30>
+  %1 = fcmp oeq <16 x half> %wide.load, %0
+  %2 = zext <16 x i1> %1 to <16 x i8>
+  store <16 x i8> %2, ptr undef, align 16
+  ret void
+}
+
+attributes #1 = { nounwind uwtable "target-cpu"="skx" }