diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2253,6 +2253,7 @@
       setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
     }
+    setOperationAction(ISD::FP_ROUND, MVT::v8bf16, Custom);
     addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
   }
 
@@ -2264,6 +2265,7 @@
     setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
     setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
+    setOperationAction(ISD::FP_ROUND, MVT::v16bf16, Custom);
     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32bf16, Custom);
   }
 
@@ -8479,7 +8481,8 @@
   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
     return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
 
-  if (VT.getVectorElementType() == MVT::bf16 && Subtarget.hasBF16())
+  if (VT.getVectorElementType() == MVT::bf16 &&
+      (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16()))
     return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
 
   if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
@@ -21284,6 +21287,12 @@
     return Res;
   }
 
+  if (VT.getScalarType() == MVT::bf16) {
+    if (SVT.getScalarType() == MVT::f32 && isTypeLegal(VT))
+      return Op;
+    return SDValue();
+  }
+
   if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
     if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
       return SDValue();
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -12976,6 +12976,11 @@
   def : Pat<(v16bf16 (X86VBroadcast (v8bf16 VR128X:$src))),
             (VPBROADCASTWZ256rr VR128X:$src)>;
 
+  def : Pat<(v8bf16 (X86vfpround (v8f32 VR256X:$src))),
+            (VCVTNEPS2BF16Z256rr VR256X:$src)>;
+  def : Pat<(v8bf16 (X86vfpround (loadv8f32 addr:$src))),
+            (VCVTNEPS2BF16Z256rm addr:$src)>;
+
   // TODO: No scalar broadcast due to we don't support legal scalar bf16 so far.
 }
 
@@ -12985,6 +12990,11 @@
   def : Pat<(v32bf16 (X86VBroadcast (v8bf16 VR128X:$src))),
             (VPBROADCASTWZrr VR128X:$src)>;
+
+  def : Pat<(v16bf16 (X86vfpround (v16f32 VR512:$src))),
+            (VCVTNEPS2BF16Zrr VR512:$src)>;
+  def : Pat<(v16bf16 (X86vfpround (loadv16f32 addr:$src))),
+            (VCVTNEPS2BF16Zrm addr:$src)>;
 
   // TODO: No scalar broadcast due to we don't support legal scalar bf16 so far.
} diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -8289,6 +8289,11 @@ f256mem>, T8PS; let checkVEXPredicate = 1 in defm VCVTNEPS2BF16 : VCVTNEPS2BF16_BASE, VEX, T8XS, ExplicitVEXPrefix; + + def : Pat<(v8bf16 (X86vfpround (v8f32 VR256:$src))), + (VCVTNEPS2BF16Yrr VR256:$src)>; + def : Pat<(v8bf16 (X86vfpround (loadv8f32 addr:$src))), + (VCVTNEPS2BF16Yrm addr:$src)>; } def : InstAlias<"vcvtneps2bf16x\t{$src, $dst|$dst, $src}", diff --git a/llvm/test/CodeGen/X86/bfloat.ll b/llvm/test/CodeGen/X86/bfloat.ll --- a/llvm/test/CodeGen/X86/bfloat.ll +++ b/llvm/test/CodeGen/X86/bfloat.ll @@ -1,7 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,SSE2 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512vl | FileCheck %s --check-prefixes=CHECK,F16,BF16 -; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=CHECK,F16,FP16 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,BF16 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,FP16 +; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avxneconvert | FileCheck %s --check-prefixes=CHECK,AVX,AVXNC define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind { ; SSE2-LABEL: add: @@ -21,22 +22,22 @@ ; SSE2-NEXT: popq %rbx ; SSE2-NEXT: retq ; -; F16-LABEL: add: -; F16: # %bb.0: -; F16-NEXT: pushq %rbx -; F16-NEXT: movq %rdx, %rbx -; F16-NEXT: movzwl (%rsi), %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: movzwl (%rdi), %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm1 -; F16-NEXT: vaddss %xmm0, %xmm1, %xmm0 -; F16-NEXT: callq __truncsfbf2@PLT -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: movw %ax, (%rbx) -; F16-NEXT: popq %rbx -; F16-NEXT: retq +; AVX-LABEL: add: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbx +; AVX-NEXT: movq %rdx, %rbx +; AVX-NEXT: movzwl (%rsi), %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: movzwl (%rdi), %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm1 +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: callq __truncsfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: movw %ax, (%rbx) +; AVX-NEXT: popq %rbx +; AVX-NEXT: retq %a = load bfloat, ptr %pa %b = load bfloat, ptr %pb %add = fadd bfloat %a, %b @@ -59,19 +60,19 @@ ; SSE2-NEXT: popq %rax ; SSE2-NEXT: retq ; -; F16-LABEL: add2: -; F16: # %bb.0: -; F16-NEXT: pushq %rax -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: vmovd %xmm1, %ecx -; F16-NEXT: shll $16, %ecx -; F16-NEXT: vmovd %ecx, %xmm0 -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm1 -; F16-NEXT: vaddss %xmm0, %xmm1, %xmm0 -; F16-NEXT: callq __truncsfbf2@PLT -; F16-NEXT: popq %rax -; F16-NEXT: retq +; AVX-LABEL: add2: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rax +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: vmovd %xmm1, %ecx +; AVX-NEXT: shll $16, %ecx +; AVX-NEXT: vmovd %ecx, %xmm0 +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm1 +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: callq __truncsfbf2@PLT +; AVX-NEXT: popq %rax +; AVX-NEXT: retq %add = fadd bfloat %a, %b ret bfloat %add } @@ -106,34 +107,34 @@ ; SSE2-NEXT: popq %rbp ; SSE2-NEXT: retq ; -; F16-LABEL: add_double: -; F16: # %bb.0: -; F16-NEXT: 
pushq %rbp -; F16-NEXT: pushq %r14 -; F16-NEXT: pushq %rbx -; F16-NEXT: movq %rdx, %rbx -; F16-NEXT: movq %rsi, %r14 -; F16-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; F16-NEXT: callq __truncdfbf2@PLT -; F16-NEXT: vmovd %xmm0, %ebp -; F16-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; F16-NEXT: callq __truncdfbf2@PLT -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: shll $16, %ebp -; F16-NEXT: vmovd %ebp, %xmm1 -; F16-NEXT: vaddss %xmm0, %xmm1, %xmm0 -; F16-NEXT: callq __truncsfbf2@PLT -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; F16-NEXT: vmovsd %xmm0, (%rbx) -; F16-NEXT: popq %rbx -; F16-NEXT: popq %r14 -; F16-NEXT: popq %rbp -; F16-NEXT: retq +; AVX-LABEL: add_double: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbp +; AVX-NEXT: pushq %r14 +; AVX-NEXT: pushq %rbx +; AVX-NEXT: movq %rdx, %rbx +; AVX-NEXT: movq %rsi, %r14 +; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: callq __truncdfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %ebp +; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX-NEXT: callq __truncdfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: shll $16, %ebp +; AVX-NEXT: vmovd %ebp, %xmm1 +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: callq __truncsfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vmovsd %xmm0, (%rbx) +; AVX-NEXT: popq %rbx +; AVX-NEXT: popq %r14 +; AVX-NEXT: popq %rbp +; AVX-NEXT: retq %la = load double, ptr %pa %a = fptrunc double %la to bfloat %lb = load double, ptr %pb @@ -170,30 +171,30 @@ ; SSE2-NEXT: popq %rbx ; SSE2-NEXT: retq ; -; F16-LABEL: add_double2: -; F16: # %bb.0: -; F16-NEXT: pushq %rbx -; F16-NEXT: subq $16, %rsp -; F16-NEXT: vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; F16-NEXT: callq __truncdfbf2@PLT -; F16-NEXT: vmovd %xmm0, %ebx -; F16-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload -; F16-NEXT: # xmm0 = mem[0],zero -; F16-NEXT: callq __truncdfbf2@PLT -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: shll $16, %ebx -; F16-NEXT: vmovd %ebx, %xmm1 -; F16-NEXT: vaddss %xmm0, %xmm1, %xmm0 -; F16-NEXT: callq __truncsfbf2@PLT -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; F16-NEXT: addq $16, %rsp -; F16-NEXT: popq %rbx -; F16-NEXT: retq +; AVX-LABEL: add_double2: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbx +; AVX-NEXT: subq $16, %rsp +; AVX-NEXT: vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX-NEXT: callq __truncdfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %ebx +; AVX-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload +; AVX-NEXT: # xmm0 = mem[0],zero +; AVX-NEXT: callq __truncdfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: shll $16, %ebx +; AVX-NEXT: vmovd %ebx, %xmm1 +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: callq __truncsfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; AVX-NEXT: addq $16, %rsp +; AVX-NEXT: popq %rbx +; AVX-NEXT: retq %a = fptrunc double %da to bfloat %b = fptrunc double %db to bfloat %add = fadd bfloat %a, %b @@ -216,19 +217,19 @@ ; SSE2-NEXT: popq %rbx ; SSE2-NEXT: retq 
; -; F16-LABEL: add_constant: -; F16: # %bb.0: -; F16-NEXT: pushq %rbx -; F16-NEXT: movq %rsi, %rbx -; F16-NEXT: movzwl (%rdi), %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; F16-NEXT: callq __truncsfbf2@PLT -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: movw %ax, (%rbx) -; F16-NEXT: popq %rbx -; F16-NEXT: retq +; AVX-LABEL: add_constant: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rbx +; AVX-NEXT: movq %rsi, %rbx +; AVX-NEXT: movzwl (%rdi), %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: callq __truncsfbf2@PLT +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: movw %ax, (%rbx) +; AVX-NEXT: popq %rbx +; AVX-NEXT: retq %a = load bfloat, ptr %pa %add = fadd bfloat %a, 1.0 store bfloat %add, ptr %pc @@ -247,16 +248,16 @@ ; SSE2-NEXT: popq %rax ; SSE2-NEXT: retq ; -; F16-LABEL: add_constant2: -; F16: # %bb.0: -; F16-NEXT: pushq %rax -; F16-NEXT: vmovd %xmm0, %eax -; F16-NEXT: shll $16, %eax -; F16-NEXT: vmovd %eax, %xmm0 -; F16-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 -; F16-NEXT: callq __truncsfbf2@PLT -; F16-NEXT: popq %rax -; F16-NEXT: retq +; AVX-LABEL: add_constant2: +; AVX: # %bb.0: +; AVX-NEXT: pushq %rax +; AVX-NEXT: vmovd %xmm0, %eax +; AVX-NEXT: shll $16, %eax +; AVX-NEXT: vmovd %eax, %xmm0 +; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: callq __truncsfbf2@PLT +; AVX-NEXT: popq %rax +; AVX-NEXT: retq %add = fadd bfloat %a, 1.0 ret bfloat %add } @@ -656,6 +657,120 @@ ; FP16-NEXT: popq %r15 ; FP16-NEXT: popq %rbp ; FP16-NEXT: retq +; +; AVXNC-LABEL: addv: +; AVXNC: # %bb.0: +; AVXNC-NEXT: pushq %rbp +; AVXNC-NEXT: pushq %r15 +; AVXNC-NEXT: pushq %r14 +; AVXNC-NEXT: pushq %r13 +; AVXNC-NEXT: pushq %r12 +; AVXNC-NEXT: pushq %rbx +; AVXNC-NEXT: subq $40, %rsp +; AVXNC-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill +; AVXNC-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVXNC-NEXT: vpextrw $7, %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vpextrw $7, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm2, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vpextrw $6, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vpextrw $6, %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %ebp +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vpextrw $5, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vpextrw $5, %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %r14d +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vpextrw $4, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vpextrw $4, %xmm1, %eax +; AVXNC-NEXT: shll $16, 
%eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %r15d +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vpextrw $3, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vpextrw $3, %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %r12d +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vpextrw $2, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vpextrw $2, %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %r13d +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vpextrw $1, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vpextrw $1, %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %ebx +; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; AVXNC-NEXT: vmovd %xmm1, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVXNC-NEXT: callq __truncsfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: addq $40, %rsp +; AVXNC-NEXT: popq %rbx +; AVXNC-NEXT: popq %r12 +; AVXNC-NEXT: popq %r13 +; AVXNC-NEXT: popq %r14 +; AVXNC-NEXT: popq %r15 +; AVXNC-NEXT: popq %rbp +; AVXNC-NEXT: retq %add = fadd <8 x bfloat> %a, %b ret <8 x bfloat> %add } @@ -677,6 +792,19 @@ ; F16-NEXT: vmovd %ecx, %xmm0 ; F16-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 ; F16-NEXT: retq +; +; AVXNC-LABEL: pr62997: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vmovd %xmm1, %eax +; AVXNC-NEXT: vmovd %xmm0, %ecx +; AVXNC-NEXT: vmovd %ecx, %xmm0 +; AVXNC-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; AVXNC-NEXT: shrl $16, %eax +; AVXNC-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; AVXNC-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVXNC-NEXT: retq %1 = insertelement <2 x bfloat> undef, bfloat %a, i64 0 %2 = insertelement <2 x bfloat> %1, bfloat %b, i64 1 ret <2 x bfloat> %2 @@ -695,6 +823,12 @@ ; F16: # %bb.0: ; F16-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; F16-NEXT: retq +; +; AVXNC-LABEL: pr63017: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVXNC-NEXT: vxorps %xmm1, %xmm1, %xmm1 +; AVXNC-NEXT: retq ret <32 x bfloat> 
zeroinitializer } @@ -1270,6 +1404,256 @@ ; F16-NEXT: vpbroadcastw {{.*#+}} zmm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] ; F16-NEXT: vmovdqu16 (%rax), %zmm0 {%k1} ; F16-NEXT: retq +; +; AVXNC-LABEL: pr63017_2: +; AVXNC: # %bb.0: +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_1 +; AVXNC-NEXT: # %bb.2: # %cond.load +; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm1 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] +; AVXNC-NEXT: vpbroadcastw {{.*#+}} xmm0 = [49024,49024,49024,49024,49024,49024,49024,49024] +; AVXNC-NEXT: vpinsrw $0, (%rax), %xmm0, %xmm0 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7] +; AVXNC-NEXT: jmp .LBB12_3 +; AVXNC-NEXT: .LBB12_1: +; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] +; AVXNC-NEXT: vmovdqa %ymm0, %ymm1 +; AVXNC-NEXT: .LBB12_3: # %else +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_5 +; AVXNC-NEXT: # %bb.4: # %cond.load1 +; AVXNC-NEXT: vpinsrw $1, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_5: # %else2 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_7 +; AVXNC-NEXT: # %bb.6: # %cond.load4 +; AVXNC-NEXT: vpinsrw $2, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_7: # %else5 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_9 +; AVXNC-NEXT: # %bb.8: # %cond.load7 +; AVXNC-NEXT: vpinsrw $3, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_9: # %else8 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_11 +; AVXNC-NEXT: # %bb.10: # %cond.load10 +; AVXNC-NEXT: vpinsrw $4, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_11: # %else11 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_13 +; AVXNC-NEXT: # %bb.12: # %cond.load13 +; AVXNC-NEXT: vpinsrw $5, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_13: # %else14 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_15 +; AVXNC-NEXT: # %bb.14: # %cond.load16 +; AVXNC-NEXT: vpinsrw $6, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_15: # %else17 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_17 +; AVXNC-NEXT: # %bb.16: # %cond.load19 +; AVXNC-NEXT: vpinsrw $7, (%rax), %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: .LBB12_17: # %else20 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_19 +; AVXNC-NEXT: # %bb.18: # %cond.load22 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_19: # %else23 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_21 
+; AVXNC-NEXT: # %bb.20: # %cond.load25 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_21: # %else26 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_23 +; AVXNC-NEXT: # %bb.22: # %cond.load28 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4,5,6,7,8,9],ymm2[10],ymm0[11,12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_23: # %else29 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_25 +; AVXNC-NEXT: # %bb.24: # %cond.load31 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3],ymm0[4,5,6,7,8,9,10],ymm2[11],ymm0[12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_25: # %else32 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_27 +; AVXNC-NEXT: # %bb.26: # %cond.load34 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7,8,9,10,11],ymm2[12],ymm0[13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_27: # %else35 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_29 +; AVXNC-NEXT: # %bb.28: # %cond.load37 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7,8,9,10,11,12],ymm2[13],ymm0[14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_29: # %else38 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_31 +; AVXNC-NEXT: # %bb.30: # %cond.load40 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6],ymm0[7,8,9,10,11,12,13],ymm2[14],ymm0[15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_31: # %else41 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_33 +; AVXNC-NEXT: # %bb.32: # %cond.load43 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5,6],ymm2[7],ymm0[8,9,10,11,12,13,14],ymm2[15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_33: # %else44 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_35 +; AVXNC-NEXT: # %bb.34: # %cond.load46 +; AVXNC-NEXT: vpinsrw $0, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_35: # %else47 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_37 +; AVXNC-NEXT: # %bb.36: # %cond.load49 +; AVXNC-NEXT: vpinsrw $1, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_37: # %else50 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_39 +; AVXNC-NEXT: # %bb.38: # %cond.load52 +; AVXNC-NEXT: vpinsrw $2, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_39: # %else53 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_41 +; AVXNC-NEXT: # %bb.40: # 
%cond.load55 +; AVXNC-NEXT: vpinsrw $3, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_41: # %else56 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_43 +; AVXNC-NEXT: # %bb.42: # %cond.load58 +; AVXNC-NEXT: vpinsrw $4, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_43: # %else59 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_45 +; AVXNC-NEXT: # %bb.44: # %cond.load61 +; AVXNC-NEXT: vpinsrw $5, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_45: # %else62 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_47 +; AVXNC-NEXT: # %bb.46: # %cond.load64 +; AVXNC-NEXT: vpinsrw $6, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_47: # %else65 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_49 +; AVXNC-NEXT: # %bb.48: # %cond.load67 +; AVXNC-NEXT: vpinsrw $7, (%rax), %xmm1, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVXNC-NEXT: .LBB12_49: # %else68 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_51 +; AVXNC-NEXT: # %bb.50: # %cond.load70 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_51: # %else71 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_53 +; AVXNC-NEXT: # %bb.52: # %cond.load73 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7,8],ymm2[9],ymm1[10,11,12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_53: # %else74 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_55 +; AVXNC-NEXT: # %bb.54: # %cond.load76 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6,7,8,9],ymm2[10],ymm1[11,12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_55: # %else77 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_57 +; AVXNC-NEXT: # %bb.56: # %cond.load79 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7,8,9,10],ymm2[11],ymm1[12,13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_57: # %else80 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_59 +; AVXNC-NEXT: # %bb.58: # %cond.load82 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7,8,9,10,11],ymm2[12],ymm1[13,14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_59: # %else83 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_61 +; AVXNC-NEXT: # %bb.60: # %cond.load85 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7,8,9,10,11,12],ymm2[13],ymm1[14,15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] 
+; AVXNC-NEXT: .LBB12_61: # %else86 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_63 +; AVXNC-NEXT: # %bb.62: # %cond.load88 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7,8,9,10,11,12,13],ymm2[14],ymm1[15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_63: # %else89 +; AVXNC-NEXT: xorl %eax, %eax +; AVXNC-NEXT: testb %al, %al +; AVXNC-NEXT: jne .LBB12_65 +; AVXNC-NEXT: # %bb.64: # %cond.load91 +; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 +; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5,6],ymm2[7],ymm1[8,9,10,11,12,13,14],ymm2[15] +; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVXNC-NEXT: .LBB12_65: # %else92 +; AVXNC-NEXT: retq %1 = call <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr poison, i32 2, <32 x i1> poison, <32 x bfloat> ) ret <32 x bfloat> %1 } @@ -1295,6 +1679,13 @@ ; F16-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1 ; F16-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0 ; F16-NEXT: retq +; +; AVXNC-LABEL: pr62997_3: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vmovd %xmm2, %eax +; AVXNC-NEXT: vpinsrw $1, %eax, %xmm0, %xmm2 +; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] +; AVXNC-NEXT: retq %3 = insertelement <32 x bfloat> %0, bfloat %1, i64 1 ret <32 x bfloat> %3 } @@ -1332,6 +1723,25 @@ ; F16-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 ; F16-NEXT: vzeroupper ; F16-NEXT: retq +; +; AVXNC-LABEL: pr64460_1: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vpextrw $1, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVXNC-NEXT: vpextrw $2, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVXNC-NEXT: vpextrw $3, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVXNC-NEXT: retq %b = fpext <4 x bfloat> %a to <4 x float> ret <4 x float> %b } @@ -1382,6 +1792,41 @@ ; F16-NEXT: vmovdqa {{.*#+}} ymm1 = [8,0,8,1,8,2,8,3,8,4,8,5,8,6,8,7] ; F16-NEXT: vpermw %ymm0, %ymm1, %ymm0 ; F16-NEXT: retq +; +; AVXNC-LABEL: pr64460_2: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vpextrw $5, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vpextrw $4, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVXNC-NEXT: vpextrw $6, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVXNC-NEXT: vpextrw $7, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] +; AVXNC-NEXT: vpextrw $1, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVXNC-NEXT: vpextrw $2, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVXNC-NEXT: vpextrw $3, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, 
%xmm0 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0] +; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXNC-NEXT: retq %b = fpext <8 x bfloat> %a to <8 x float> ret <8 x float> %b } @@ -1467,6 +1912,74 @@ ; F16-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,0,16,1,16,2,16,3,16,4,16,5,16,6,16,7,16,8,16,9,16,10,16,11,16,12,16,13,16,14,16,15] ; F16-NEXT: vpermw %zmm0, %zmm1, %zmm0 ; F16-NEXT: retq +; +; AVXNC-LABEL: pr64460_3: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vpextrw $5, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vpextrw $4, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVXNC-NEXT: vpextrw $6, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVXNC-NEXT: vpextrw $7, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0] +; AVXNC-NEXT: vpextrw $1, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3] +; AVXNC-NEXT: vpextrw $2, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3] +; AVXNC-NEXT: vpextrw $3, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0] +; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2 +; AVXNC-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVXNC-NEXT: vpextrw $5, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vpextrw $4, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3] +; AVXNC-NEXT: vpextrw $6, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3] +; AVXNC-NEXT: vpextrw $7, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0] +; AVXNC-NEXT: vpextrw $1, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm4 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3] +; AVXNC-NEXT: vpextrw $2, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm4 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3] +; AVXNC-NEXT: vpextrw $3, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0] +; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVXNC-NEXT: vmovaps %ymm2, %ymm0 +; AVXNC-NEXT: retq %b = fpext <16 x bfloat> %a to <16 x float> ret <16 x float> %b } @@ -1524,6 +2037,575 @@ ; F16-NEXT: vpermw %ymm0, %ymm1, %ymm0 ; F16-NEXT: vcvtps2pd %ymm0, %zmm0 ; F16-NEXT: retq +; +; AVXNC-LABEL: pr64460_4: +; AVXNC: # %bb.0: +; AVXNC-NEXT: vpextrw $3, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; AVXNC-NEXT: vpextrw $2, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: 
vcvtss2sd %xmm2, %xmm2, %xmm2 +; AVXNC-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVXNC-NEXT: vpextrw $1, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm2 +; AVXNC-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; AVXNC-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0] +; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2 +; AVXNC-NEXT: vpextrw $7, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm1 +; AVXNC-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; AVXNC-NEXT: vpextrw $6, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; AVXNC-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVXNC-NEXT: vpextrw $5, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm3 +; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; AVXNC-NEXT: vpextrw $4, %xmm0, %eax +; AVXNC-NEXT: shll $16, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; AVXNC-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVXNC-NEXT: vmovaps %ymm2, %ymm0 +; AVXNC-NEXT: retq %b = fpext <8 x bfloat> %a to <8 x double> ret <8 x double> %b } + +define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind { +; SSE2-LABEL: fptrunc_v4f32: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: subq $32, %rsp +; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebx +; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: movd %xmm0, %r14d +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: pinsrw $0, %eax, %xmm0 +; SSE2-NEXT: pinsrw $0, %r14d, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: pinsrw $0, %ebp, %xmm0 +; SSE2-NEXT: pinsrw $0, %ebx, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: addq $32, %rsp +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; F16-LABEL: fptrunc_v4f32: +; F16: # %bb.0: +; F16-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; F16-NEXT: vcvtneps2bf16 %ymm0, %xmm0 +; F16-NEXT: vzeroupper +; F16-NEXT: retq +; +; AVXNC-LABEL: fptrunc_v4f32: +; AVXNC: # %bb.0: +; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0 +; AVXNC-NEXT: 
vmovq %xmm0, %rax +; AVXNC-NEXT: movq %rax, %rcx +; AVXNC-NEXT: movq %rax, %rdx +; AVXNC-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; AVXNC-NEXT: # kill: def $eax killed $eax killed $rax +; AVXNC-NEXT: shrl $16, %eax +; AVXNC-NEXT: shrq $32, %rcx +; AVXNC-NEXT: shrq $48, %rdx +; AVXNC-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1 +; AVXNC-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm2 +; AVXNC-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; AVXNC-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; AVXNC-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; AVXNC-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: retq + %b = fptrunc <4 x float> %a to <4 x bfloat> + ret <4 x bfloat> %b +} + +define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind { +; SSE2-LABEL: fptrunc_v8f32: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: subq $32, %rsp +; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebx +; SSE2-NEXT: shll $16, %ebx +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r14d +; SSE2-NEXT: orl %ebx, %r14d +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %ebx +; SSE2-NEXT: orl %ebp, %ebx +; SSE2-NEXT: shlq $32, %rbx +; SSE2-NEXT: orq %r14, %rbx +; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r14d +; SSE2-NEXT: orl %ebp, %r14d +; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %eax +; SSE2-NEXT: orl %ebp, %eax +; SSE2-NEXT: shlq $32, %rax +; SSE2-NEXT: orq %r14, %rax +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: movq %rbx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: addq $32, %rsp +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; F16-LABEL: fptrunc_v8f32: +; F16: # %bb.0: +; F16-NEXT: vcvtneps2bf16 %ymm0, %xmm0 +; F16-NEXT: vzeroupper +; F16-NEXT: retq +; +; AVXNC-LABEL: fptrunc_v8f32: +; AVXNC: # %bb.0: +; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0 +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: retq + %b = fptrunc <8 x float> %a to <8 x bfloat> + ret <8 x bfloat> %b +} + +define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind { +; 
SSE2-LABEL: fptrunc_v16f32: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r15 +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %r12 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: subq $64, %rsp +; SSE2-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill +; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebx +; SSE2-NEXT: shll $16, %ebx +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r14d +; SSE2-NEXT: orl %ebx, %r14d +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %ebx +; SSE2-NEXT: orl %ebp, %ebx +; SSE2-NEXT: shlq $32, %rbx +; SSE2-NEXT: orq %r14, %rbx +; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r15d +; SSE2-NEXT: orl %ebp, %r15d +; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r14d +; SSE2-NEXT: orl %ebp, %r14d +; SSE2-NEXT: shlq $32, %r14 +; SSE2-NEXT: orq %r15, %r14 +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r12d +; SSE2-NEXT: orl %ebp, %r12d +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r15d +; SSE2-NEXT: orl %ebp, %r15d +; SSE2-NEXT: shlq $32, %r15 +; SSE2-NEXT: orq %r12, %r15 +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; 
SSE2-NEXT: movzwl %ax, %r12d +; SSE2-NEXT: orl %ebp, %r12d +; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncsfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %eax +; SSE2-NEXT: orl %ebp, %eax +; SSE2-NEXT: shlq $32, %rax +; SSE2-NEXT: orq %r12, %rax +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: movq %r15, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: movq %r14, %xmm2 +; SSE2-NEXT: movq %rbx, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: addq $64, %rsp +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r12 +; SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %r15 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; F16-LABEL: fptrunc_v16f32: +; F16: # %bb.0: +; F16-NEXT: vcvtneps2bf16 %zmm0, %ymm0 +; F16-NEXT: retq +; +; AVXNC-LABEL: fptrunc_v16f32: +; AVXNC: # %bb.0: +; AVXNC-NEXT: pushq %rbp +; AVXNC-NEXT: movq %rsp, %rbp +; AVXNC-NEXT: andq $-32, %rsp +; AVXNC-NEXT: subq $64, %rsp +; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm1, %xmm1 +; AVXNC-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) +; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0 +; AVXNC-NEXT: vmovaps %xmm0, (%rsp) +; AVXNC-NEXT: vmovaps (%rsp), %ymm0 +; AVXNC-NEXT: movq %rbp, %rsp +; AVXNC-NEXT: popq %rbp +; AVXNC-NEXT: retq + %b = fptrunc <16 x float> %a to <16 x bfloat> + ret <16 x bfloat> %b +} + +define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind { +; SSE2-LABEL: fptrunc_v8f64: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: subq $64, %rsp +; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebx +; SSE2-NEXT: shll $16, %ebx +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r14d +; SSE2-NEXT: orl %ebx, %r14d +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %ebx +; SSE2-NEXT: orl %ebp, %ebx +; SSE2-NEXT: shlq $32, %rbx +; SSE2-NEXT: orq %r14, %rbx +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %r14d +; SSE2-NEXT: orl %ebp, %r14d +; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %ebp +; SSE2-NEXT: shll $16, %ebp +; SSE2-NEXT: 
movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE2-NEXT: callq __truncdfbf2@PLT +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movzwl %ax, %eax +; SSE2-NEXT: orl %ebp, %eax +; SSE2-NEXT: shlq $32, %rax +; SSE2-NEXT: orq %r14, %rax +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: movq %rbx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: addq $64, %rsp +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; F16-LABEL: fptrunc_v8f64: +; F16: # %bb.0: +; F16-NEXT: pushq %rbp +; F16-NEXT: pushq %r15 +; F16-NEXT: pushq %r14 +; F16-NEXT: pushq %r13 +; F16-NEXT: pushq %r12 +; F16-NEXT: pushq %rbx +; F16-NEXT: subq $136, %rsp +; F16-NEXT: vmovupd %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill +; F16-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0] +; F16-NEXT: vzeroupper +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload +; F16-NEXT: vextractf128 $1, %ymm0, %xmm0 +; F16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; F16-NEXT: vzeroupper +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; F16-NEXT: # xmm0 = mem[1,0] +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload +; F16-NEXT: vextractf32x4 $2, %zmm0, %xmm0 +; F16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; F16-NEXT: vzeroupper +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; F16-NEXT: # xmm0 = mem[1,0] +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload +; F16-NEXT: vextractf32x4 $3, %zmm0, %xmm0 +; F16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; F16-NEXT: vzeroupper +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; F16-NEXT: # xmm0 = mem[1,0] +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; F16-NEXT: # xmm0 = mem[0],zero,zero,zero +; F16-NEXT: vmovd %xmm0, %ebp +; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; F16-NEXT: # xmm0 = mem[0],zero,zero,zero +; F16-NEXT: vmovd %xmm0, %r14d +; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; F16-NEXT: # xmm0 = mem[0],zero,zero,zero +; F16-NEXT: vmovd %xmm0, %r15d +; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; F16-NEXT: # xmm0 = mem[0],zero,zero,zero +; F16-NEXT: vmovd %xmm0, %r12d +; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; F16-NEXT: # xmm0 = mem[0],zero,zero,zero +; F16-NEXT: vmovd %xmm0, %r13d +; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; F16-NEXT: # xmm0 = mem[0],zero,zero,zero +; F16-NEXT: vmovd %xmm0, %ebx +; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload +; F16-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 +; 
F16-NEXT: vzeroupper +; F16-NEXT: callq __truncdfbf2@PLT +; F16-NEXT: vmovd %xmm0, %eax +; F16-NEXT: vmovd %eax, %xmm0 +; F16-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 +; F16-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 +; F16-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 +; F16-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 +; F16-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; F16-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 +; F16-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload +; F16-NEXT: addq $136, %rsp +; F16-NEXT: popq %rbx +; F16-NEXT: popq %r12 +; F16-NEXT: popq %r13 +; F16-NEXT: popq %r14 +; F16-NEXT: popq %r15 +; F16-NEXT: popq %rbp +; F16-NEXT: retq +; +; AVXNC-LABEL: fptrunc_v8f64: +; AVXNC: # %bb.0: +; AVXNC-NEXT: pushq %rbp +; AVXNC-NEXT: pushq %r15 +; AVXNC-NEXT: pushq %r14 +; AVXNC-NEXT: pushq %r13 +; AVXNC-NEXT: pushq %r12 +; AVXNC-NEXT: pushq %rbx +; AVXNC-NEXT: subq $120, %rsp +; AVXNC-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVXNC-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVXNC-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0] +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVXNC-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[1,0] +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[1,0] +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVXNC-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[1,0] +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero +; AVXNC-NEXT: vmovd %xmm0, %ebp +; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero +; AVXNC-NEXT: vmovd %xmm0, %r14d +; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero +; AVXNC-NEXT: vmovd %xmm0, %r15d +; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero +; AVXNC-NEXT: vmovd %xmm0, %r12d +; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero +; 
AVXNC-NEXT: vmovd %xmm0, %r13d +; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero +; AVXNC-NEXT: vmovd %xmm0, %ebx +; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 +; AVXNC-NEXT: vzeroupper +; AVXNC-NEXT: callq __truncdfbf2@PLT +; AVXNC-NEXT: vmovd %xmm0, %eax +; AVXNC-NEXT: vmovd %eax, %xmm0 +; AVXNC-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 +; AVXNC-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload +; AVXNC-NEXT: addq $120, %rsp +; AVXNC-NEXT: popq %rbx +; AVXNC-NEXT: popq %r12 +; AVXNC-NEXT: popq %r13 +; AVXNC-NEXT: popq %r14 +; AVXNC-NEXT: popq %r15 +; AVXNC-NEXT: popq %rbp +; AVXNC-NEXT: retq + %b = fptrunc <8 x double> %a to <8 x bfloat> + ret <8 x bfloat> %b +}
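
For a quick end-to-end view of what the new FP_ROUND handling and X86vfpround patterns buy, here is a condensed, hand-written reproducer distilled from the fptrunc_v8f32 case above. This is a sketch only: it assumes an llc built with this patch applied, and the CHECK lines are abbreviated by hand rather than autogenerated by update_llc_test_checks.py.

; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avxneconvert | FileCheck %s
define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind {
; CHECK-LABEL: fptrunc_v8f32:
; CHECK:       {vex} vcvtneps2bf16 %ymm0, %xmm0
  %b = fptrunc <8 x float> %a to <8 x bfloat>
  ret <8 x bfloat> %b
}

The same X86vfpround patterns also cover the AVX512BF16 forms (VCVTNEPS2BF16Z256rr/Zrr and their load variants), which is why the fptrunc_v8f32 and fptrunc_v16f32 tests above collapse to a single vcvtneps2bf16 on the F16 run lines as well.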