diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1091,6 +1091,8 @@
         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
         SelectionDAG &DAG) const override;
 
+    bool preferScalarizeSplat(unsigned Opc) const override;
+
     bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                            CombineLevel Level) const override;
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6012,6 +6012,10 @@
   return NewShiftOpcode == ISD::SHL;
 }
 
+bool X86TargetLowering::preferScalarizeSplat(unsigned Opc) const {
+  return Opc != ISD::FP_EXTEND;
+}
+
 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
     const SDNode *N, CombineLevel Level) const {
   assert(((N->getOpcode() == ISD::SHL &&
diff --git a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
--- a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
+++ b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
@@ -2,23 +2,27 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512FP16
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512FP16
 
 define <2 x double> @prefer_f32_v2f64(ptr %p) nounwind {
 ; SSE-LABEL: prefer_f32_v2f64:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: prefer_f32_v2f64:
 ; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
+; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0
 ; AVX-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f32_v2f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vcvtps2pd (%rdi){1to2}, %xmm0
+; AVX512-NEXT:    retq
 entry:
   %0 = load float, ptr %p, align 4
   %vecinit.i = insertelement <2 x float> undef, float %0, i64 0
@@ -31,39 +35,21 @@
 ; SSE-LABEL: prefer_f32_v4f64:
 ; SSE:       # %bb.0: # %entry
 ; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
-; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
 ; SSE-NEXT:    movaps %xmm0, %xmm1
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: prefer_f32_v4f64:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: prefer_f32_v4f64:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
-; AVX2-NEXT:    retq
+; AVX-LABEL: prefer_f32_v4f64:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
+; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: prefer_f32_v4f64:
 ; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT:    vcvtps2pd (%rdi){1to4}, %ymm0
 ; AVX512-NEXT:    retq
-;
-; AVX512FP16-LABEL: prefer_f32_v4f64:
-; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512FP16-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastsd %xmm0, %ymm0
-; AVX512FP16-NEXT:    retq
 entry:
   %0 = load float, ptr %p, align 4
   %vecinit.i = insertelement <4 x float> undef, float %0, i64 0
@@ -100,19 +86,15 @@
 ; AVX2-NEXT:    popq %rax
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v4f32:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v4f32:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v4f32:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastss %xmm0, %xmm0
+; AVX512FP16-NEXT:    vcvtph2psx (%rdi){1to4}, %xmm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
@@ -152,19 +134,15 @@
 ; AVX2-NEXT:    popq %rax
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v8f32:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastss %xmm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v8f32:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %ymm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v8f32:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2ss %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastss %xmm0, %ymm0
+; AVX512FP16-NEXT:    vcvtph2psx (%rdi){1to8}, %ymm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
@@ -185,40 +163,28 @@
 ; SSE-NEXT:    popq %rax
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: prefer_f16_v2f64:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    pushq %rax
-; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX1-NEXT:    popq %rax
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: prefer_f16_v2f64:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    pushq %rax
-; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX2-NEXT:    popq %rax
-; AVX2-NEXT:    retq
+; AVX-LABEL: prefer_f16_v2f64:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rax
+; AVX-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    popq %rax
+; AVX-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v2f64:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v2f64:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT:    vcvtps2pd %xmm0, %xmm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v2f64:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX512FP16-NEXT:    vcvtph2pd (%rdi){1to2}, %xmm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
@@ -261,20 +227,16 @@
 ; AVX2-NEXT:    popq %rax
 ; AVX2-NEXT:    retq
 ;
-; AVX512-LABEL: prefer_f16_v4f64:
-; AVX512:       # %bb.0: # %entry
-; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    vmovd %eax, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512-NEXT:    vbroadcastsd %xmm0, %ymm0
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: prefer_f16_v4f64:
+; AVX512F:       # %bb.0: # %entry
+; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT:    vcvtps2pd %xmm0, %ymm0
+; AVX512F-NEXT:    retq
 ;
 ; AVX512FP16-LABEL: prefer_f16_v4f64:
 ; AVX512FP16:       # %bb.0: # %entry
-; AVX512FP16-NEXT:    vmovsh (%rdi), %xmm0
-; AVX512FP16-NEXT:    vcvtsh2sd %xmm0, %xmm0, %xmm0
-; AVX512FP16-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX512FP16-NEXT:    vcvtph2pd (%rdi){1to4}, %ymm0
 ; AVX512FP16-NEXT:    retq
 entry:
   %0 = load half, ptr %p, align 4
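Note on the hook (editorial context, not part of the patch): preferScalarizeSplat
is the TargetLowering hook that DAGCombiner consults when it sees a splatted
unary operation. Returning true (the default) prefers the scalarized form,
splat (op x); returning false keeps the operation in vector form, op (splat x).
The X86 override opts out only for ISD::FP_EXTEND, so the broadcast is
materialized before the extend and the load can fold into the packed
conversions, which is what replaces the old vmovss + vcvtss2sd + vmovddup
sequences with vcvtps2pd (%rdi){1to2} and friends in the updated checks above.

A minimal standalone reproducer with the same IR shape as prefer_f32_v2f64
(the function name fpext_of_splat is illustrative, not from the patch):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl
; Expected output per the AVX512 check above:
;   vcvtps2pd (%rdi){1to2}, %xmm0
define <2 x double> @fpext_of_splat(ptr %p) nounwind {
entry:
  %f = load float, ptr %p, align 4
  %ins = insertelement <2 x float> undef, float %f, i64 0
  %splat = shufflevector <2 x float> %ins, <2 x float> undef, <2 x i32> zeroinitializer
  %ext = fpext <2 x float> %splat to <2 x double>
  ret <2 x double> %ext
}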