diff --git a/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll @@ -0,0 +1,2889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK-SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX512F,CHECK-NO-FASTFMA +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skx -fp-contract=fast | FileCheck %s --check-prefixes=CHECK-AVX,CHECK-AVX512F,CHECK-FMA + +declare i16 @llvm.umax.i16(i16, i16) +declare i64 @llvm.umin.i64(i64, i64) + +declare <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float>, <4 x i32>) + +define <4 x float> @fmul_pow2_4xfloat(<4 x i32> %i) { +; CHECK-SSE-LABEL: fmul_pow2_4xfloat: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pslld $23, %xmm0 +; CHECK-SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: cvttps2dq %xmm0, %xmm0 +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,65535,0,65535,0] +; CHECK-SSE-NEXT: pand %xmm0, %xmm1 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: psrld $16, %xmm0 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: addps %xmm1, %xmm0 +; CHECK-SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow2_4xfloat: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; CHECK-AVX2-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1258291200,1258291200,1258291200,1258291200] +; CHECK-AVX2-NEXT: vpblendw 
{{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] +; CHECK-AVX2-NEXT: vpsrld $16, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1392508928,1392508928,1392508928,1392508928] +; CHECK-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7] +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11] +; CHECK-AVX2-NEXT: vsubps %xmm2, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.0E+0,9.0E+0,9.0E+0,9.0E+0] +; CHECK-AVX2-NEXT: vmulps %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow2_4xfloat: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtudq2ps %zmm0, %zmm0 +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.0E+0,9.0E+0,9.0E+0,9.0E+0] +; CHECK-NO-FASTFMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vzeroupper +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow2_4xfloat: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; CHECK-FMA-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtudq2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %p2 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %i + %p2_f = uitofp <4 x i32> %p2 to <4 x float> + %r = fmul <4 x float> <float 9.000000e+00, float 9.000000e+00, float 9.000000e+00, float 9.000000e+00>, %p2_f + ret <4 x float> %r +} + +define <4 x float> @fmul_pow2_ldexp_4xfloat(<4 x i32> %i) { +; CHECK-SSE-LABEL: fmul_pow2_ldexp_4xfloat: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: subq $56, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 64 +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3] +; CHECK-SSE-NEXT: movd %xmm1, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = 
mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = mem[2,3,2,3] +; CHECK-SSE-NEXT: movd %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: movd %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = mem[1,1,1,1] +; CHECK-SSE-NEXT: movd %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm1 = xmm1[0],mem[0] +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: addq $56, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 8 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX-LABEL: fmul_pow2_ldexp_4xfloat: +; CHECK-AVX: # %bb.0: +; CHECK-AVX-NEXT: subq $40, %rsp +; CHECK-AVX-NEXT: .cfi_def_cfa_offset 48 +; CHECK-AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX-NEXT: vextractps $1, %xmm0, %edi +; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX-NEXT: callq ldexpf@PLT +; CHECK-AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte 
Reload +; CHECK-AVX-NEXT: vmovd %xmm0, %edi +; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX-NEXT: callq ldexpf@PLT +; CHECK-AVX-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3] +; CHECK-AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-AVX-NEXT: vextractps $2, %xmm0, %edi +; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX-NEXT: callq ldexpf@PLT +; CHECK-AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload +; CHECK-AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3] +; CHECK-AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-AVX-NEXT: vextractps $3, %xmm0, %edi +; CHECK-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX-NEXT: callq ldexpf@PLT +; CHECK-AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload +; CHECK-AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; CHECK-AVX-NEXT: addq $40, %rsp +; CHECK-AVX-NEXT: .cfi_def_cfa_offset 8 +; CHECK-AVX-NEXT: retq + %r = call <4 x float> @llvm.ldexp.v4f32.v4i32(<4 x float> <float 9.000000e+00, float 9.000000e+00, float 9.000000e+00, float 9.000000e+00>, <4 x i32> %i) + ret <4 x float> %r +} + +define <4 x float> @fdiv_pow2_4xfloat(<4 x i32> %i) { +; CHECK-SSE-LABEL: fdiv_pow2_4xfloat: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pslld $23, %xmm0 +; CHECK-SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: cvttps2dq %xmm0, %xmm1 +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,0,65535,0,65535,0] +; CHECK-SSE-NEXT: pand %xmm1, %xmm0 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: psrld $16, %xmm1 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: addps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [9.0E+0,9.0E+0,9.0E+0,9.0E+0] +; 
CHECK-SSE-NEXT: divps %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow2_4xfloat: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; CHECK-AVX2-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1258291200,1258291200,1258291200,1258291200] +; CHECK-AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] +; CHECK-AVX2-NEXT: vpsrld $16, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1392508928,1392508928,1392508928,1392508928] +; CHECK-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7] +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11] +; CHECK-AVX2-NEXT: vsubps %xmm2, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.0E+0,9.0E+0,9.0E+0,9.0E+0] +; CHECK-AVX2-NEXT: vdivps %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow2_4xfloat: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtudq2ps %zmm0, %zmm0 +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.0E+0,9.0E+0,9.0E+0,9.0E+0] +; CHECK-NO-FASTFMA-NEXT: vdivps %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vzeroupper +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow2_4xfloat: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1] +; CHECK-FMA-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtudq2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.0E+0,9.0E+0,9.0E+0,9.0E+0] +; CHECK-FMA-NEXT: vdivps %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %p2 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %i + %p2_f = uitofp <4 x i32> %p2 to <4 x float> + %r = fdiv <4 x float> <float 9.000000e+00, float 9.000000e+00, float 9.000000e+00, float 9.000000e+00>, %p2_f + ret <4 x float> %r +} + +declare <8 
x half> @llvm.ldexp.v8f16.v8i16(<8 x half>, <8 x i16>) + +define <8 x half> @fmul_pow2_8xhalf(<8 x i16> %i) { +; CHECK-SSE-LABEL: fmul_pow2_8xhalf: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: subq $88, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SSE-NEXT: movdqa %xmm0, %xmm1 +; CHECK-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; CHECK-SSE-NEXT: pslld $23, %xmm1 +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1065353216,1065353216,1065353216,1065353216] +; CHECK-SSE-NEXT: paddd %xmm2, %xmm1 +; CHECK-SSE-NEXT: cvttps2dq %xmm1, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; CHECK-SSE-NEXT: pslld $23, %xmm0 +; CHECK-SSE-NEXT: paddd %xmm2, %xmm0 +; CHECK-SSE-NEXT: cvttps2dq %xmm0, %xmm0 +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: pextrw $0, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $2, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $4, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $6, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, 
{{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $0, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $2, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $4, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $6, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; 
CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: punpckldq (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 
16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0] +; CHECK-SSE-NEXT: addq $88, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 8 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow2_8xhalf: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: subq $120, %rsp +; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 128 +; CHECK-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] +; CHECK-AVX2-NEXT: vpsllvd %ymm0, %ymm1, %ymm0 +; CHECK-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u] +; CHECK-AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; CHECK-AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; CHECK-AVX2-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, 
{{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; 
CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq 
__truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0] +; CHECK-AVX2-NEXT: addq $120, %rsp +; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 8 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow2_8xhalf: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpmovzxwd {{.*#+}} ymm0 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; CHECK-NO-FASTFMA-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvd %ymm0, %ymm1, %ymm1 +; CHECK-NO-FASTFMA-NEXT: vpmovdw %zmm1, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm2, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm3, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; CHECK-NO-FASTFMA-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm3, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm4, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; CHECK-NO-FASTFMA-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; CHECK-NO-FASTFMA-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm3, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm4 +; 
CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm4, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; CHECK-NO-FASTFMA-NEXT: vpextrw $0, %xmm1, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm1, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; CHECK-NO-FASTFMA-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; CHECK-NO-FASTFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} ymm1 = [8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3] +; CHECK-NO-FASTFMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %ymm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vzeroupper +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow2_8xhalf: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1] +; CHECK-FMA-NEXT: vpsllvw %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm1 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; CHECK-FMA-NEXT: vmovd %xmm1, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; CHECK-FMA-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; 
CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm3, %xmm3 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-FMA-NEXT: vmovd %xmm3, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm4 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; CHECK-FMA-NEXT: vmovd %xmm4, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-FMA-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm2 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm4 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm3 +; CHECK-FMA-NEXT: vmovd %xmm3, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; CHECK-FMA-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; 
CHECK-FMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %ymm0 +; CHECK-FMA-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %ymm0, %xmm0 +; CHECK-FMA-NEXT: vzeroupper +; CHECK-FMA-NEXT: retq + %p2 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %i + %p2_f = uitofp <8 x i16> %p2 to <8 x half> + %r = fmul <8 x half> <half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000, half 0xH7000>, %p2_f + ret <8 x half> %r +} + +define <8 x half> @fmul_pow2_ldexp_8xhalf(<8 x i16> %i) { +; CHECK-SSE-LABEL: fmul_pow2_ldexp_8xhalf: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: subq $72, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 80 +; CHECK-SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: pextrw $7, %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $6, %xmm0, %edi +; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $5, %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $4, %xmm0, %edi +; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd 
{{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $3, %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $2, %xmm0, %edi +; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $1, %xmm0, %edi +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: movd %xmm0, %eax +; CHECK-SSE-NEXT: movzwl %ax, %edi +; CHECK-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq ldexpf@PLT +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded 
Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: punpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0] +; CHECK-SSE-NEXT: addq $72, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 8 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow2_ldexp_8xhalf: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: subq $72, %rsp +; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 80 +; CHECK-AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-AVX2-NEXT: vpextrw $7, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $6, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $5, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $4, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = 
xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $3, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $2, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $1, %xmm0, %edi +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vmovd %xmm0, %eax +; CHECK-AVX2-NEXT: movzwl %ax, %edi +; CHECK-AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq ldexpf@PLT +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload 
+; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0] +; CHECK-AVX2-NEXT: addq $72, %rsp +; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 8 +; CHECK-AVX2-NEXT: retq +; +; CHECK-AVX512F-LABEL: fmul_pow2_ldexp_8xhalf: +; CHECK-AVX512F: # %bb.0: +; CHECK-AVX512F-NEXT: subq $72, %rsp +; CHECK-AVX512F-NEXT: .cfi_def_cfa_offset 80 +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-AVX512F-NEXT: vpextrw $7, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vpextrw $6, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vpextrw $5, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: 
vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vpextrw $4, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX512F-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vpextrw $3, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vpextrw $2, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vpextrw $1, %xmm0, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = 
mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX512F-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: movzwl %ax, %edi +; CHECK-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-AVX512F-NEXT: callq ldexpf@PLT +; CHECK-AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vmovd %xmm0, %eax +; CHECK-AVX512F-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-AVX512F-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX512F-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX512F-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX512F-NEXT: # xmm0 = xmm0[0],mem[0] +; CHECK-AVX512F-NEXT: addq $72, %rsp +; CHECK-AVX512F-NEXT: .cfi_def_cfa_offset 8 +; CHECK-AVX512F-NEXT: retq + %r = call <8 x half> @llvm.ldexp.v8f16.v8i16(<8 x half> , <8 x i16> %i) + ret <8 x half> %r +} + +define <8 x half> @fdiv_pow2_8xhalf(<8 x i16> %i) { +; CHECK-SSE-LABEL: fdiv_pow2_8xhalf: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: subq $88, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 96 +; CHECK-SSE-NEXT: movdqa %xmm0, %xmm1 +; CHECK-SSE-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; CHECK-SSE-NEXT: pslld $23, %xmm1 +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1065353216,1065353216,1065353216,1065353216] +; CHECK-SSE-NEXT: paddd %xmm2, %xmm1 +; CHECK-SSE-NEXT: cvttps2dq %xmm1, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = 
xmm0[0,0,1,1,2,2,3,3] +; CHECK-SSE-NEXT: pslld $23, %xmm0 +; CHECK-SSE-NEXT: paddd %xmm2, %xmm0 +; CHECK-SSE-NEXT: cvttps2dq %xmm0, %xmm0 +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: pextrw $0, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $2, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $4, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $6, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $0, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $2, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 
16-byte Reload +; CHECK-SSE-NEXT: pextrw $4, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $6, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; 
CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: punpckldq (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; 
CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-SSE-NEXT: punpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-SSE-NEXT: punpcklqdq (%rsp), %xmm0 # 16-byte Folded Reload +; CHECK-SSE-NEXT: # xmm0 = xmm0[0],mem[0] +; CHECK-SSE-NEXT: addq $88, %rsp +; CHECK-SSE-NEXT: .cfi_def_cfa_offset 8 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow2_8xhalf: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: subq $120, %rsp +; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 128 +; CHECK-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] +; CHECK-AVX2-NEXT: vpsllvd %ymm0, %ymm1, %ymm0 +; CHECK-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u] +; CHECK-AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; CHECK-AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; CHECK-AVX2-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: 
vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; CHECK-AVX2-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vzeroupper +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq 
__truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; 
CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload +; CHECK-AVX2-NEXT: # xmm0 = 
xmm0[0],mem[0] +; CHECK-AVX2-NEXT: addq $120, %rsp +; CHECK-AVX2-NEXT: .cfi_def_cfa_offset 8 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow2_8xhalf: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; CHECK-NO-FASTFMA-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvd %ymm0, %ymm1, %ymm1 +; CHECK-NO-FASTFMA-NEXT: vpmovdw %zmm1, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm2, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm3, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] +; CHECK-NO-FASTFMA-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm3, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm4, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; CHECK-NO-FASTFMA-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; CHECK-NO-FASTFMA-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm3 +; 
CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm3, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-NO-FASTFMA-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm4, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; CHECK-NO-FASTFMA-NEXT: vpextrw $0, %xmm1, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm1, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; CHECK-NO-FASTFMA-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; CHECK-NO-FASTFMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} ymm1 = [8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3] +; CHECK-NO-FASTFMA-NEXT: vdivps %ymm0, %ymm1, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %ymm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vzeroupper +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow2_8xhalf: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1] +; CHECK-FMA-NEXT: vpsllvw %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm1 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; 
CHECK-FMA-NEXT: vmovd %xmm1, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; CHECK-FMA-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm3, %xmm3 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-FMA-NEXT: vmovd %xmm3, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vmovd %xmm4, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4 +; CHECK-FMA-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm5 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm5, %xmm5 +; CHECK-FMA-NEXT: vmovd %xmm5, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm5 +; CHECK-FMA-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm6, %xmm6 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm6, %xmm6 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; CHECK-FMA-NEXT: vmovd %xmm6, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm7, %xmm4 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vmovd %xmm4, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm4 +; CHECK-FMA-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm7, %xmm0 +; CHECK-FMA-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = 
xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3] +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] +; CHECK-FMA-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; CHECK-FMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %ymm0 +; CHECK-FMA-NEXT: vbroadcastss {{.*#+}} ymm1 = [8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3,8.192E+3] +; CHECK-FMA-NEXT: vdivps %ymm0, %ymm1, %ymm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %ymm0, %xmm0 +; CHECK-FMA-NEXT: vzeroupper +; CHECK-FMA-NEXT: retq + %p2 = shl <8 x i16> , %i + %p2_f = uitofp <8 x i16> %p2 to <8 x half> + %r = fdiv <8 x half> , %p2_f + ret <8 x half> %r +} + +define double @fmul_pow_shl_cnt(i64 %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: movq %rax, %xmm1 +; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-SSE-NEXT: addsd %xmm1, %xmm0 +; CHECK-SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: vmovq %rax, %xmm0 +; CHECK-AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; CHECK-AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulsd 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i64 1, %cnt + %conv = uitofp i64 %shl to double + %mul = fmul double 9.000000e+00, %conv + ret double %mul +} + +define double @fmul_pow_shl_cnt2(i64 %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: movl $2, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: movq %rax, %xmm1 +; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-SSE-NEXT: addsd %xmm1, %xmm0 +; CHECK-SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt2: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: movl $2, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: vmovq %rax, %xmm0 +; CHECK-AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, 
%xmm0 +; CHECK-AVX2-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; CHECK-AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: movl $2, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $2, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i64 2, %cnt + %conv = uitofp i64 %shl to double + %mul = fmul double -9.000000e+00, %conv + ret double %mul +} + +define float @fmul_pow_select(i32 %cnt, i1 %c) nounwind { +; CHECK-SSE-LABEL: fmul_pow_select: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: andl $1, %esi +; CHECK-SSE-NEXT: movl $2, %eax +; CHECK-SSE-NEXT: subl %esi, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0 +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_select: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: andl $1, %esi +; CHECK-AVX2-NEXT: movl $2, %eax +; CHECK-AVX2-NEXT: subl %esi, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: 
fmul_pow_select: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: andl $1, %esi +; CHECK-NO-FASTFMA-NEXT: movl $2, %eax +; CHECK-NO-FASTFMA-NEXT: subl %esi, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_select: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: andl $1, %esi +; CHECK-FMA-NEXT: movl $2, %eax +; CHECK-FMA-NEXT: subl %esi, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl2 = shl nuw i32 2, %cnt + %shl1 = shl nuw i32 1, %cnt + %shl = select i1 %c, i32 %shl1, i32 %shl2 + %conv = uitofp i32 %shl to float + %mul = fmul float 9.000000e+00, %conv + ret float %mul +} + +define float @fmul_fly_pow_mul_min_pow2(i64 %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_fly_pow_mul_min_pow2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: movl $8, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: cmpq $8192, %rax # imm = 0x2000 +; CHECK-SSE-NEXT: movl $8192, %ecx # imm = 0x2000 +; CHECK-SSE-NEXT: cmovbq %rax, %rcx +; CHECK-SSE-NEXT: cvtsi2ss %rcx, %xmm0 +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_fly_pow_mul_min_pow2: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: movl $8, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: cmpq $8192, %rax # imm = 0x2000 +; CHECK-AVX2-NEXT: movl $8192, %ecx # imm = 0x2000 +; CHECK-AVX2-NEXT: cmovbq %rax, %rcx +; CHECK-AVX2-NEXT: 
vcvtsi2ss %rcx, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_fly_pow_mul_min_pow2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: movl $8, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: cmpq $8192, %rax # imm = 0x2000 +; CHECK-NO-FASTFMA-NEXT: movl $8192, %ecx # imm = 0x2000 +; CHECK-NO-FASTFMA-NEXT: cmovbq %rax, %rcx +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %rcx, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_fly_pow_mul_min_pow2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $8, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: cmpq $8192, %rax # imm = 0x2000 +; CHECK-FMA-NEXT: movl $8192, %ecx # imm = 0x2000 +; CHECK-FMA-NEXT: cmovbq %rax, %rcx +; CHECK-FMA-NEXT: vcvtsi2ss %rcx, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl8 = shl nuw i64 8, %cnt + %shl = call i64 @llvm.umin.i64(i64 %shl8, i64 8192) + %conv = uitofp i64 %shl to float + %mul = fmul float 9.000000e+00, %conv + ret float %mul +} + +define double @fmul_pow_mul_max_pow2(i16 %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_mul_max_pow2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $2, %eax +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: movl $1, %edx +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %edx +; CHECK-SSE-NEXT: cmpw %ax, %dx +; CHECK-SSE-NEXT: cmovbel %eax, %edx +; CHECK-SSE-NEXT: movzwl %dx, %eax +; CHECK-SSE-NEXT: cvtsi2sd %eax, %xmm0 +; CHECK-SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_mul_max_pow2: +; CHECK-AVX2: # %bb.0: +; 
CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $2, %eax +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: movl $1, %edx +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %edx +; CHECK-AVX2-NEXT: cmpw %ax, %dx +; CHECK-AVX2-NEXT: cmovbel %eax, %edx +; CHECK-AVX2-NEXT: movzwl %dx, %eax +; CHECK-AVX2-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_mul_max_pow2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $2, %eax +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: movl $1, %edx +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %edx +; CHECK-NO-FASTFMA-NEXT: cmpw %ax, %dx +; CHECK-NO-FASTFMA-NEXT: cmovbel %eax, %edx +; CHECK-NO-FASTFMA-NEXT: movzwl %dx, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_mul_max_pow2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $2, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: movl $1, %ecx +; CHECK-FMA-NEXT: shlxl %edi, %ecx, %ecx +; CHECK-FMA-NEXT: cmpw %ax, %cx +; CHECK-FMA-NEXT: cmoval %ecx, %eax +; CHECK-FMA-NEXT: movzwl %ax, %eax +; CHECK-FMA-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl2 = shl nuw i16 2, %cnt + %shl1 = shl nuw i16 1, %cnt + %shl = call i16 @llvm.umax.i16(i16 %shl1, i16 %shl2) + %conv = uitofp i16 %shl to double + %mul = fmul double 3.000000e+00, %conv + ret double %mul +} + +define double @fmul_pow_shl_cnt_fail_maybe_non_pow2(i64 %v, i64 %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_fail_maybe_non_pow2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq 
%rsi, %rcx +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rdi +; CHECK-SSE-NEXT: movq %rdi, %xmm1 +; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-SSE-NEXT: addsd %xmm1, %xmm0 +; CHECK-SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_fail_maybe_non_pow2: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rsi, %rcx +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rdi +; CHECK-AVX2-NEXT: vmovq %rdi, %xmm0 +; CHECK-AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; CHECK-AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_fail_maybe_non_pow2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rsi, %rcx +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rdi +; CHECK-NO-FASTFMA-NEXT: vcvtusi2sd %rdi, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_fail_maybe_non_pow2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: shlxq %rsi, %rdi, %rax +; CHECK-FMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i64 %v, %cnt + %conv = uitofp i64 %shl to double + %mul = fmul double 9.000000e+00, %conv + ret double %mul +} + +define <2 x float> @fmul_pow_shl_cnt_vec_fail_expensive_cast(<2 x i64> %cnt) nounwind 
{ +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm3 = [2,2] +; CHECK-SSE-NEXT: movdqa %xmm3, %xmm1 +; CHECK-SSE-NEXT: psllq %xmm2, %xmm1 +; CHECK-SSE-NEXT: psllq %xmm0, %xmm3 +; CHECK-SSE-NEXT: movq %xmm3, %rax +; CHECK-SSE-NEXT: testq %rax, %rax +; CHECK-SSE-NEXT: js .LBB12_1 +; CHECK-SSE-NEXT: # %bb.2: +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0 +; CHECK-SSE-NEXT: jmp .LBB12_3 +; CHECK-SSE-NEXT: .LBB12_1: +; CHECK-SSE-NEXT: movq %rax, %rcx +; CHECK-SSE-NEXT: shrq %rcx +; CHECK-SSE-NEXT: andl $1, %eax +; CHECK-SSE-NEXT: orq %rcx, %rax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0 +; CHECK-SSE-NEXT: addss %xmm0, %xmm0 +; CHECK-SSE-NEXT: .LBB12_3: +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; CHECK-SSE-NEXT: movq %xmm1, %rax +; CHECK-SSE-NEXT: testq %rax, %rax +; CHECK-SSE-NEXT: js .LBB12_4 +; CHECK-SSE-NEXT: # %bb.5: +; CHECK-SSE-NEXT: xorps %xmm1, %xmm1 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: jmp .LBB12_6 +; CHECK-SSE-NEXT: .LBB12_4: +; CHECK-SSE-NEXT: movq %rax, %rcx +; CHECK-SSE-NEXT: shrq %rcx +; CHECK-SSE-NEXT: andl $1, %eax +; CHECK-SSE-NEXT: orq %rcx, %rax +; CHECK-SSE-NEXT: xorps %xmm1, %xmm1 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: addss %xmm1, %xmm1 +; CHECK-SSE-NEXT: .LBB12_6: +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-AVX2-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpsrlq $1, %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpextrq $1, %xmm1, %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm2, 
%xmm2 +; CHECK-AVX2-NEXT: vmovq %xmm1, %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm1 +; CHECK-AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero +; CHECK-AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2 +; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; CHECK-AVX2-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0 +; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; CHECK-AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1] +; CHECK-AVX2-NEXT: vmulps %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-NO-FASTFMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpextrq $1, %xmm0, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %rax, %xmm2, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vmovq %xmm0, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %rax, %xmm2, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1] +; CHECK-NO-FASTFMA-NEXT: vmulps %xmm1, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_fail_expensive_cast: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-FMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtuqq2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nsw nuw <2 x i64> , %cnt + %conv = uitofp <2 x i64> %shl to <2 x float> + %mul = fmul <2 x float> , %conv + ret <2 x float> %mul +} + +define <2 x double> @fmul_pow_shl_cnt_vec(<2 x i64> %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [2,2] +; CHECK-SSE-NEXT: movdqa %xmm1, %xmm2 +; CHECK-SSE-NEXT: psllq %xmm0, %xmm2 +; 
CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; CHECK-SSE-NEXT: psllq %xmm0, %xmm1 +; CHECK-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; CHECK-SSE-NEXT: movapd {{.*#+}} xmm0 = [4294967295,4294967295] +; CHECK-SSE-NEXT: andpd %xmm1, %xmm0 +; CHECK-SSE-NEXT: orpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: psrlq $32, %xmm1 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: addpd %xmm0, %xmm1 +; CHECK-SSE-NEXT: mulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-AVX2-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-NO-FASTFMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vaddpd %xmm0, %xmm1, 
%xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-FMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtuqq2pd %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nsw nuw <2 x i64> , %cnt + %conv = uitofp <2 x i64> %shl to <2 x double> + %mul = fmul <2 x double> , %conv + ret <2 x double> %mul +} + +define <4 x float> @fmul_pow_shl_cnt_vec_preserve_fma(<4 x i32> %cnt, <4 x float> %add) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_preserve_fma: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pslld $23, %xmm0 +; CHECK-SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: cvttps2dq %xmm0, %xmm0 +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm2 = [2,2,2,2] +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; CHECK-SSE-NEXT: pmuludq %xmm2, %xmm0 +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-SSE-NEXT: pmuludq %xmm2, %xmm3 +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3] +; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535] +; CHECK-SSE-NEXT: pand %xmm0, %xmm2 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 +; CHECK-SSE-NEXT: psrld $16, %xmm0 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: subps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: addps %xmm2, %xmm0 +; CHECK-SSE-NEXT: mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: addps %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_preserve_fma: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2] +; CHECK-AVX2-NEXT: vpsllvd %xmm0, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 
= [1258291200,1258291200,1258291200,1258291200] +; CHECK-AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7] +; CHECK-AVX2-NEXT: vpsrld $16, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1392508928,1392508928,1392508928,1392508928] +; CHECK-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2],xmm3[3],xmm0[4],xmm3[5],xmm0[6],xmm3[7] +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11] +; CHECK-AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddps %xmm0, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm2 = [5.0E+0,5.0E+0,5.0E+0,5.0E+0] +; CHECK-AVX2-NEXT: vmulps %xmm2, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_preserve_fma: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2] +; CHECK-NO-FASTFMA-NEXT: vpsllvd %xmm0, %xmm2, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtudq2ps %zmm0, %zmm0 +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} xmm2 = [5.0E+0,5.0E+0,5.0E+0,5.0E+0] +; CHECK-NO-FASTFMA-NEXT: vmulps %xmm2, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vaddps %xmm1, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vzeroupper +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_preserve_fma: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2] +; CHECK-FMA-NEXT: vpsllvd %xmm0, %xmm2, %xmm0 +; CHECK-FMA-NEXT: vcvtudq2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vfmadd132ps {{.*#+}} xmm0 = (xmm0 * mem) + xmm1 +; CHECK-FMA-NEXT: retq + %shl = shl nsw nuw <4 x i32> , %cnt + %conv = uitofp <4 x i32> %shl to <4 x float> + %mul = fmul <4 x float> , %conv + %res = fadd <4 x float> %mul, %add + ret <4 x float> %res +} + +define <2 x double> @fmul_pow_shl_cnt_vec_non_splat_todo(<2 x i64> %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_non_splat_todo: +; CHECK-SSE: # 
%bb.0: +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [2,2] +; CHECK-SSE-NEXT: movdqa %xmm1, %xmm2 +; CHECK-SSE-NEXT: psllq %xmm0, %xmm2 +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; CHECK-SSE-NEXT: psllq %xmm0, %xmm1 +; CHECK-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; CHECK-SSE-NEXT: movapd {{.*#+}} xmm0 = [4294967295,4294967295] +; CHECK-SSE-NEXT: andpd %xmm1, %xmm0 +; CHECK-SSE-NEXT: orpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: psrlq $32, %xmm1 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: addpd %xmm0, %xmm1 +; CHECK-SSE-NEXT: mulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_non_splat_todo: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-AVX2-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_non_splat_todo: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-NO-FASTFMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpor 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_non_splat_todo: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2,2] +; CHECK-FMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtuqq2pd %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nsw nuw <2 x i64> , %cnt + %conv = uitofp <2 x i64> %shl to <2 x double> + %mul = fmul <2 x double> , %conv + ret <2 x double> %mul +} + +define <2 x double> @fmul_pow_shl_cnt_vec_non_splat2_todo(<2 x i64> %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_non_splat2_todo: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [2,1] +; CHECK-SSE-NEXT: movdqa %xmm1, %xmm2 +; CHECK-SSE-NEXT: psllq %xmm0, %xmm2 +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; CHECK-SSE-NEXT: psllq %xmm0, %xmm1 +; CHECK-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; CHECK-SSE-NEXT: movapd {{.*#+}} xmm0 = [4294967295,4294967295] +; CHECK-SSE-NEXT: andpd %xmm1, %xmm0 +; CHECK-SSE-NEXT: orpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: psrlq $32, %xmm1 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: addpd %xmm0, %xmm1 +; CHECK-SSE-NEXT: mulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_non_splat2_todo: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [2,1] +; CHECK-AVX2-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; 
CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_non_splat2_todo: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vmovdqa {{.*#+}} xmm1 = [2,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_non_splat2_todo: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vmovdqa {{.*#+}} xmm1 = [2,1] +; CHECK-FMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtuqq2pd %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nsw nuw <2 x i64> , %cnt + %conv = uitofp <2 x i64> %shl to <2 x double> + %mul = fmul <2 x double> , %conv + ret <2 x double> %mul +} + +define <2 x half> @fmul_pow_shl_cnt_vec_fail_to_large(<2 x i16> %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_vec_fail_to_large: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: subq $40, %rsp +; CHECK-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; CHECK-SSE-NEXT: pslld $23, %xmm0 +; CHECK-SSE-NEXT: paddd 
{{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: cvttps2dq %xmm0, %xmm0 +; CHECK-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] +; CHECK-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: pextrw $1, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-SSE-NEXT: pextrw $0, %xmm0, %eax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-SSE-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; CHECK-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; CHECK-SSE-NEXT: movdqa %xmm1, %xmm0 +; CHECK-SSE-NEXT: addq $40, %rsp +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_vec_fail_to_large: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: subq $40, %rsp +; CHECK-AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2] +; CHECK-AVX2-NEXT: vpsllvd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-AVX2-NEXT: 
vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; CHECK-AVX2-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; CHECK-AVX2-NEXT: vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload +; CHECK-AVX2-NEXT: # xmm0 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; CHECK-AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; CHECK-AVX2-NEXT: addq $40, %rsp +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_vec_fail_to_large: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; CHECK-NO-FASTFMA-NEXT: vmovdqa {{.*#+}} ymm1 = [2,2,0,0,0,0,0,0] +; CHECK-NO-FASTFMA-NEXT: vpsllvd %ymm0, %ymm1, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vpmovdw %zmm0, %ymm1 +; CHECK-NO-FASTFMA-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpextrw $1, %xmm1, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vmovd 
%xmm1, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NO-FASTFMA-NEXT: vmovaps {{.*#+}} xmm1 = [16,0,0,0] +; CHECK-NO-FASTFMA-NEXT: xorl %eax, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm2, %eax +; CHECK-NO-FASTFMA-NEXT: vmovd %eax, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vpbroadcastw %xmm2, %xmm2 +; CHECK-NO-FASTFMA-NEXT: vpermt2ps %zmm0, %zmm1, %zmm2 +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm2, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} ymm1 = [1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1,1.5E+1] +; CHECK-NO-FASTFMA-NEXT: vmulps %ymm1, %ymm0, %ymm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %ymm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vzeroupper +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_vec_fail_to_large: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastw {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2] +; CHECK-FMA-NEXT: vpsllvw %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vpextrw $7, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm1 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; CHECK-FMA-NEXT: vmovd %xmm1, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1 +; CHECK-FMA-NEXT: vpextrw $6, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $5, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm3, %xmm3 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm3, %xmm3 +; CHECK-FMA-NEXT: vmovd %xmm3, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $4, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; 
CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $3, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm4 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm4 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; CHECK-FMA-NEXT: vmovd %xmm4, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $2, %xmm0, %eax +; CHECK-FMA-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm2 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; CHECK-FMA-NEXT: vmovd %xmm2, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm2 +; CHECK-FMA-NEXT: vpextrw $1, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm4 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm4, %xmm3 +; CHECK-FMA-NEXT: vmovd %xmm3, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm3 +; CHECK-FMA-NEXT: vpextrw $0, %xmm0, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm5, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] +; CHECK-FMA-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; CHECK-FMA-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %ymm0 +; CHECK-FMA-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %ymm0, %xmm0 +; CHECK-FMA-NEXT: vzeroupper +; CHECK-FMA-NEXT: retq + %shl = shl nsw nuw <2 x i16> , %cnt + %conv = uitofp <2 x i16> %shl to <2 x half> + %mul = fmul <2 x half> , %conv + ret <2 x half> %mul +} + +define double @fmul_pow_shl_cnt_fail_maybe_bad_exp(i64 %cnt) 
nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_fail_maybe_bad_exp: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: movq %rax, %xmm1 +; CHECK-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movapd %xmm1, %xmm0 +; CHECK-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-SSE-NEXT: addsd %xmm1, %xmm0 +; CHECK-SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_fail_maybe_bad_exp: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: vmovq %rax, %xmm0 +; CHECK-AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0] +; CHECK-AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_fail_maybe_bad_exp: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_fail_maybe_bad_exp: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: vcvtusi2sd %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), 
%xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i64 1, %cnt + %conv = uitofp i64 %shl to double + %mul = fmul double 9.745314e+288, %conv + ret double %mul +} + +define double @fmul_pow_shl_cnt_safe(i16 %cnt) nounwind { +; CHECK-SSE-LABEL: fmul_pow_shl_cnt_safe: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: movzwl %ax, %eax +; CHECK-SSE-NEXT: cvtsi2sd %eax, %xmm0 +; CHECK-SSE-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fmul_pow_shl_cnt_safe: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: movzwl %ax, %eax +; CHECK-AVX2-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fmul_pow_shl_cnt_safe: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: movzwl %ax, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fmul_pow_shl_cnt_safe: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: movzwl %ax, %eax +; CHECK-FMA-NEXT: vcvtsi2sd %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i16 1, %cnt + %conv = uitofp i16 %shl to double + %mul = fmul double 9.745314e+288, %conv + ret double %mul +} + +define <2 x double> 
@fdiv_pow_shl_cnt_vec(<2 x i64> %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_vec: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,1] +; CHECK-SSE-NEXT: movdqa %xmm1, %xmm2 +; CHECK-SSE-NEXT: psllq %xmm0, %xmm2 +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; CHECK-SSE-NEXT: psllq %xmm0, %xmm1 +; CHECK-SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; CHECK-SSE-NEXT: movapd {{.*#+}} xmm0 = [4294967295,4294967295] +; CHECK-SSE-NEXT: andpd %xmm1, %xmm0 +; CHECK-SSE-NEXT: orpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; CHECK-SSE-NEXT: psrlq $32, %xmm1 +; CHECK-SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: subpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; CHECK-SSE-NEXT: addpd %xmm0, %xmm1 +; CHECK-SSE-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,1.0E+0] +; CHECK-SSE-NEXT: divpd %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_vec: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,1] +; CHECK-AVX2-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-AVX2-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0] +; CHECK-AVX2-NEXT: # xmm1 = mem[0,0] +; CHECK-AVX2-NEXT: vdivpd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_vec: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpblendd {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] +; 
CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vpsrlq $32, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vsubpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vaddpd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0] +; CHECK-NO-FASTFMA-NEXT: # xmm1 = mem[0,0] +; CHECK-NO-FASTFMA-NEXT: vdivpd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_vec: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,1] +; CHECK-FMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtuqq2pd %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0] +; CHECK-FMA-NEXT: # xmm1 = mem[0,0] +; CHECK-FMA-NEXT: vdivpd %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw <2 x i64> , %cnt + %conv = uitofp <2 x i64> %shl to <2 x double> + %mul = fdiv <2 x double> , %conv + ret <2 x double> %mul +} + +define <2 x float> @fdiv_pow_shl_cnt_vec_with_expensive_cast(<2 x i64> %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_vec_with_expensive_cast: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; CHECK-SSE-NEXT: movdqa {{.*#+}} xmm3 = [1,1] +; CHECK-SSE-NEXT: movdqa %xmm3, %xmm2 +; CHECK-SSE-NEXT: psllq %xmm1, %xmm2 +; CHECK-SSE-NEXT: psllq %xmm0, %xmm3 +; CHECK-SSE-NEXT: movq %xmm3, %rax +; CHECK-SSE-NEXT: testq %rax, %rax +; CHECK-SSE-NEXT: js .LBB21_1 +; CHECK-SSE-NEXT: # %bb.2: +; CHECK-SSE-NEXT: xorps %xmm1, %xmm1 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: jmp .LBB21_3 +; CHECK-SSE-NEXT: .LBB21_1: +; CHECK-SSE-NEXT: movq %rax, %rcx +; CHECK-SSE-NEXT: shrq %rcx +; CHECK-SSE-NEXT: andl $1, %eax +; CHECK-SSE-NEXT: orq %rcx, %rax +; CHECK-SSE-NEXT: xorps %xmm1, %xmm1 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: addss %xmm1, %xmm1 +; CHECK-SSE-NEXT: .LBB21_3: +; 
CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3] +; CHECK-SSE-NEXT: movq %xmm0, %rax +; CHECK-SSE-NEXT: testq %rax, %rax +; CHECK-SSE-NEXT: js .LBB21_4 +; CHECK-SSE-NEXT: # %bb.5: +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0 +; CHECK-SSE-NEXT: jmp .LBB21_6 +; CHECK-SSE-NEXT: .LBB21_4: +; CHECK-SSE-NEXT: movq %rax, %rcx +; CHECK-SSE-NEXT: shrq %rcx +; CHECK-SSE-NEXT: andl $1, %eax +; CHECK-SSE-NEXT: orq %rcx, %rax +; CHECK-SSE-NEXT: xorps %xmm0, %xmm0 +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0 +; CHECK-SSE-NEXT: addss %xmm0, %xmm0 +; CHECK-SSE-NEXT: .LBB21_6: +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = <1.0E+0,1.0E+0,u,u> +; CHECK-SSE-NEXT: divps %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_vec_with_expensive_cast: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,1] +; CHECK-AVX2-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpsrlq $1, %xmm0, %xmm2 +; CHECK-AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1 +; CHECK-AVX2-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm1 +; CHECK-AVX2-NEXT: vpextrq $1, %xmm1, %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2 +; CHECK-AVX2-NEXT: vmovq %xmm1, %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm1 +; CHECK-AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],zero,zero +; CHECK-AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2 +; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; CHECK-AVX2-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm0 +; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; CHECK-AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0] +; CHECK-AVX2-NEXT: vdivps %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_vec_with_expensive_cast: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: vpbroadcastq {{.*#+}} xmm1 
= [1,1] +; CHECK-NO-FASTFMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpextrq $1, %xmm0, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %rax, %xmm2, %xmm1 +; CHECK-NO-FASTFMA-NEXT: vmovq %xmm0, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %rax, %xmm2, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero +; CHECK-NO-FASTFMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0] +; CHECK-NO-FASTFMA-NEXT: vdivps %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_vec_with_expensive_cast: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,1] +; CHECK-FMA-NEXT: vpsllvq %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtuqq2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vbroadcastss {{.*#+}} xmm1 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0] +; CHECK-FMA-NEXT: vdivps %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw <2 x i64> , %cnt + %conv = uitofp <2 x i64> %shl to <2 x float> + %mul = fdiv <2 x float> , %conv + ret <2 x float> %mul +} + +define float @fdiv_pow_shl_cnt_fail_maybe_z(i64 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_fail_maybe_z: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: movl $8, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: testq %rax, %rax +; CHECK-SSE-NEXT: js .LBB22_1 +; CHECK-SSE-NEXT: # %bb.2: +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: jmp .LBB22_3 +; CHECK-SSE-NEXT: .LBB22_1: +; CHECK-SSE-NEXT: shrq %rax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: addss %xmm1, %xmm1 +; CHECK-SSE-NEXT: .LBB22_3: +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_fail_maybe_z: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: movl $8, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed 
$rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: testq %rax, %rax +; CHECK-AVX2-NEXT: js .LBB22_1 +; CHECK-AVX2-NEXT: # %bb.2: +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: jmp .LBB22_3 +; CHECK-AVX2-NEXT: .LBB22_1: +; CHECK-AVX2-NEXT: shrq %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: .LBB22_3: +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_fail_maybe_z: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: movl $8, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %rax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_fail_maybe_z: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $8, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: vcvtusi2ss %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl i64 8, %cnt + %conv = uitofp i64 %shl to float + %mul = fdiv float -9.000000e+00, %conv + ret float %mul +} + +define float @fdiv_pow_shl_cnt_fail_neg_int(i64 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_fail_neg_int: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: movl $8, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: 
fdiv_pow_shl_cnt_fail_neg_int: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: movl $8, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_fail_neg_int: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: movl $8, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_fail_neg_int: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $8, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl i64 8, %cnt + %conv = sitofp i64 %shl to float + %mul = fdiv float -9.000000e+00, %conv + ret float %mul +} + +define float @fdiv_pow_shl_cnt(i64 %cnt_in) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movq %rdi, %rcx +; CHECK-SSE-NEXT: andb $31, %cl +; CHECK-SSE-NEXT: movl $8, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-SSE-NEXT: shlq %cl, %rax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movq %rdi, %rcx +; CHECK-AVX2-NEXT: andb $31, %cl +; CHECK-AVX2-NEXT: movl $8, %eax +; 
CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-AVX2-NEXT: shlq %cl, %rax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movq %rdi, %rcx +; CHECK-NO-FASTFMA-NEXT: andb $31, %cl +; CHECK-NO-FASTFMA-NEXT: movl $8, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $rcx +; CHECK-NO-FASTFMA-NEXT: shlq %cl, %rax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: andb $31, %dil +; CHECK-FMA-NEXT: movl $8, %eax +; CHECK-FMA-NEXT: shlxq %rdi, %rax, %rax +; CHECK-FMA-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %cnt = and i64 %cnt_in, 31 + %shl = shl i64 8, %cnt + %conv = sitofp i64 %shl to float + %mul = fdiv float -0.500000e+00, %conv + ret float %mul +} + +define half @fdiv_pow_shl_cnt_fail_out_of_bounds(i32 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_fail_out_of_bounds: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rax +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: popq %rax +; CHECK-SSE-NEXT: 
retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_fail_out_of_bounds: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: pushq %rax +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: popq %rax +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_fail_out_of_bounds: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_fail_out_of_bounds: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: 
vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i32 1, %cnt + %conv = uitofp i32 %shl to half + %mul = fdiv half 0xH7000, %conv + ret half %mul +} + +define half @fdiv_pow_shl_cnt_in_bounds(i16 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_in_bounds: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rax +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: movzwl %ax, %eax +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: popq %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_in_bounds: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: pushq %rax +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: movzwl %ax, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: popq %rax +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_in_bounds: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: movzwl %ax, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm0, 
%xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_in_bounds: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: movzwl %ax, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i16 1, %cnt + %conv = uitofp i16 %shl to half + %mul = fdiv half 0xH7000, %conv + ret half %mul +} + +define half @fdiv_pow_shl_cnt_in_bounds2(i16 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_in_bounds2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rax +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: movzwl %ax, %eax +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: 
popq %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_in_bounds2: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: pushq %rax +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: movzwl %ax, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: popq %rax +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_in_bounds2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: movzwl %ax, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_in_bounds2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: movzwl %ax, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; 
CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i16 1, %cnt + %conv = uitofp i16 %shl to half + %mul = fdiv half 0xH4800, %conv + ret half %mul +} + +define half @fdiv_pow_shl_cnt_fail_out_of_bound2(i16 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt_fail_out_of_bound2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rax +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: movzwl %ax, %eax +; CHECK-SSE-NEXT: cvtsi2ss %eax, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: callq __extendhfsf2@PLT +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE-NEXT: callq __truncsfhf2@PLT +; CHECK-SSE-NEXT: popq %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt_fail_out_of_bound2: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: pushq %rax +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: movzwl %ax, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: callq __extendhfsf2@PLT +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: callq __truncsfhf2@PLT +; CHECK-AVX2-NEXT: popq %rax +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt_fail_out_of_bound2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def 
$cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: movzwl %ax, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovd %xmm0, %eax +; CHECK-NO-FASTFMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt_fail_out_of_bound2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: movzwl %ax, %eax +; CHECK-FMA-NEXT: vcvtsi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; CHECK-FMA-NEXT: vcvtph2ps %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovd %xmm0, %eax +; CHECK-FMA-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i16 1, %cnt + %conv = uitofp i16 %shl to half + %mul = fdiv half 0xH4000, %conv + ret half %mul +} + +define double @fdiv_pow_shl_cnt32_to_dbl_okay(i32 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt32_to_dbl_okay: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: cvtsi2sd %rax, %xmm1 +; CHECK-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-SSE-NEXT: divsd %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt32_to_dbl_okay: +; 
CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: vcvtsi2sd %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-AVX2-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt32_to_dbl_okay: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2sd %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NO-FASTFMA-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt32_to_dbl_okay: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: vcvtusi2sd %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-FMA-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i32 1, %cnt + %conv = uitofp i32 %shl to double + %mul = fdiv double 0x36A0000000000000, %conv + ret double %mul +} + +define float @fdiv_pow_shl_cnt32_out_of_bounds2(i32 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt32_out_of_bounds2: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt32_out_of_bounds2: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; 
CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt32_out_of_bounds2: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt32_out_of_bounds2: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i32 1, %cnt + %conv = uitofp i32 %shl to float + %mul = fdiv float 0x3a1fffff00000000, %conv + ret float %mul +} + +define float @fdiv_pow_shl_cnt32_okay(i32 %cnt) nounwind { +; CHECK-SSE-LABEL: fdiv_pow_shl_cnt32_okay: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl %edi, %ecx +; CHECK-SSE-NEXT: movl $1, %eax +; CHECK-SSE-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-SSE-NEXT: shll %cl, %eax +; CHECK-SSE-NEXT: cvtsi2ss %rax, %xmm1 +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: divss %xmm1, %xmm0 +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX2-LABEL: fdiv_pow_shl_cnt32_okay: +; CHECK-AVX2: # %bb.0: +; CHECK-AVX2-NEXT: movl %edi, %ecx +; CHECK-AVX2-NEXT: movl $1, %eax +; CHECK-AVX2-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-AVX2-NEXT: shll %cl, %eax +; CHECK-AVX2-NEXT: vcvtsi2ss %rax, %xmm0, %xmm0 +; CHECK-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; 
CHECK-AVX2-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-AVX2-NEXT: retq +; +; CHECK-NO-FASTFMA-LABEL: fdiv_pow_shl_cnt32_okay: +; CHECK-NO-FASTFMA: # %bb.0: +; CHECK-NO-FASTFMA-NEXT: movl %edi, %ecx +; CHECK-NO-FASTFMA-NEXT: movl $1, %eax +; CHECK-NO-FASTFMA-NEXT: # kill: def $cl killed $cl killed $ecx +; CHECK-NO-FASTFMA-NEXT: shll %cl, %eax +; CHECK-NO-FASTFMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-NO-FASTFMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-NO-FASTFMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-NO-FASTFMA-NEXT: retq +; +; CHECK-FMA-LABEL: fdiv_pow_shl_cnt32_okay: +; CHECK-FMA: # %bb.0: +; CHECK-FMA-NEXT: movl $1, %eax +; CHECK-FMA-NEXT: shlxl %edi, %eax, %eax +; CHECK-FMA-NEXT: vcvtusi2ss %eax, %xmm0, %xmm0 +; CHECK-FMA-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-FMA-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; CHECK-FMA-NEXT: retq + %shl = shl nuw i32 1, %cnt + %conv = uitofp i32 %shl to float + %mul = fdiv float 0x3a20000000000000, %conv + ret float %mul +} +