Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56345,9 +56345,6 @@
                             ConcatSubOperand(SrcVT, Ops, 1));
       }
       break;
-    case ISD::FADD:
-    case ISD::FSUB:
-    case ISD::FMUL:
     case ISD::FDIV:
       if (!IsSplat && (VT.is256BitVector() ||
                        (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
Index: llvm/test/CodeGen/X86/widen_fadd.ll
===================================================================
--- llvm/test/CodeGen/X86/widen_fadd.ll
+++ llvm/test/CodeGen/X86/widen_fadd.ll
@@ -67,16 +67,42 @@
 ;
 ; AVX1OR2-LABEL: widen_fadd_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
-; AVX1OR2-NEXT:    vaddps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fadd_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovups (%rdi), %ymm0
-; AVX512F-NEXT:    vaddps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vaddps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -88,17 +114,17 @@
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512VL-NEXT:    vaddps %xmm5, %xmm1, %xmm1
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512VL-NEXT:    vaddps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vaddps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vaddps %xmm4, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vaddps %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512VL-NEXT:    vzeroupper
@@ -170,12 +196,38 @@
 ;
 ; AVX1OR2-LABEL: widen_fadd_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
-; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
-; AVX1OR2-NEXT:    vaddps (%rsi), %ymm0, %ymm0
-; AVX1OR2-NEXT:    vaddps 32(%rsi), %ymm1, %ymm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT:    vaddps %xmm7, %xmm8, %xmm7
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
Index: llvm/test/CodeGen/X86/widen_fmul.ll
===================================================================
--- llvm/test/CodeGen/X86/widen_fmul.ll
+++ llvm/test/CodeGen/X86/widen_fmul.ll
@@ -67,16 +67,42 @@
 ;
 ; AVX1OR2-LABEL: widen_fmul_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
-; AVX1OR2-NEXT:    vmulps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fmul_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovups (%rdi), %ymm0
-; AVX512F-NEXT:    vmulps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vmulps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vmulps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vmulps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vmulps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -88,17 +114,17 @@
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512VL-NEXT:    vmulps %xmm5, %xmm1, %xmm1
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512VL-NEXT:    vmulps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vmulps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmulps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmulps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmulps %xmm4, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vmulps %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512VL-NEXT:    vzeroupper
@@ -170,12 +196,38 @@
 ;
 ; AVX1OR2-LABEL: widen_fmul_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
-; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
-; AVX1OR2-NEXT:    vmulps (%rsi), %ymm0, %ymm0
-; AVX1OR2-NEXT:    vmulps 32(%rsi), %ymm1, %ymm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT:    vmulps %xmm7, %xmm8, %xmm7
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
Index: llvm/test/CodeGen/X86/widen_fsub.ll
===================================================================
--- llvm/test/CodeGen/X86/widen_fsub.ll
+++ llvm/test/CodeGen/X86/widen_fsub.ll
@@ -67,16 +67,42 @@
 ;
 ; AVX1OR2-LABEL: widen_fsub_v2f32_v8f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
-; AVX1OR2-NEXT:    vsubps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: widen_fsub_v2f32_v8f32:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovups (%rdi), %ymm0
-; AVX512F-NEXT:    vsubps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vsubps %xmm4, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vsubps %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vsubps %xmm4, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512F-NEXT:    vsubps %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -88,17 +114,17 @@
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512VL-NEXT:    vsubps %xmm5, %xmm1, %xmm1
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
-; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
-; AVX512VL-NEXT:    vsubps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vsubps %xmm4, %xmm0, %xmm0
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vsubps %xmm4, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vsubps %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vsubps %xmm4, %xmm3, %xmm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
-; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
 ; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512VL-NEXT:    vsubps %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
 ; AVX512VL-NEXT:    vzeroupper
@@ -170,12 +196,38 @@
 ;
 ; AVX1OR2-LABEL: widen_fsub_v2f32_v16f32:
 ; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
-; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
-; AVX1OR2-NEXT:    vsubps (%rsi), %ymm0, %ymm0
-; AVX1OR2-NEXT:    vsubps 32(%rsi), %ymm1, %ymm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm2, %xmm2
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm4, %xmm3, %xmm3
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm5, %xmm4, %xmm4
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm6, %xmm5, %xmm5
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm7, %xmm6, %xmm6
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
+; AVX1OR2-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
+; AVX1OR2-NEXT:    vsubps %xmm8, %xmm7, %xmm7
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
 ; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
-; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm0
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1OR2-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
+; AVX1OR2-NEXT:    vmovups %ymm0, 32(%rdx)
 ; AVX1OR2-NEXT:    vzeroupper
 ; AVX1OR2-NEXT:    retq
 ;
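For reference, the IR pattern these widen_* tests exercise looks roughly like the reduced sketch below (value names and GEP structure are illustrative, not copied from the test files): four <2 x float> loads from each source, four fadds, and shufflevectors concatenating the results into a single <8 x float> store. With ISD::FADD/FSUB/FMUL removed from the concat-of-subvectors combine, each <2 x float> op is now selected individually (the vmovsd/vaddps xmm sequences added above) instead of being fused into one 256-bit op, as the removed ymm-based vmovups/vaddps check lines show.

; Reduced sketch of the widen_fadd_v2f32_v8f32 pattern (illustrative
; names): four <2 x float> adds whose results are concatenated into
; one <8 x float> store.
define void @widen_fadd_v2f32_v8f32_sketch(ptr %a0, ptr %b0, ptr %c0) {
  %a1 = getelementptr inbounds i8, ptr %a0, i64 8
  %a2 = getelementptr inbounds i8, ptr %a0, i64 16
  %a3 = getelementptr inbounds i8, ptr %a0, i64 24
  %b1 = getelementptr inbounds i8, ptr %b0, i64 8
  %b2 = getelementptr inbounds i8, ptr %b0, i64 16
  %b3 = getelementptr inbounds i8, ptr %b0, i64 24
  %va0 = load <2 x float>, ptr %a0, align 4
  %va1 = load <2 x float>, ptr %a1, align 4
  %va2 = load <2 x float>, ptr %a2, align 4
  %va3 = load <2 x float>, ptr %a3, align 4
  %vb0 = load <2 x float>, ptr %b0, align 4
  %vb1 = load <2 x float>, ptr %b1, align 4
  %vb2 = load <2 x float>, ptr %b2, align 4
  %vb3 = load <2 x float>, ptr %b3, align 4
  %s0 = fadd <2 x float> %va0, %vb0
  %s1 = fadd <2 x float> %va1, %vb1
  %s2 = fadd <2 x float> %va2, %vb2
  %s3 = fadd <2 x float> %va3, %vb3
  ; Concatenate the four <2 x float> results into one <8 x float>.
  %lo = shufflevector <2 x float> %s0, <2 x float> %s1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <2 x float> %s2, <2 x float> %s3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %res = shufflevector <4 x float> %lo, <4 x float> %hi, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x float> %res, ptr %c0, align 4
  ret void
}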