diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -12612,6 +12612,23 @@ return SDValue(); } +static bool isBroadcastShuffleMask(ArrayRef<int> Mask) { + return isUndefOrEqual(Mask, 0); +} + +static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) { + return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask); +} + +static SDValue getSplatOfVectorElement(const SDLoc &DL, SDValue Vec, int EltIdx, + SelectionDAG &DAG) { + EVT VT = Vec.getValueType(); + SDValue ScalarElt = + DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT.getScalarType(), Vec, + DAG.getIntPtrConstant(EltIdx, DL)); + return DAG.getSplatBuildVector(VT, DL, ScalarElt); +} + /// Generic routine to decompose a shuffle and blend into independent /// blends and permutes. /// @@ -12645,6 +12662,28 @@ } } + auto canonicalizeBroadcastableInput = + [DL, &Subtarget, &DAG](SDValue &Input, MutableArrayRef<int> InputMask) { + unsigned EltSizeInBits = Input.getScalarValueSizeInBits(); + if (!Subtarget.hasAVX2() && + (!Subtarget.hasAVX() || EltSizeInBits < 32 || !MayFoldLoad(Input))) + return; + if (isNoopShuffleMask(InputMask) || !isBroadcastShuffleMask(InputMask)) + return; + Input = getSplatOfVectorElement(DL, Input, 0, DAG); + for (auto I : enumerate(InputMask)) { + int &InputMaskElt = I.value(); + if (InputMaskElt >= 0) + InputMaskElt = I.index(); + } + }; + + if (isNoopOrBroadcastShuffleMask(V1Mask) && + isNoopOrBroadcastShuffleMask(V2Mask)) { + canonicalizeBroadcastableInput(V1, V1Mask); + canonicalizeBroadcastableInput(V2, V2Mask); + } + // Try to lower with the simpler initial blend/unpack/rotate strategies unless // one of the input shuffles would be a no-op. We prefer to shuffle inputs as // the shuffle may be able to fold with a load or other benefit. 
diff --git a/llvm/test/CodeGen/X86/copy-low-subvec-elt-to-high-subvec-elt.ll b/llvm/test/CodeGen/X86/copy-low-subvec-elt-to-high-subvec-elt.ll --- a/llvm/test/CodeGen/X86/copy-low-subvec-elt-to-high-subvec-elt.ll +++ b/llvm/test/CodeGen/X86/copy-low-subvec-elt-to-high-subvec-elt.ll @@ -741,7 +741,7 @@ define <32 x i8> @vec256_eltty_i8_source_subvec_0_target_subvec_mask_1_binary(<32 x i8> %x, <32 x i8> %y) nounwind { ; CHECK-LABEL: vec256_eltty_i8_source_subvec_0_target_subvec_mask_1_binary: ; CHECK: # %bb.0: -; CHECK-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] +; CHECK-NEXT: vpbroadcastb %xmm1, %ymm1 ; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; CHECK-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0 ; CHECK-NEXT: retq @@ -763,8 +763,7 @@ define <32 x i8> @vec256_eltty_i8_source_subvec_0_target_subvec_mask_2_binary(<32 x i8> %x, <32 x i8> %y) nounwind { ; CHECK-LABEL: vec256_eltty_i8_source_subvec_0_target_subvec_mask_2_binary: ; CHECK: # %bb.0: -; CHECK-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] -; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 +; CHECK-NEXT: vpbroadcastb %xmm1, %ymm1 ; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0] ; CHECK-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0 ; CHECK-NEXT: retq @@ -800,7 +799,7 @@ ; CHECK-LABEL: vec256_eltty_i8_source_subvec_1_target_subvec_mask_1_unary: ; CHECK: # %bb.0: ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 -; CHECK-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0] +; CHECK-NEXT: vpbroadcastb %xmm1, %ymm1 ; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = 
[255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; CHECK-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0 ; CHECK-NEXT: retq diff --git a/llvm/test/CodeGen/X86/horizontal-sum.ll b/llvm/test/CodeGen/X86/horizontal-sum.ll --- a/llvm/test/CodeGen/X86/horizontal-sum.ll +++ b/llvm/test/CodeGen/X86/horizontal-sum.ll @@ -463,8 +463,7 @@ ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vphaddd %xmm7, %xmm6, %xmm1 -; AVX2-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm2 -; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1 +; AVX2-FAST-NEXT: vphaddd %xmm0, %xmm1, %xmm1 ; AVX2-FAST-NEXT: vpbroadcastq %xmm1, %ymm1 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] ; AVX2-FAST-NEXT: retq @@ -1082,28 +1081,28 @@ ; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; SSSE3-FAST-NEXT: retq ; -; AVX-SLOW-LABEL: reduction_sum_v4i32_v4i32: -; AVX-SLOW: # %bb.0: -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] -; AVX-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0 -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1] -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3] -; AVX-SLOW-NEXT: vpaddd %xmm5, %xmm1, %xmm1 -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1] -; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3] -; AVX-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2 -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1] -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,2,3] -; AVX-SLOW-NEXT: vpaddd %xmm6, %xmm3, %xmm3 -; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1] -; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] -; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; AVX-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2 -; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; AVX-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0 -; AVX-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; AVX-SLOW-NEXT: retq +; AVX1-SLOW-LABEL: reduction_sum_v4i32_v4i32: +; AVX1-SLOW: # %bb.0: +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] +; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0 +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1] +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3] +; AVX1-SLOW-NEXT: vpaddd %xmm5, %xmm1, %xmm1 +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1] +; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3] +; AVX1-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2 +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1] +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,2,3] +; AVX1-SLOW-NEXT: vpaddd %xmm6, %xmm3, %xmm3 +; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1] +; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; AVX1-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2 +; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0 +; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-SLOW-NEXT: retq ; ; AVX1-FAST-LABEL: reduction_sum_v4i32_v4i32: ; AVX1-FAST: # %bb.0: @@ -1122,6 +1121,28 @@ ; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; AVX1-FAST-NEXT: retq ; +; AVX2-SLOW-LABEL: reduction_sum_v4i32_v4i32: +; AVX2-SLOW: # %bb.0: +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] +; AVX2-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1] +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3] +; AVX2-SLOW-NEXT: vpaddd %xmm5, %xmm1, %xmm1 +; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3] +; AVX2-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2 +; 
AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[2,3,2,3] +; AVX2-SLOW-NEXT: vpaddd %xmm5, %xmm3, %xmm3 +; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3] +; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3] +; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-SLOW-NEXT: vpbroadcastd %xmm3, %xmm1 +; AVX2-SLOW-NEXT: vpbroadcastd %xmm2, %xmm2 +; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] +; AVX2-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0 +; AVX2-SLOW-NEXT: retq +; ; AVX2-FAST-LABEL: reduction_sum_v4i32_v4i32: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3] @@ -1134,7 +1155,7 @@ ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3] ; AVX2-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2 ; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1 -; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2] +; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] ; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; AVX2-FAST-NEXT: retq