Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp @@ -37088,6 +37088,113 @@ return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec); } +static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1, + SDLoc DL, EVT VT, const X86Subtarget &Subtarget) { + // Example of pattern we try to detect: + // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1)))) + //(add (build_vector (extract_elt t, 0), + // (extract_elt t, 2), + // (extract_elt t, 4), + // (extract_elt t, 6)), + // (build_vector (extract_elt t, 1), + // (extract_elt t, 3), + // (extract_elt t, 5), + // (extract_elt t, 7))) + + if (!Subtarget.hasSSE2()) + return SDValue(); + + if (Op0.getOpcode() != ISD::BUILD_VECTOR || + Op1.getOpcode() != ISD::BUILD_VECTOR) + return SDValue(); + + if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 || + VT.getVectorNumElements() < 4 || + !isPowerOf2_32(VT.getVectorNumElements())) + return SDValue(); + + // Check if one of Op0,Op1 is of the form: + // (build_vector (extract_elt Mul, 0), + // (extract_elt Mul, 2), + // (extract_elt Mul, 4), + // ... + // the other is of the form: + // (build_vector (extract_elt Mul, 1), + // (extract_elt Mul, 3), + // (extract_elt Mul, 5), + // ... + // and identify Mul. + SDValue Mul; + for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) { + SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i), + Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1); + // TODO: Be more tolerant to undefs. + if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT || + Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT || + Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT || + Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT) + return SDValue(); + auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1)); + auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1)); + auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1)); + auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1)); + if (!Const0L || !Const1L || !Const0H || !Const1H) + return SDValue(); + unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(), + Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue(); + // Commutativity of mul allows factors of a product to reorder. + if (Idx0L > Idx1L) + std::swap(Idx0L, Idx1L); + if (Idx0H > Idx1H) + std::swap(Idx0H, Idx1H); + // Commutativity of add allows pairs of factors to reorder. + if (Idx0L > Idx0H) { + std::swap(Idx0L, Idx0H); + std::swap(Idx1L, Idx1H); + } + if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 || + Idx1H != 2 * i + 3) + return SDValue(); + if (!Mul) { + // First time an extract_elt's source vector is visited. Must be a MUL + // with 2X number of vector elements than the BUILD_VECTOR. + // Both extracts must be from same MUL. + Mul = Op0L->getOperand(0); + if (Mul->getOpcode() != ISD::MUL || + Mul.getValueType().getVectorNumElements() != 2 * e) + return SDValue(); + } + // Check that the extract is from the same MUL previously seen. + if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) || + Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0)) + return SDValue(); + } + + // Check if the Mul source can be safely shrunk.
+ ShrinkMode Mode; + if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16) + return SDValue(); + + auto PMADDBuilder = [](SelectionDAG &DAG, SDLoc DL, SDValue Op0, + SDValue Op1) { + // Shrink by adding truncate nodes and let DAGCombine fold with the + // sources. + EVT InVT = Op0.getValueType(); + assert(InVT.getScalarType() == MVT::i32 && + "Unexpected scalar element type"); + assert(InVT == Op1.getValueType() && "Operands' types mismatch"); + EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, + InVT.getVectorNumElements() / 2); + EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, + InVT.getVectorNumElements()); + return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, + DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Op0), + DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Op1)); + }; + return SplitBinaryOpsAndApply(DAG, Subtarget, DL, VT, Mul.getOperand(0), + Mul.getOperand(1), PMADDBuilder); +} + static SDValue combineAdd(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) { const SDNodeFlags Flags = N->getFlags(); @@ -37101,6 +37208,9 @@ SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); + if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget)) + return MAdd; + // Try to synthesize horizontal adds from adds of shuffles. if (((Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) || (Subtarget.hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) && Index: llvm/trunk/test/CodeGen/X86/madd.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/madd.ll +++ llvm/trunk/test/CodeGen/X86/madd.ll @@ -316,26 +316,12 @@ define <4 x i32> @pmaddwd_8(<8 x i16> %A, <8 x i16> %B) { ; SSE2-LABEL: pmaddwd_8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pmulhw %xmm1, %xmm2 -; SSE2-NEXT: pmullw %xmm1, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3] -; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: pmaddwd %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: pmaddwd_8: ; AVX: # %bb.0: -; AVX-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vzeroupper +; AVX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %a = sext <8 x i16> %A to <8 x i32> %b = sext <8 x i16> %B to <8 x i32> @@ -346,63 +332,97 @@ ret <4 x i32> %ret } -define <8 x i32> @pmaddwd_16(<16 x i16> %A, <16 x i16> %B) { -; SSE2-LABEL: pmaddwd_16: +define <4 x i32> @pmaddwd_8_swapped(<8 x i16> %A, <8 x i16> %B) { +; SSE2-LABEL: pmaddwd_8_swapped: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: pmulhw %xmm2, %xmm4 +; SSE2-NEXT: pmaddwd %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: pmaddwd_8_swapped: +; AVX: # %bb.0: +; AVX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %a = sext <8 x i16> %A to <8 x i32> + %b = sext <8 x i16> %B to <8 x i32> + %m = mul nsw <8 x i32> %a, %b + %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %ret = add <4 x i32> %even, %odd + ret <4 x i32> %ret +} + +define <4 x i32> @larger_mul(<16 x i16> %A, <16 x
i16> %B) { +; SSE2-LABEL: larger_mul: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pmulhw %xmm2, %xmm1 ; SSE2-NEXT: pmullw %xmm2, %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pmulhw %xmm3, %xmm4 -; SSE2-NEXT: pmullw %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2] -; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm2[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3] -; SSE2-NEXT: paddd %xmm4, %xmm1 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2] ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3] -; SSE2-NEXT: paddd %xmm5, %xmm0 +; SSE2-NEXT: paddd %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; AVX2-LABEL: pmaddwd_16: +; AVX2-LABEL: larger_mul: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vpmovsxwd %xmm1, %ymm3 -; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm0[0,2],ymm2[4,6],ymm0[4,6] -; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3] -; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,3],ymm0[1,3],ymm2[5,7],ymm0[5,7] -; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; -; AVX512-LABEL: pmaddwd_16: +; AVX512-LABEL: larger_mul: ; AVX512: # %bb.0: ; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0 ; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1 ; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512-NEXT: vshufps {{.*#+}} ymm2 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6] -; AVX512-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] -; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7] -; AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0 +; AVX512-NEXT: vpextrd $2, %xmm0, %eax +; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1 +; AVX512-NEXT: vpextrd $2, %xmm2, %eax +; AVX512-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1 +; AVX512-NEXT: vpextrd $3, %xmm0, %eax +; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0 +; AVX512-NEXT: vpextrd $1, %xmm2, %eax +; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0 +; AVX512-NEXT: vpextrd $3, %xmm2, %eax +; AVX512-NEXT: 
vpinsrd $3, %eax, %xmm0, %xmm0 +; AVX512-NEXT: vpaddd %xmm0, %xmm1, %xmm0 +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %a = sext <16 x i16> %A to <16 x i32> %b = sext <16 x i16> %B to <16 x i32> %m = mul nsw <16 x i32> %a, %b + %odd = shufflevector <16 x i32> %m, <16 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6> + %even = shufflevector <16 x i32> %m, <16 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7> + %ret = add <4 x i32> %odd, %even + ret <4 x i32> %ret +} + +define <8 x i32> @pmaddwd_16(<16 x i16> %A, <16 x i16> %B) { +; SSE2-LABEL: pmaddwd_16: +; SSE2: # %bb.0: +; SSE2-NEXT: pmaddwd %xmm2, %xmm0 +; SSE2-NEXT: pmaddwd %xmm3, %xmm1 +; SSE2-NEXT: retq +; +; AVX-LABEL: pmaddwd_16: +; AVX: # %bb.0: +; AVX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 +; AVX-NEXT: retq + %a = sext <16 x i16> %A to <16 x i32> + %b = sext <16 x i16> %B to <16 x i32> + %m = mul nsw <16 x i32> %a, %b %odd = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14> %even = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15> %ret = add <8 x i32> %odd, %even @@ -412,108 +432,28 @@ define <16 x i32> @pmaddwd_32(<32 x i16> %A, <32 x i16> %B) { ; SSE2-LABEL: pmaddwd_32: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm8 -; SSE2-NEXT: pmulhw %xmm4, %xmm8 -; SSE2-NEXT: pmullw %xmm4, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm9 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3] -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pmulhw %xmm5, %xmm4 -; SSE2-NEXT: pmullw %xmm5, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm8 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: pmulhw %xmm6, %xmm4 -; SSE2-NEXT: pmullw %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: pmulhw %xmm7, %xmm4 -; SSE2-NEXT: pmullw %xmm7, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm7[0,2] -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm6[0,2] -; SSE2-NEXT: movdqa %xmm1, %xmm10 -; SSE2-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm8[0,2] -; SSE2-NEXT: movdqa %xmm0, %xmm11 -; SSE2-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm9[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,3],xmm7[1,3] -; SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm6[1,3] -; SSE2-NEXT: paddd %xmm5, %xmm2 -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm8[1,3] -; SSE2-NEXT: paddd %xmm10, %xmm1 -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm9[1,3] -; SSE2-NEXT: paddd %xmm11, %xmm0 +; SSE2-NEXT: pmaddwd %xmm4, %xmm0 +; SSE2-NEXT: pmaddwd %xmm5, %xmm1 +; SSE2-NEXT: pmaddwd %xmm6, %xmm2 +; SSE2-NEXT: pmaddwd %xmm7, %xmm3 ; SSE2-NEXT: retq ; ; AVX2-LABEL: pmaddwd_32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovsxwd %xmm1, %ymm4 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX2-NEXT: vpmovsxwd %xmm1,
%ymm1 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vpmovsxwd %xmm3, %ymm6 -; AVX2-NEXT: vpmulld %ymm6, %ymm4, %ymm4 -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3 -; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3 -; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3 -; AVX2-NEXT: vpmulld %ymm3, %ymm5, %ymm3 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2 -; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vshufps {{.*#+}} ymm2 = ymm4[0,2],ymm1[0,2],ymm4[4,6],ymm1[4,6] -; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] -; AVX2-NEXT: vshufps {{.*#+}} ymm5 = ymm3[0,2],ymm0[0,2],ymm3[4,6],ymm0[4,6] -; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3] -; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm4[1,3],ymm1[1,3],ymm4[5,7],ymm1[5,7] -; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3] -; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1 -; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm3[1,3],ymm0[1,3],ymm3[5,7],ymm0[5,7] -; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0 +; AVX2-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpmaddwd %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: pmaddwd_32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512F-NEXT: vpmulld %zmm2, %zmm0, %zmm0 -; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm2 -; AVX512F-NEXT: vpmulld %zmm2, %zmm1, %zmm1 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30] -; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm3 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31] -; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 -; AVX512F-NEXT: vpaddd %zmm3, %zmm2, %zmm0 +; AVX512F-NEXT: vpmaddwd %ymm3, %ymm1, %ymm1 +; AVX512F-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: pmaddwd_32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm2 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0 -; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm3 -; AVX512BW-NEXT: vpmulld %zmm3, %zmm2, %zmm2 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1 -; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512BW-NEXT: vpmulld %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30] -; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm3 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31] -; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm3 -; AVX512BW-NEXT: vpaddd %zmm3, %zmm1, %zmm0 +; AVX512BW-NEXT: vpmaddwd %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq %a = sext <32 x i16> %A to <32 x i32> %b = sext <32 x i16> %B to <32 x i32> @@ -527,26 +467,12 @@ define <4 x i32> @pmaddwd_const(<8 x i16> %A) { ; SSE2-LABEL: pmaddwd_const: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [32767,32768,0,0,1,7,42,32] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pmulhw %xmm1, %xmm2 -; SSE2-NEXT: pmullw %xmm1, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm0 = 
xmm0[1,3],xmm1[1,3] -; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: pmaddwd {{.*}}(%rip), %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: pmaddwd_const: ; AVX: # %bb.0: -; AVX-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vphaddd %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vzeroupper +; AVX-NEXT: vpmaddwd {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: retq %a = sext <8 x i16> %A to <8 x i32> %m = mul nsw <8 x i32> %a, <i32 32767, i32 -32768, i32 0, i32 0, i32 1, i32 7, i32 42, i32 32> @@ -556,7 +482,7 @@ ret <4 x i32> %ret } -; Check that there is not selection for unsigned multiplication +; Do not select unsigned i16 multiplication define <4 x i32> @pmaddwd_negative1(<8 x i16> %A, <8 x i16> %B) { ; SSE2-LABEL: pmaddwd_negative1: ; SSE2: # %bb.0: @@ -590,7 +516,7 @@ ret <4 x i32> %ret } -; Check that there is not selection for out-of-bounds constant +; Do not select if constant is too large define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) { ; SSE2-LABEL: pmaddwd_negative2: ; SSE2: # %bb.0: @@ -639,28 +565,12 @@ define <4 x i32> @jumbled_indices4(<8 x i16> %A, <8 x i16> %B) { ; SSE2-LABEL: jumbled_indices4: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pmulhw %xmm1, %xmm2 -; SSE2-NEXT: pmullw %xmm1, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm1 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[1,2] -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,3] -; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: pmaddwd %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: jumbled_indices4: ; AVX: # %bb.0: -; AVX-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,1],xmm1[1,2] -; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,3] -; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0 -; AVX-NEXT: vzeroupper +; AVX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %exta = sext <8 x i16> %A to <8 x i32> %extb = sext <8 x i16> %B to <8 x i32> @@ -674,69 +584,14 @@ define <8 x i32> @jumbled_indices8(<16 x i16> %A, <16 x i16> %B) { ; SSE2-LABEL: jumbled_indices8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: pmulhw %xmm2, %xmm4 -; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pmulhw %xmm3, %xmm4 -; SSE2-NEXT: pmullw %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,0],xmm3[3,0] -; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm2[3,0] -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm3[2,1] -; SSE2-NEXT: paddd %xmm4, %xmm1 -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[2,1] -; SSE2-NEXT: paddd %xmm5, %xmm0 +; SSE2-NEXT: pmaddwd %xmm2, %xmm0 +; SSE2-NEXT: pmaddwd %xmm3, %xmm1 ; SSE2-NEXT: retq ; -; AVX2-LABEL: jumbled_indices8: -; AVX2: #
%bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3 -; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3 -; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2 -; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm2[3,0,2,3,7,4,6,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,2] -; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[0,2,3,0,4,6,7,4] -; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,1,2,3,6,5,6,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,2] -; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,1,5,7,6,5] -; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: retq -; -; AVX512-LABEL: jumbled_indices8: -; AVX512: # %bb.0: -; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[3,0,2,3,7,4,6,7] -; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,2] -; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[0,2,3,0,4,6,7,4] -; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,3,2,3] -; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7] -; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7] -; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,2] -; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,1,5,7,6,5] -; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3] -; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] -; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0 -; AVX512-NEXT: retq +; AVX-LABEL: jumbled_indices8: +; AVX: # %bb.0: +; AVX-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 +; AVX-NEXT: retq %exta = sext <16 x i16> %A to <16 x i32> %extb = sext <16 x i16> %B to <16 x i32> %m = mul <16 x i32> %exta, %extb @@ -749,116 +604,28 @@ define <16 x i32> @jumbled_indices16(<32 x i16> %A, <32 x i16> %B) { ; SSE2-LABEL: jumbled_indices16: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm0, %xmm8 -; SSE2-NEXT: pmulhw %xmm4, %xmm8 -; SSE2-NEXT: pmullw %xmm4, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm9 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3] -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pmulhw %xmm5, %xmm4 -; SSE2-NEXT: pmullw %xmm5, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm8 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: pmulhw %xmm6, %xmm4 -; SSE2-NEXT: pmullw %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: pmulhw %xmm7, %xmm4 -; SSE2-NEXT: pmullw %xmm7, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = 
xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3] -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,0],xmm7[3,1] -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,2],xmm6[0,3] -; SSE2-NEXT: movdqa %xmm1, %xmm10 -; SSE2-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm8[3,0] -; SSE2-NEXT: movdqa %xmm0, %xmm11 -; SSE2-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm9[1,2] -; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,1],xmm7[2,0] -; SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm6[1,2] -; SSE2-NEXT: paddd %xmm5, %xmm2 -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm8[2,1] -; SSE2-NEXT: paddd %xmm10, %xmm1 -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm9[0,3] -; SSE2-NEXT: paddd %xmm11, %xmm0 +; SSE2-NEXT: pmaddwd %xmm4, %xmm0 +; SSE2-NEXT: pmaddwd %xmm5, %xmm1 +; SSE2-NEXT: pmaddwd %xmm6, %xmm2 +; SSE2-NEXT: pmaddwd %xmm7, %xmm3 ; SSE2-NEXT: retq ; ; AVX2-LABEL: jumbled_indices16: ; AVX2: # %bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4 -; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4 -; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6 -; AVX2-NEXT: vpmovsxwd %xmm6, %ymm6 -; AVX2-NEXT: vpmulld %ymm6, %ymm4, %ymm4 -; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3 -; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3 -; AVX2-NEXT: vpmulld %ymm3, %ymm5, %ymm3 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2 -; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm4[3,0,3,1,7,4,7,5] -; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[1,2,0,3,5,6,4,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm0[1],ymm3[2],ymm0[3,4],ymm3[5,6],ymm0[7] -; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [2,0,5,6,3,1,7,4] -; AVX2-NEXT: vpermd %ymm5, %ymm6, %ymm5 -; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,1,2,0,6,5,6,4] -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,3,1,2,4,7,5,6] -; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7] -; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1 -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3,4],ymm0[5,6],ymm3[7] -; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [3,1,4,7,2,0,6,5] -; AVX2-NEXT: vpermd %ymm0, %ymm2, %ymm0 -; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0 +; AVX2-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpmaddwd %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: jumbled_indices16: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512F-NEXT: vpmulld %zmm2, %zmm0, %zmm0 -; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm2 -; AVX512F-NEXT: vpmulld %zmm2, %zmm1, %zmm1 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [2,0,5,6,11,9,15,12,17,18,20,23,27,24,31,29] -; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,1,4,7,10,8,14,13,16,19,21,22,26,25,30,28] -; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 -; AVX512F-NEXT: vpaddd %zmm3, %zmm2, %zmm0 +; AVX512F-NEXT: vpmaddwd %ymm3, %ymm1, %ymm1 +; AVX512F-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; 
AVX512F-NEXT: retq ; ; AVX512BW-LABEL: jumbled_indices16: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm2 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0 -; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm3 -; AVX512BW-NEXT: vpmulld %zmm3, %zmm2, %zmm2 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1 -; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512BW-NEXT: vpmulld %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm1 = [2,0,5,6,11,9,15,12,17,18,20,23,27,24,31,29] -; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,1,4,7,10,8,14,13,16,19,21,22,26,25,30,28] -; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm3 -; AVX512BW-NEXT: vpaddd %zmm3, %zmm1, %zmm0 +; AVX512BW-NEXT: vpmaddwd %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq %exta = sext <32 x i16> %A to <32 x i32> %extb = sext <32 x i16> %B to <32 x i32> @@ -872,98 +639,15 @@ define <32 x i32> @jumbled_indices32(<64 x i16> %A, <64 x i16> %B) { ; SSE2-LABEL: jumbled_indices32: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm7, %xmm8 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7 -; SSE2-NEXT: movdqa %xmm0, %xmm15 -; SSE2-NEXT: pmulhw %xmm7, %xmm15 -; SSE2-NEXT: pmullw %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm15[4],xmm7[5],xmm15[5],xmm7[6],xmm15[6],xmm7[7],xmm15[7] -; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3] -; SSE2-NEXT: movdqa %xmm1, %xmm15 -; SSE2-NEXT: pmulhw %xmm9, %xmm15 -; SSE2-NEXT: pmullw %xmm9, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm9 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3] -; SSE2-NEXT: movdqa %xmm2, %xmm15 -; SSE2-NEXT: pmulhw %xmm10, %xmm15 -; SSE2-NEXT: pmullw %xmm10, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm10 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3] -; SSE2-NEXT: movdqa %xmm3, %xmm15 -; SSE2-NEXT: pmulhw %xmm11, %xmm15 -; SSE2-NEXT: pmullw %xmm11, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm11 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3] -; SSE2-NEXT: movdqa %xmm4, %xmm15 -; SSE2-NEXT: pmulhw %xmm13, %xmm15 -; SSE2-NEXT: pmullw %xmm13, %xmm4 -; SSE2-NEXT: movdqa %xmm4, %xmm13 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3] -; SSE2-NEXT: movdqa %xmm5, %xmm15 -; SSE2-NEXT: pmulhw %xmm14, %xmm15 -; SSE2-NEXT: pmullw %xmm14, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm14 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = 
xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3] -; SSE2-NEXT: movdqa %xmm6, %xmm15 -; SSE2-NEXT: pmulhw %xmm12, %xmm15 -; SSE2-NEXT: pmullw %xmm12, %xmm6 -; SSE2-NEXT: movdqa %xmm6, %xmm12 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3] -; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15 -; SSE2-NEXT: movdqa %xmm8, %xmm7 -; SSE2-NEXT: pmulhw %xmm15, %xmm7 -; SSE2-NEXT: pmullw %xmm15, %xmm8 -; SSE2-NEXT: movdqa %xmm8, %xmm15 -; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3] -; SSE2-NEXT: movdqa %xmm8, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,0],xmm15[1,3] -; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,1],xmm15[0,2] -; SSE2-NEXT: paddd %xmm7, %xmm8 -; SSE2-NEXT: movdqa %xmm6, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm12[0,2] -; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm12[1,3] -; SSE2-NEXT: paddd %xmm7, %xmm6 -; SSE2-NEXT: movdqa %xmm5, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm14[1,3] -; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm14[0,2] -; SSE2-NEXT: paddd %xmm7, %xmm5 -; SSE2-NEXT: movdqa %xmm4, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm13[1,2] -; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm13[0,3] -; SSE2-NEXT: paddd %xmm7, %xmm4 -; SSE2-NEXT: movdqa %xmm3, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm11[2,0] -; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm11[3,1] -; SSE2-NEXT: paddd %xmm7, %xmm3 -; SSE2-NEXT: movdqa %xmm2, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,1],xmm10[2,0] -; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm10[3,1] -; SSE2-NEXT: paddd %xmm7, %xmm2 -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm9[2,0] -; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm9[3,1] -; SSE2-NEXT: paddd %xmm7, %xmm1 -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload -; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,2],xmm9[2,1] -; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm9[3,0] -; SSE2-NEXT: paddd %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm8, 112(%rdi) +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm0 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm1 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm2 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm3 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm4 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm5 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm6 +; SSE2-NEXT: pmaddwd {{[0-9]+}}(%rsp), %xmm7 +; SSE2-NEXT: movdqa %xmm7, 112(%rdi) ; SSE2-NEXT: movdqa %xmm6, 96(%rdi) ; SSE2-NEXT: movdqa %xmm5, 80(%rdi) ; SSE2-NEXT: movdqa %xmm4, 64(%rdi) @@ -976,138 +660,26 @@ ; ; AVX2-LABEL: jumbled_indices32: ; AVX2: # %bb.0: -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm8 -; AVX2-NEXT: vpmovsxwd %xmm8, %ymm8 -; AVX2-NEXT: vpmovsxwd %xmm3, %ymm9 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 -; AVX2-NEXT: vpmovsxwd %xmm3, %ymm10 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm11 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX2-NEXT: vpmovsxwd %xmm2, %ymm12 -; AVX2-NEXT: vpmovsxwd %xmm1, %ymm13 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpmovsxwd %xmm1, %ymm14 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm15 -; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: 
vpmulld %ymm0, %ymm8, %ymm3 -; AVX2-NEXT: vpmovsxwd %xmm7, %ymm0 -; AVX2-NEXT: vpmulld %ymm0, %ymm9, %ymm7 -; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vpmulld %ymm0, %ymm10, %ymm2 -; AVX2-NEXT: vpmovsxwd %xmm6, %ymm0 -; AVX2-NEXT: vpmulld %ymm0, %ymm11, %ymm6 -; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vpmulld %ymm0, %ymm12, %ymm1 -; AVX2-NEXT: vpmovsxwd %xmm5, %ymm0 -; AVX2-NEXT: vpmulld %ymm0, %ymm13, %ymm5 -; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm0 -; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 -; AVX2-NEXT: vpmulld %ymm0, %ymm14, %ymm0 -; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4 -; AVX2-NEXT: vpmulld %ymm4, %ymm15, %ymm4 -; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm3[3,0,1,3,7,4,5,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,0,3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm7[2,0,0,2,6,4,4,6] -; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm2[1,3,2,3,5,7,6,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,2] -; AVX2-NEXT: vpshufd {{.*#+}} ymm10 = ymm6[0,2,1,2,4,6,5,6] -; AVX2-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm10 = ymm1[1,3,2,0,5,7,6,4] -; AVX2-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,0,3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm11 = ymm5[3,1,2,0,7,5,6,4] -; AVX2-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm11 = ymm0[2,0,2,3,6,4,6,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,2] -; AVX2-NEXT: vpshufd {{.*#+}} ymm12 = ymm4[1,2,2,1,5,6,6,5] -; AVX2-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7] -; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,1,0,2,6,5,4,6] -; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[3,1,1,3,7,5,5,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5,6,7] -; AVX2-NEXT: vpaddd %ymm3, %ymm8, %ymm3 -; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,2] -; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[1,3,0,3,5,7,4,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7] -; AVX2-NEXT: vpaddd %ymm2, %ymm9, %ymm2 -; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5] -; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,3] -; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,0,3,1,6,4,7,5] -; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7] -; AVX2-NEXT: vpaddd %ymm1, %ymm10, %ymm1 -; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,2,3,7,5,6,7] -; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,2] -; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[0,3,3,0,4,7,7,4] -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,3,2,3] -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7] -; AVX2-NEXT: vpaddd %ymm0, %ymm11, %ymm0 +; AVX2-NEXT: vpmaddwd %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vpmaddwd %ymm5, %ymm1, %ymm1 +; AVX2-NEXT: vpmaddwd %ymm6, %ymm2, %ymm2 +; AVX2-NEXT: vpmaddwd %ymm7, %ymm3, %ymm3 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: jumbled_indices32: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3 -; 
AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512F-NEXT: vpmovsxwd %ymm6, %zmm6 -; AVX512F-NEXT: vpmulld %zmm6, %zmm2, %zmm2 -; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm6 -; AVX512F-NEXT: vpmulld %zmm6, %zmm3, %zmm3 -; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4 -; AVX512F-NEXT: vpmulld %zmm4, %zmm0, %zmm0 -; AVX512F-NEXT: vpmovsxwd %ymm5, %zmm4 -; AVX512F-NEXT: vpmulld %zmm4, %zmm1, %zmm4 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,5,6,9,11,13,15,18,16,20,22,27,24,29,31] -; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm1 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm5 = [1,2,6,5,10,8,14,12,19,17,22,20,25,27,30,28] -; AVX512F-NEXT: vpermi2d %zmm4, %zmm0, %zmm5 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm6 = [1,3,4,7,8,10,12,14,19,17,21,23,26,25,28,30] -; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm6 -; AVX512F-NEXT: vpaddd %zmm6, %zmm1, %zmm1 -; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,3,7,4,11,9,15,13,18,16,23,21,24,26,31,29] -; AVX512F-NEXT: vpermi2d %zmm4, %zmm0, %zmm2 -; AVX512F-NEXT: vpaddd %zmm2, %zmm5, %zmm0 +; AVX512F-NEXT: vpmaddwd %ymm5, %ymm1, %ymm1 +; AVX512F-NEXT: vpmaddwd %ymm4, %ymm0, %ymm0 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpmaddwd %ymm7, %ymm3, %ymm1 +; AVX512F-NEXT: vpmaddwd %ymm6, %ymm2, %ymm2 +; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1 ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: jumbled_indices32: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm4 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1 -; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1 -; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm5 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0 -; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0 -; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm6 -; AVX512BW-NEXT: vpmulld %zmm6, %zmm4, %zmm4 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm3, %ymm3 -; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm3 -; AVX512BW-NEXT: vpmulld %zmm3, %zmm1, %zmm1 -; AVX512BW-NEXT: vpmovsxwd %ymm2, %zmm3 -; AVX512BW-NEXT: vpmulld %zmm3, %zmm5, %zmm3 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm2, %ymm2 -; AVX512BW-NEXT: vpmovsxwd %ymm2, %zmm2 -; AVX512BW-NEXT: vpmulld %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,5,6,9,11,13,15,18,16,20,22,27,24,29,31] -; AVX512BW-NEXT: vpermi2d %zmm1, %zmm4, %zmm2 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm5 = [1,2,6,5,10,8,14,12,19,17,22,20,25,27,30,28] -; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm5 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm6 = [1,3,4,7,8,10,12,14,19,17,21,23,26,25,28,30] -; AVX512BW-NEXT: vpermi2d %zmm1, %zmm4, %zmm6 -; AVX512BW-NEXT: vpaddd %zmm6, %zmm2, %zmm1 -; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,3,7,4,11,9,15,13,18,16,23,21,24,26,31,29] -; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm2 -; AVX512BW-NEXT: vpaddd %zmm2, %zmm5, %zmm0 +; AVX512BW-NEXT: vpmaddwd %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmaddwd %zmm3, %zmm1, %zmm1 ; AVX512BW-NEXT: retq %exta = sext <64 x i16> %A to <64 x i32> %extb = sext <64 x i16> %B to <64 x i32>