Index: llvm/trunk/include/llvm/CodeGen/SelectionDAG.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/SelectionDAG.h
+++ llvm/trunk/include/llvm/CodeGen/SelectionDAG.h
@@ -1535,6 +1535,11 @@
   /// vector op and fill the end of the resulting vector with UNDEFS.
   SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
 
+  /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
+  /// This is a separate function because those opcodes have two results.
+  std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
+                                                     unsigned ResNE = 0);
+
   /// Return true if loads are next to each other and can be
   /// merged. Check that both are nonvolatile and if LD is loading
   /// 'Bytes' bytes from a location that is 'Dist' units away from the
Index: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h
@@ -3888,9 +3888,10 @@
   /// integers as its arguments.
   SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
 
-  /// Method for building the DAG expansion of ISD::[US]MULO, returning the two
-  /// result values as a pair.
-  std::pair<SDValue, SDValue> expandMULO(SDNode *Node, SelectionDAG &DAG) const;
+  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
+  /// expansion was successful and populates the Result and Overflow arguments.
+  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
+                  SelectionDAG &DAG) const;
 
   //===--------------------------------------------------------------------===//
   // Instruction Emitting Hooks
Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3321,9 +3321,11 @@
   }
   case ISD::UMULO:
   case ISD::SMULO: {
-    auto Pair = TLI.expandMULO(Node, DAG);
-    Results.push_back(Pair.first);
-    Results.push_back(Pair.second);
+    SDValue Result, Overflow;
+    if (TLI.expandMULO(Node, Result, Overflow, DAG)) {
+      Results.push_back(Result);
+      Results.push_back(Overflow);
+    }
     break;
   }
   case ISD::BUILD_PAIR: {
Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -140,6 +140,7 @@
   SDValue ExpandFunnelShift(SDValue Op);
   SDValue ExpandROT(SDValue Op);
   SDValue ExpandFMINNUM_FMAXNUM(SDValue Op);
+  SDValue ExpandMULO(SDValue Op);
   SDValue ExpandAddSubSat(SDValue Op);
   SDValue ExpandFixedPointMul(SDValue Op);
   SDValue ExpandStrictFPOp(SDValue Op);
@@ -418,6 +419,8 @@
   case ISD::UMAX:
   case ISD::SMUL_LOHI:
   case ISD::UMUL_LOHI:
+  case ISD::SMULO:
+  case ISD::UMULO:
   case ISD::FCANONICALIZE:
   case ISD::SADDSAT:
   case ISD::UADDSAT:
@@ -779,6 +782,9 @@
   case ISD::FMINNUM:
   case ISD::FMAXNUM:
     return ExpandFMINNUM_FMAXNUM(Op);
+  case ISD::UMULO:
+  case ISD::SMULO:
+    return ExpandMULO(Op);
   case ISD::USUBSAT:
   case ISD::SSUBSAT:
   case ISD::UADDSAT:
@@ -1216,6 +1222,16 @@
   return DAG.UnrollVectorOp(Op.getNode());
 }
 
+SDValue VectorLegalizer::ExpandMULO(SDValue Op) {
+  // Try the generic wide-multiply expansion first; if the target cannot
+  // support it for this type, fall back to unrolling into scalar ops.
+  SDValue Result, Overflow;
+  if (!TLI.expandMULO(Op.getNode(), Result, Overflow, DAG))
+    std::tie(Result, Overflow) = DAG.UnrollVectorOverflowOp(Op.getNode());
+
+  AddLegalizedOperand(Op.getValue(0), Result);
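+  // Register the overflow result as well, so that uses of either value of
+  // the original two-result node are rewritten to the expansion.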
+  AddLegalizedOperand(Op.getValue(1), Overflow);
+  return Op.getResNo() ? Overflow : Result;
+}
+
 SDValue VectorLegalizer::ExpandAddSubSat(SDValue Op) {
   if (SDValue Expanded = TLI.expandAddSubSat(Op.getNode(), DAG))
     return Expanded;
Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8918,6 +8918,50 @@
   return getBuildVector(VecVT, dl, Scalars);
 }
 
+std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
+    SDNode *N, unsigned ResNE) {
+  unsigned Opcode = N->getOpcode();
+  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
+          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
+          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
+         "Expected an overflow opcode");
+
+  EVT ResVT = N->getValueType(0);
+  EVT OvVT = N->getValueType(1);
+  EVT ResEltVT = ResVT.getVectorElementType();
+  EVT OvEltVT = OvVT.getVectorElementType();
+  SDLoc dl(N);
+
+  // If ResNE is 0, fully unroll the vector op.
+  unsigned NE = ResVT.getVectorNumElements();
+  if (ResNE == 0)
+    ResNE = NE;
+  else if (NE > ResNE)
+    NE = ResNE;
+
+  SmallVector<SDValue, 8> LHSScalars;
+  SmallVector<SDValue, 8> RHSScalars;
+  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
+  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
+
+  // Apply the overflow opcode elementwise; result 1 of each scalar node is
+  // the per-element overflow bit.
+  SDVTList VTs = getVTList(ResEltVT, OvEltVT);
+  SmallVector<SDValue, 8> ResScalars;
+  SmallVector<SDValue, 8> OvScalars;
+  for (unsigned i = 0; i < NE; ++i) {
+    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
+    ResScalars.push_back(Res);
+    OvScalars.push_back(SDValue(Res.getNode(), 1));
+  }
+
+  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
+  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
+
+  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
+  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
+  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
+                        getBuildVector(NewOvVT, dl, OvScalars));
+}
+
 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                   LoadSDNode *Base,
                                                   unsigned Bytes,
Index: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5522,11 +5522,15 @@
                      DAG.getConstant(Scale, dl, ShiftTy));
 }
 
-std::pair<SDValue, SDValue> TargetLowering::expandMULO(
-    SDNode *Node, SelectionDAG &DAG) const {
+bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
+                                SDValue &Overflow, SelectionDAG &DAG) const {
   SDLoc dl(Node);
   EVT VT = Node->getValueType(0);
-  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
+  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
+  if (VT.isVector())
+    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
+                              VT.getVectorNumElements());
+
   SDValue LHS = Node->getOperand(0);
   SDValue RHS = Node->getOperand(1);
   SDValue BottomHalf;
@@ -5546,11 +5550,15 @@
     LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
     RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
     SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
-    BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Mul,
-                             DAG.getIntPtrConstant(0, dl));
-    TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Mul,
-                          DAG.getIntPtrConstant(1, dl));
+    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
+    SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl,
+
getShiftAmountTy(WideVT, DAG.getDataLayout())); + TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, + DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt)); } else { + if (VT.isVector()) + return false; + // We can fall back to a libcall with an illegal type for the MUL if we // have a libcall big enough. // Also, we can fall back to a division in some cases, but that's a big @@ -5618,24 +5626,24 @@ } EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); + Result = BottomHalf; if (isSigned) { SDValue ShiftAmt = DAG.getConstant( - VT.getSizeInBits() - 1, dl, + VT.getScalarSizeInBits() - 1, dl, getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout())); SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt); - TopHalf = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE); + Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE); } else { - TopHalf = DAG.getSetCC(dl, SetCCVT, TopHalf, - DAG.getConstant(0, dl, VT), ISD::SETNE); + Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, + DAG.getConstant(0, dl, VT), ISD::SETNE); } // Truncate the result if SetCC returns a larger type than needed. EVT RType = Node->getValueType(1); - if (RType.getSizeInBits() < TopHalf.getValueSizeInBits()) - TopHalf = DAG.getNode(ISD::TRUNCATE, dl, RType, TopHalf); + if (RType.getSizeInBits() < Overflow.getValueSizeInBits()) + Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow); - assert(RType.getSizeInBits() == TopHalf.getValueSizeInBits() && + assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() && "Unexpected result type for S/UMULO legalization"); - - return std::make_pair(BottomHalf, TopHalf); + return true; } Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp @@ -20184,6 +20184,8 @@ std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG); SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG); + if (Op->getValueType(1) != MVT::i8) + SetCC = DAG.getNode(ISD::ZERO_EXTEND, DL, Op->getValueType(1), SetCC); return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC); } Index: llvm/trunk/test/CodeGen/AArch64/vec_umulo.ll =================================================================== --- llvm/trunk/test/CodeGen/AArch64/vec_umulo.ll +++ llvm/trunk/test/CodeGen/AArch64/vec_umulo.ll @@ -0,0 +1,401 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK + +declare {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32>, <1 x i32>) +declare {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32>, <2 x i32>) +declare {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32>, <3 x i32>) +declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>) +declare {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32>, <6 x i32>) +declare {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>) + +declare {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8>, <16 x i8>) +declare {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16>, <8 x i16>) +declare {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64>, <2 x i64>) + +declare {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24>, <4 x i24>) +declare {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1>, <4 x i1>) 
+declare {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128>, <2 x i128>) + +define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind { +; CHECK-LABEL: umulo_v1i32: +; CHECK: // %bb.0: +; CHECK-NEXT: umull v1.2d, v0.2s, v1.2s +; CHECK-NEXT: shrn v0.2s, v1.2d, #32 +; CHECK-NEXT: cmeq v0.2s, v0.2s, #0 +; CHECK-NEXT: mvn v0.8b, v0.8b +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: str s1, [x0] +; CHECK-NEXT: ret + %t = call {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1) + %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0 + %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1 + %res = sext <1 x i1> %obit to <1 x i32> + store <1 x i32> %val, <1 x i32>* %p2 + ret <1 x i32> %res +} + +define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind { +; CHECK-LABEL: umulo_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: umull v1.2d, v0.2s, v1.2s +; CHECK-NEXT: shrn v0.2s, v1.2d, #32 +; CHECK-NEXT: cmeq v0.2s, v0.2s, #0 +; CHECK-NEXT: mvn v0.8b, v0.8b +; CHECK-NEXT: xtn v1.2s, v1.2d +; CHECK-NEXT: str d1, [x0] +; CHECK-NEXT: ret + %t = call {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1) + %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i32> %val, <2 x i32>* %p2 + ret <2 x i32> %res +} + +define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind { +; CHECK-LABEL: umulo_v3i32: +; CHECK: // %bb.0: +; CHECK-NEXT: umull2 v2.2d, v0.4s, v1.4s +; CHECK-NEXT: umull v3.2d, v0.2s, v1.2s +; CHECK-NEXT: mul v1.4s, v0.4s, v1.4s +; CHECK-NEXT: uzp2 v0.4s, v3.4s, v2.4s +; CHECK-NEXT: cmeq v0.4s, v0.4s, #0 +; CHECK-NEXT: add x8, x0, #8 // =8 +; CHECK-NEXT: mvn v0.16b, v0.16b +; CHECK-NEXT: st1 { v1.s }[2], [x8] +; CHECK-NEXT: str d1, [x0] +; CHECK-NEXT: ret + %t = call {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1) + %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0 + %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1 + %res = sext <3 x i1> %obit to <3 x i32> + store <3 x i32> %val, <3 x i32>* %p2 + ret <3 x i32> %res +} + +define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind { +; CHECK-LABEL: umulo_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: umull2 v2.2d, v0.4s, v1.4s +; CHECK-NEXT: umull v3.2d, v0.2s, v1.2s +; CHECK-NEXT: uzp2 v2.4s, v3.4s, v2.4s +; CHECK-NEXT: cmeq v2.4s, v2.4s, #0 +; CHECK-NEXT: mvn v2.16b, v2.16b +; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s +; CHECK-NEXT: str q0, [x0] +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %t = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1) + %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i32> %val, <4 x i32>* %p2 + ret <4 x i32> %res +} + +define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind { +; CHECK-LABEL: umulo_v6i32: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov s2, w6 +; CHECK-NEXT: ldr s0, [sp, #16] +; CHECK-NEXT: mov x9, sp +; CHECK-NEXT: mov v2.s[1], w7 +; CHECK-NEXT: ld1 { v2.s }[2], [x9] +; CHECK-NEXT: add x8, sp, #24 // =24 +; CHECK-NEXT: add x10, sp, #8 // =8 +; CHECK-NEXT: ld1 { v0.s }[1], [x8] +; CHECK-NEXT: fmov s3, w0 +; CHECK-NEXT: ldr x11, [sp, #32] +; CHECK-NEXT: ld1 { v2.s }[3], [x10] +; CHECK-NEXT: fmov s1, w4 +; CHECK-NEXT: mov v3.s[1], w1 +; CHECK-NEXT: mov 
v1.s[1], w5 +; CHECK-NEXT: mov v3.s[2], w2 +; CHECK-NEXT: mov v3.s[3], w3 +; CHECK-NEXT: umull2 v4.2d, v1.4s, v0.4s +; CHECK-NEXT: umull v5.2d, v1.2s, v0.2s +; CHECK-NEXT: mul v0.4s, v1.4s, v0.4s +; CHECK-NEXT: uzp2 v1.4s, v5.4s, v4.4s +; CHECK-NEXT: str d0, [x11, #16] +; CHECK-NEXT: umull2 v0.2d, v3.4s, v2.4s +; CHECK-NEXT: umull v4.2d, v3.2s, v2.2s +; CHECK-NEXT: uzp2 v0.4s, v4.4s, v0.4s +; CHECK-NEXT: cmeq v1.4s, v1.4s, #0 +; CHECK-NEXT: cmeq v0.4s, v0.4s, #0 +; CHECK-NEXT: mvn v1.16b, v1.16b +; CHECK-NEXT: mvn v0.16b, v0.16b +; CHECK-NEXT: mul v2.4s, v3.4s, v2.4s +; CHECK-NEXT: mov w5, v1.s[1] +; CHECK-NEXT: mov w1, v0.s[1] +; CHECK-NEXT: mov w2, v0.s[2] +; CHECK-NEXT: mov w3, v0.s[3] +; CHECK-NEXT: fmov w4, s1 +; CHECK-NEXT: fmov w0, s0 +; CHECK-NEXT: str q2, [x11] +; CHECK-NEXT: ret + %t = call {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1) + %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0 + %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1 + %res = sext <6 x i1> %obit to <6 x i32> + store <6 x i32> %val, <6 x i32>* %p2 + ret <6 x i32> %res +} + +define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind { +; CHECK-LABEL: umulo_v8i32: +; CHECK: // %bb.0: +; CHECK-NEXT: umull2 v4.2d, v0.4s, v2.4s +; CHECK-NEXT: umull v5.2d, v0.2s, v2.2s +; CHECK-NEXT: umull2 v6.2d, v1.4s, v3.4s +; CHECK-NEXT: mul v2.4s, v0.4s, v2.4s +; CHECK-NEXT: umull v0.2d, v1.2s, v3.2s +; CHECK-NEXT: mul v3.4s, v1.4s, v3.4s +; CHECK-NEXT: uzp2 v1.4s, v5.4s, v4.4s +; CHECK-NEXT: uzp2 v0.4s, v0.4s, v6.4s +; CHECK-NEXT: cmeq v1.4s, v1.4s, #0 +; CHECK-NEXT: cmeq v4.4s, v0.4s, #0 +; CHECK-NEXT: mvn v0.16b, v1.16b +; CHECK-NEXT: mvn v1.16b, v4.16b +; CHECK-NEXT: stp q2, q3, [x0] +; CHECK-NEXT: ret + %t = call {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1) + %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0 + %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1 + %res = sext <8 x i1> %obit to <8 x i32> + store <8 x i32> %val, <8 x i32>* %p2 + ret <8 x i32> %res +} + +define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind { +; CHECK-LABEL: umulo_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: umull2 v2.8h, v0.16b, v1.16b +; CHECK-NEXT: umull v3.8h, v0.8b, v1.8b +; CHECK-NEXT: mul v4.16b, v0.16b, v1.16b +; CHECK-NEXT: uzp2 v0.16b, v3.16b, v2.16b +; CHECK-NEXT: cmeq v0.16b, v0.16b, #0 +; CHECK-NEXT: mvn v0.16b, v0.16b +; CHECK-NEXT: zip1 v1.8b, v0.8b, v0.8b +; CHECK-NEXT: zip2 v2.8b, v0.8b, v0.8b +; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 +; CHECK-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-NEXT: ushll v2.4s, v2.4h, #0 +; CHECK-NEXT: zip1 v3.8b, v0.8b, v0.8b +; CHECK-NEXT: zip2 v0.8b, v0.8b, v0.8b +; CHECK-NEXT: shl v1.4s, v1.4s, #31 +; CHECK-NEXT: shl v2.4s, v2.4s, #31 +; CHECK-NEXT: ushll v3.4s, v3.4h, #0 +; CHECK-NEXT: ushll v5.4s, v0.4h, #0 +; CHECK-NEXT: sshr v0.4s, v1.4s, #31 +; CHECK-NEXT: sshr v1.4s, v2.4s, #31 +; CHECK-NEXT: shl v2.4s, v3.4s, #31 +; CHECK-NEXT: shl v3.4s, v5.4s, #31 +; CHECK-NEXT: sshr v2.4s, v2.4s, #31 +; CHECK-NEXT: sshr v3.4s, v3.4s, #31 +; CHECK-NEXT: str q4, [x0] +; CHECK-NEXT: ret + %t = call {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1) + %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0 + %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1 + %res = sext <16 x i1> %obit to <16 x i32> + store <16 x i8> %val, <16 x i8>* %p2 + ret <16 x i32> %res +} + +define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind { +; 
CHECK-LABEL: umulo_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: umull2 v2.4s, v0.8h, v1.8h +; CHECK-NEXT: umull v3.4s, v0.4h, v1.4h +; CHECK-NEXT: mul v4.8h, v0.8h, v1.8h +; CHECK-NEXT: uzp2 v0.8h, v3.8h, v2.8h +; CHECK-NEXT: cmeq v0.8h, v0.8h, #0 +; CHECK-NEXT: mvn v0.16b, v0.16b +; CHECK-NEXT: xtn v0.8b, v0.8h +; CHECK-NEXT: zip1 v1.8b, v0.8b, v0.8b +; CHECK-NEXT: zip2 v0.8b, v0.8b, v0.8b +; CHECK-NEXT: ushll v1.4s, v1.4h, #0 +; CHECK-NEXT: ushll v0.4s, v0.4h, #0 +; CHECK-NEXT: shl v1.4s, v1.4s, #31 +; CHECK-NEXT: shl v2.4s, v0.4s, #31 +; CHECK-NEXT: sshr v0.4s, v1.4s, #31 +; CHECK-NEXT: sshr v1.4s, v2.4s, #31 +; CHECK-NEXT: str q4, [x0] +; CHECK-NEXT: ret + %t = call {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1) + %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0 + %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1 + %res = sext <8 x i1> %obit to <8 x i32> + store <8 x i16> %val, <8 x i16>* %p2 + ret <8 x i32> %res +} + +define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind { +; CHECK-LABEL: umulo_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: fmov x10, d1 +; CHECK-NEXT: fmov x11, d0 +; CHECK-NEXT: mov x8, v1.d[1] +; CHECK-NEXT: mov x9, v0.d[1] +; CHECK-NEXT: umulh x12, x11, x10 +; CHECK-NEXT: mul x10, x11, x10 +; CHECK-NEXT: cmp xzr, x12 +; CHECK-NEXT: umulh x11, x9, x8 +; CHECK-NEXT: mul x8, x9, x8 +; CHECK-NEXT: cset w9, ne +; CHECK-NEXT: cmp xzr, x11 +; CHECK-NEXT: fmov d1, x10 +; CHECK-NEXT: fmov s0, w9 +; CHECK-NEXT: cset w9, ne +; CHECK-NEXT: mov v0.s[1], w9 +; CHECK-NEXT: mov v1.d[1], x8 +; CHECK-NEXT: str q1, [x0] +; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 +; CHECK-NEXT: ret + %t = call {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1) + %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i64> %val, <2 x i64>* %p2 + ret <2 x i32> %res +} + +define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind { +; CHECK-LABEL: umulo_v4i24: +; CHECK: // %bb.0: +; CHECK-NEXT: bic v1.4s, #255, lsl #24 +; CHECK-NEXT: bic v0.4s, #255, lsl #24 +; CHECK-NEXT: umull2 v2.2d, v0.4s, v1.4s +; CHECK-NEXT: umull v3.2d, v0.2s, v1.2s +; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s +; CHECK-NEXT: uzp2 v1.4s, v3.4s, v2.4s +; CHECK-NEXT: ushr v2.4s, v0.4s, #24 +; CHECK-NEXT: mov w8, v0.s[3] +; CHECK-NEXT: mov w9, v0.s[2] +; CHECK-NEXT: mov w10, v0.s[1] +; CHECK-NEXT: fmov w11, s0 +; CHECK-NEXT: cmeq v0.4s, v1.4s, #0 +; CHECK-NEXT: cmeq v1.4s, v2.4s, #0 +; CHECK-NEXT: mvn v0.16b, v0.16b +; CHECK-NEXT: mvn v1.16b, v1.16b +; CHECK-NEXT: xtn v0.4h, v0.4s +; CHECK-NEXT: xtn v1.4h, v1.4s +; CHECK-NEXT: sturh w8, [x0, #9] +; CHECK-NEXT: lsr w8, w8, #16 +; CHECK-NEXT: orr v0.8b, v1.8b, v0.8b +; CHECK-NEXT: strh w9, [x0, #6] +; CHECK-NEXT: sturh w10, [x0, #3] +; CHECK-NEXT: lsr w9, w9, #16 +; CHECK-NEXT: lsr w10, w10, #16 +; CHECK-NEXT: strb w8, [x0, #11] +; CHECK-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-NEXT: lsr w8, w11, #16 +; CHECK-NEXT: strh w11, [x0] +; CHECK-NEXT: strb w9, [x0, #8] +; CHECK-NEXT: strb w10, [x0, #5] +; CHECK-NEXT: strb w8, [x0, #2] +; CHECK-NEXT: ret + %t = call {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1) + %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i24> %val, <4 x i24>* %p2 + ret <4 x i32> %res +} + +define <4 x i32> @umulo_v4i1(<4 
x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind { +; CHECK-LABEL: umulo_v4i1: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v2.4h, #1 +; CHECK-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: umull v0.4s, v0.4h, v1.4h +; CHECK-NEXT: xtn v1.4h, v0.4s +; CHECK-NEXT: umov w9, v1.h[1] +; CHECK-NEXT: umov w8, v1.h[0] +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: bfi w8, w9, #1, #1 +; CHECK-NEXT: umov w9, v1.h[2] +; CHECK-NEXT: ushr v0.4h, v1.4h, #1 +; CHECK-NEXT: and w9, w9, #0x1 +; CHECK-NEXT: cmeq v0.4h, v0.4h, #0 +; CHECK-NEXT: bfi w8, w9, #2, #1 +; CHECK-NEXT: umov w9, v1.h[3] +; CHECK-NEXT: mvn v0.8b, v0.8b +; CHECK-NEXT: bfi w8, w9, #3, #29 +; CHECK-NEXT: sshll v0.4s, v0.4h, #0 +; CHECK-NEXT: and w8, w8, #0xf +; CHECK-NEXT: strb w8, [x0] +; CHECK-NEXT: ret + %t = call {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1) + %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i1> %val, <4 x i1>* %p2 + ret <4 x i32> %res +} + +define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind { +; CHECK-LABEL: umulo_v2i128: +; CHECK: // %bb.0: +; CHECK-NEXT: mul x9, x7, x2 +; CHECK-NEXT: umulh x10, x2, x6 +; CHECK-NEXT: madd x9, x3, x6, x9 +; CHECK-NEXT: add x15, x10, x9 +; CHECK-NEXT: cmp x15, x10 +; CHECK-NEXT: cset w10, lo +; CHECK-NEXT: cmp x9, #0 // =0 +; CHECK-NEXT: csel w10, wzr, w10, eq +; CHECK-NEXT: cmp x7, #0 // =0 +; CHECK-NEXT: umulh x11, x3, x6 +; CHECK-NEXT: mul x13, x5, x0 +; CHECK-NEXT: cset w17, ne +; CHECK-NEXT: cmp x3, #0 // =0 +; CHECK-NEXT: umulh x12, x7, x2 +; CHECK-NEXT: umulh x9, x0, x4 +; CHECK-NEXT: madd x13, x1, x4, x13 +; CHECK-NEXT: cset w18, ne +; CHECK-NEXT: cmp xzr, x11 +; CHECK-NEXT: ldr x8, [sp] +; CHECK-NEXT: add x11, x9, x13 +; CHECK-NEXT: and w17, w18, w17 +; CHECK-NEXT: cset w18, ne +; CHECK-NEXT: cmp xzr, x12 +; CHECK-NEXT: orr w12, w17, w18 +; CHECK-NEXT: cset w17, ne +; CHECK-NEXT: cmp x11, x9 +; CHECK-NEXT: orr w9, w12, w17 +; CHECK-NEXT: cset w12, lo +; CHECK-NEXT: cmp x13, #0 // =0 +; CHECK-NEXT: mul x14, x2, x6 +; CHECK-NEXT: csel w12, wzr, w12, eq +; CHECK-NEXT: cmp x5, #0 // =0 +; CHECK-NEXT: stp x14, x15, [x8, #16] +; CHECK-NEXT: umulh x14, x1, x4 +; CHECK-NEXT: cset w13, ne +; CHECK-NEXT: cmp x1, #0 // =0 +; CHECK-NEXT: umulh x16, x5, x0 +; CHECK-NEXT: cset w17, ne +; CHECK-NEXT: cmp xzr, x14 +; CHECK-NEXT: and w13, w17, w13 +; CHECK-NEXT: cset w14, ne +; CHECK-NEXT: cmp xzr, x16 +; CHECK-NEXT: orr w13, w13, w14 +; CHECK-NEXT: cset w14, ne +; CHECK-NEXT: orr w13, w13, w14 +; CHECK-NEXT: orr w12, w13, w12 +; CHECK-NEXT: orr w9, w9, w10 +; CHECK-NEXT: fmov s0, w12 +; CHECK-NEXT: mov v0.s[1], w9 +; CHECK-NEXT: shl v0.2s, v0.2s, #31 +; CHECK-NEXT: mul x15, x0, x4 +; CHECK-NEXT: sshr v0.2s, v0.2s, #31 +; CHECK-NEXT: stp x15, x11, [x8] +; CHECK-NEXT: ret + %t = call {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1) + %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i128> %val, <2 x i128>* %p2 + ret <2 x i32> %res +} Index: llvm/trunk/test/CodeGen/X86/vec_smulo.ll =================================================================== --- llvm/trunk/test/CodeGen/X86/vec_smulo.ll +++ llvm/trunk/test/CodeGen/X86/vec_smulo.ll @@ -0,0 +1,2770 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512 + +declare {<1 x i32>, <1 x i1>} @llvm.smul.with.overflow.v1i32(<1 x i32>, <1 x i32>) +declare {<2 x i32>, <2 x i1>} @llvm.smul.with.overflow.v2i32(<2 x i32>, <2 x i32>) +declare {<3 x i32>, <3 x i1>} @llvm.smul.with.overflow.v3i32(<3 x i32>, <3 x i32>) +declare {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32>, <4 x i32>) +declare {<6 x i32>, <6 x i1>} @llvm.smul.with.overflow.v6i32(<6 x i32>, <6 x i32>) +declare {<8 x i32>, <8 x i1>} @llvm.smul.with.overflow.v8i32(<8 x i32>, <8 x i32>) +declare {<16 x i32>, <16 x i1>} @llvm.smul.with.overflow.v16i32(<16 x i32>, <16 x i32>) + +declare {<16 x i8>, <16 x i1>} @llvm.smul.with.overflow.v16i8(<16 x i8>, <16 x i8>) +declare {<8 x i16>, <8 x i1>} @llvm.smul.with.overflow.v8i16(<8 x i16>, <8 x i16>) +declare {<2 x i64>, <2 x i1>} @llvm.smul.with.overflow.v2i64(<2 x i64>, <2 x i64>) + +declare {<4 x i24>, <4 x i1>} @llvm.smul.with.overflow.v4i24(<4 x i24>, <4 x i24>) +declare {<4 x i1>, <4 x i1>} @llvm.smul.with.overflow.v4i1(<4 x i1>, <4 x i1>) +declare {<2 x i128>, <2 x i1>} @llvm.smul.with.overflow.v2i128(<2 x i128>, <2 x i128>) + +define <1 x i32> @smulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind { +; SSE-LABEL: smulo_v1i32: +; SSE: # %bb.0: +; SSE-NEXT: xorl %eax, %eax +; SSE-NEXT: imull %esi, %edi +; SSE-NEXT: seto %al +; SSE-NEXT: negl %eax +; SSE-NEXT: movl %edi, (%rdx) +; SSE-NEXT: retq +; +; AVX-LABEL: smulo_v1i32: +; AVX: # %bb.0: +; AVX-NEXT: xorl %eax, %eax +; AVX-NEXT: imull %esi, %edi +; AVX-NEXT: seto %al +; AVX-NEXT: negl %eax +; AVX-NEXT: movl %edi, (%rdx) +; AVX-NEXT: retq + %t = call {<1 x i32>, <1 x i1>} @llvm.smul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1) + %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0 + %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1 + %res = sext <1 x i1> %obit to <1 x i32> + store <1 x i32> %val, <1 x i32>* %p2 + ret <1 x i32> %res +} + +define <2 x i32> @smulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind { +; SSE2-LABEL: smulo_v2i32: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $32, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE2-NEXT: movq %xmm1, %r8 +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: movq %xmm2, %rdx +; SSE2-NEXT: movq %xmm1, %rsi +; SSE2-NEXT: xorl %eax, %eax +; SSE2-NEXT: imulq %rdx, %rsi +; SSE2-NEXT: seto %al +; SSE2-NEXT: movq %rsi, %xmm1 +; SSE2-NEXT: imulq %r8, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; 
SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: pcmpeqd %xmm1, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movq %rax, %xmm0 +; SSE2-NEXT: seto %al +; SSE2-NEXT: movzbl %al, %eax +; SSE2-NEXT: movq %rax, %xmm3 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: movq %xmm1, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v2i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: psllq $32, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3] +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSSE3-NEXT: movq %xmm1, %r8 +; SSSE3-NEXT: psllq $32, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: movq %xmm2, %rdx +; SSSE3-NEXT: movq %xmm1, %rsi +; SSSE3-NEXT: xorl %eax, %eax +; SSSE3-NEXT: imulq %rdx, %rsi +; SSSE3-NEXT: seto %al +; SSSE3-NEXT: movq %rsi, %xmm1 +; SSSE3-NEXT: imulq %r8, %rcx +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: psllq $32, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSSE3-NEXT: pand %xmm2, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm0, %xmm2 +; SSSE3-NEXT: movq %rax, %xmm0 +; SSSE3-NEXT: seto %al +; SSSE3-NEXT: movzbl %al, %eax +; SSSE3-NEXT: movq %rax, %xmm3 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSSE3-NEXT: por %xmm2, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: movq %xmm1, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v2i32: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psllq $32, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; SSE41-NEXT: movq %xmm2, %r8 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psllq $32, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] +; SSE41-NEXT: movq %xmm1, %rcx +; SSE41-NEXT: pextrq $1, %xmm2, %rdx +; SSE41-NEXT: pextrq $1, %xmm1, %rsi +; SSE41-NEXT: xorl %eax, %eax +; SSE41-NEXT: imulq %rdx, %rsi +; SSE41-NEXT: seto %al +; SSE41-NEXT: movq %rsi, %xmm0 +; SSE41-NEXT: xorl %edx, %edx +; SSE41-NEXT: imulq %r8, %rcx +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllq $32, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 
= xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] +; SSE41-NEXT: pcmpeqq %xmm1, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: pxor %xmm0, %xmm2 +; SSE41-NEXT: movq %rax, %xmm3 +; SSE41-NEXT: seto %dl +; SSE41-NEXT: movq %rdx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSE41-NEXT: por %xmm2, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE41-NEXT: movq %xmm1, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllq $32, %xmm1, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vmovq %xmm1, %r8 +; AVX1-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vmovq %xmm0, %rcx +; AVX1-NEXT: vpextrq $1, %xmm1, %rdx +; AVX1-NEXT: vpextrq $1, %xmm0, %rsi +; AVX1-NEXT: xorl %eax, %eax +; AVX1-NEXT: imulq %rdx, %rsi +; AVX1-NEXT: seto %al +; AVX1-NEXT: vmovq %rsi, %xmm0 +; AVX1-NEXT: xorl %edx, %edx +; AVX1-NEXT: imulq %r8, %rcx +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; AVX1-NEXT: vpsllq $32, %xmm1, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] +; AVX1-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: seto %dl +; AVX1-NEXT: vmovq %rdx, %xmm3 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] +; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; AVX1-NEXT: vmovq %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllq $32, %xmm1, %xmm2 +; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX2-NEXT: vmovq %xmm1, %r8 +; AVX2-NEXT: vpsllq $32, %xmm0, %xmm2 +; AVX2-NEXT: vpsrad $31, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vmovq %xmm0, %rcx +; AVX2-NEXT: vpextrq $1, %xmm1, %rdx +; AVX2-NEXT: vpextrq $1, %xmm0, %rsi +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: imulq %rdx, %rsi +; AVX2-NEXT: seto %al +; AVX2-NEXT: vmovq %rsi, %xmm0 +; AVX2-NEXT: xorl %edx, %edx +; AVX2-NEXT: imulq %r8, %rcx +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; AVX2-NEXT: vpsllq $32, %xmm1, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] +; AVX2-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: seto %dl +; AVX2-NEXT: vmovq %rdx, %xmm3 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0] +; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; AVX2-NEXT: vmovq %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX512-NEXT: vpsraq $32, %xmm1, %xmm1 +; AVX512-NEXT: vmovq %xmm1, %rax +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $32, %xmm0, %xmm0 +; AVX512-NEXT: vmovq %xmm0, %rcx +; AVX512-NEXT: vpextrq $1, %xmm1, %rdx +; AVX512-NEXT: vpextrq $1, %xmm0, %rsi +; AVX512-NEXT: imulq %rdx, %rsi +; AVX512-NEXT: seto %dl +; AVX512-NEXT: vmovq %rsi, %xmm0 +; 
AVX512-NEXT: imulq %rax, %rcx +; AVX512-NEXT: vmovq %rcx, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vpsllq $32, %xmm0, %xmm1 +; AVX512-NEXT: vpsraq $32, %xmm1, %xmm1 +; AVX512-NEXT: vpcmpneqq %xmm0, %xmm1, %k0 +; AVX512-NEXT: kmovd %edx, %k1 +; AVX512-NEXT: kshiftlw $1, %k1, %k1 +; AVX512-NEXT: seto %al +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: kmovw %eax, %k2 +; AVX512-NEXT: korw %k1, %k2, %k1 +; AVX512-NEXT: korw %k1, %k0, %k1 +; AVX512-NEXT: vpmovqd %xmm0, (%rdi) +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %t = call {<2 x i32>, <2 x i1>} @llvm.smul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1) + %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i32> %val, <2 x i32>* %p2 + ret <2 x i32> %res +} + +define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind { +; SSE2-LABEL: smulo_v3i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm0, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: paddd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE2-NEXT: psubd %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movq %xmm0, (%rdi) +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: movd %xmm1, 8(%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v3i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3 +; SSSE3-NEXT: pand %xmm0, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2 +; SSSE3-NEXT: pand %xmm1, %xmm2 +; SSSE3-NEXT: paddd %xmm3, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSSE3-NEXT: psubd %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSSE3-NEXT: movq %xmm0, (%rdi) +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: psrad $31, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSSE3-NEXT: movd %xmm1, 8(%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v3i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; 
SSE41-NEXT: pmuldq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] +; SSE41-NEXT: pmulld %xmm1, %xmm0 +; SSE41-NEXT: pextrd $2, %xmm0, 8(%rdi) +; SSE41-NEXT: movq %xmm0, (%rdi) +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE41-NEXT: pxor %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v3i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpextrd $2, %xmm1, 8(%rdi) +; AVX1-NEXT: vmovq %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v3i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpextrd $2, %xmm1, 8(%rdi) +; AVX2-NEXT: vmovq %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v3i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX512-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX512-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX512-NEXT: vpcmpneqd %xmm0, %xmm2, %k1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vpextrd $2, %xmm1, 8(%rdi) +; AVX512-NEXT: vmovq %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<3 x i32>, <3 x i1>} @llvm.smul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1) + %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0 + %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1 + %res = sext <3 x i1> %obit to <3 x i32> + store <3 x i32> %val, <3 x i32>* %p2 + ret <3 x i32> %res +} + +define <4 x i32> @smulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind { +; SSE2-LABEL: smulo_v4i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm0, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: paddd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE2-NEXT: psubd %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = 
xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, (%rdi) +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v4i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3 +; SSSE3-NEXT: pand %xmm0, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2 +; SSSE3-NEXT: pand %xmm1, %xmm2 +; SSSE3-NEXT: paddd %xmm3, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSSE3-NEXT: psubd %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSSE3-NEXT: movdqa %xmm0, (%rdi) +; SSSE3-NEXT: movdqa %xmm0, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSSE3-NEXT: pxor %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v4i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuldq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] +; SSE41-NEXT: pmulld %xmm1, %xmm0 +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE41-NEXT: pxor %xmm1, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v4i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX512-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX512-NEXT: vpmuldq %xmm1, %xmm0, 
%xmm3 +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX512-NEXT: vpcmpneqd %xmm0, %xmm2, %k1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1) + %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i32> %val, <4 x i32>* %p2 + ret <4 x i32> %res +} + +define <6 x i32> @smulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind { +; SSE2-LABEL: smulo_v6i32: +; SSE2: # %bb.0: +; SSE2-NEXT: movq %rdi, %rax +; SSE2-NEXT: movd %r8d, %xmm9 +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1] +; SSE2-NEXT: movd %edx, %xmm6 +; SSE2-NEXT: movd %esi, %xmm5 +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0] +; SSE2-NEXT: movd {{.*#+}} xmm10 = mem[0],zero,zero,zero +; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1] +; SSE2-NEXT: movd {{.*#+}} xmm7 = mem[0],zero,zero,zero +; SSE2-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] +; SSE2-NEXT: movd %r9d, %xmm12 +; SSE2-NEXT: movd {{.*#+}} xmm11 = mem[0],zero,zero,zero +; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1] +; SSE2-NEXT: movd {{.*#+}} xmm8 = mem[0],zero,zero,zero +; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pcmpgtd %xmm3, %xmm0 +; SSE2-NEXT: pand %xmm5, %xmm0 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtd %xmm5, %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: pmuludq %xmm5, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm10[0,0] +; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm9[0,0] +; SSE2-NEXT: pmuludq %xmm7, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] +; SSE2-NEXT: psubd %xmm1, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm3[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] +; SSE2-NEXT: movdqa %xmm7, (%rcx) +; SSE2-NEXT: psrad $31, %xmm7 +; SSE2-NEXT: pcmpeqd %xmm5, %xmm7 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: pxor %xmm0, %xmm7 +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm1 +; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pcmpgtd %xmm12, %xmm4 +; SSE2-NEXT: pand %xmm2, %xmm4 +; SSE2-NEXT: paddd %xmm1, %xmm4 +; SSE2-NEXT: pmuludq %xmm12, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] +; SSE2-NEXT: pmuludq %xmm8, %xmm11 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-NEXT: psubd %xmm4, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = 
xmm2[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE2-NEXT: movq %xmm2, 16(%rcx) +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm1, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movq %xmm2, 16(%rdi) +; SSE2-NEXT: movdqa %xmm7, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v6i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movq %rdi, %rax +; SSSE3-NEXT: movd %r8d, %xmm9 +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1] +; SSSE3-NEXT: movd %edx, %xmm6 +; SSSE3-NEXT: movd %esi, %xmm5 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0] +; SSSE3-NEXT: movd {{.*#+}} xmm10 = mem[0],zero,zero,zero +; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1] +; SSSE3-NEXT: movd {{.*#+}} xmm7 = mem[0],zero,zero,zero +; SSSE3-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] +; SSSE3-NEXT: movd %r9d, %xmm12 +; SSSE3-NEXT: movd {{.*#+}} xmm11 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1] +; SSSE3-NEXT: movd {{.*#+}} xmm8 = mem[0],zero,zero,zero +; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1] +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSSE3-NEXT: pxor %xmm0, %xmm0 +; SSSE3-NEXT: pcmpgtd %xmm3, %xmm0 +; SSSE3-NEXT: pand %xmm5, %xmm0 +; SSSE3-NEXT: pxor %xmm1, %xmm1 +; SSSE3-NEXT: pcmpgtd %xmm5, %xmm1 +; SSSE3-NEXT: pand %xmm3, %xmm1 +; SSSE3-NEXT: paddd %xmm0, %xmm1 +; SSSE3-NEXT: pmuludq %xmm5, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSSE3-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm10[0,0] +; SSSE3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm9[0,0] +; SSSE3-NEXT: pmuludq %xmm7, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1] +; SSSE3-NEXT: psubd %xmm1, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm3[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] +; SSSE3-NEXT: movdqa %xmm7, (%rcx) +; SSSE3-NEXT: psrad $31, %xmm7 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm7 +; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSSE3-NEXT: pxor %xmm0, %xmm7 +; SSSE3-NEXT: pxor %xmm1, %xmm1 +; SSSE3-NEXT: pcmpgtd %xmm2, %xmm1 +; SSSE3-NEXT: pand %xmm12, %xmm1 +; SSSE3-NEXT: pcmpgtd %xmm12, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 +; SSSE3-NEXT: paddd %xmm1, %xmm4 +; SSSE3-NEXT: pmuludq %xmm12, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] +; SSSE3-NEXT: pmuludq %xmm8, %xmm11 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm11[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSSE3-NEXT: psubd %xmm4, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSSE3-NEXT: movq %xmm2, 16(%rcx) +; SSSE3-NEXT: psrad $31, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm2 +; SSSE3-NEXT: pxor %xmm0, %xmm2 +; SSSE3-NEXT: movq %xmm2, 16(%rdi) +; SSSE3-NEXT: movdqa %xmm7, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: 
smulo_v6i32: +; SSE41: # %bb.0: +; SSE41-NEXT: movq %rdi, %rax +; SSE41-NEXT: movd %esi, %xmm3 +; SSE41-NEXT: pinsrd $1, %edx, %xmm3 +; SSE41-NEXT: pinsrd $2, %ecx, %xmm3 +; SSE41-NEXT: pinsrd $3, %r8d, %xmm3 +; SSE41-NEXT: movd %r9d, %xmm0 +; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm0 +; SSE41-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm1 +; SSE41-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm2 +; SSE41-NEXT: pinsrd $2, {{[0-9]+}}(%rsp), %xmm2 +; SSE41-NEXT: pinsrd $3, {{[0-9]+}}(%rsp), %xmm2 +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pmuldq %xmm2, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pmulld %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, (%rcx) +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: pmuldq %xmm1, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pmulld %xmm0, %xmm1 +; SSE41-NEXT: movq %xmm1, 16(%rcx) +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm1 +; SSE41-NEXT: pxor %xmm3, %xmm1 +; SSE41-NEXT: movq %xmm1, 16(%rdi) +; SSE41-NEXT: movdqa %xmm2, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v6i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpmuldq %xmm2, %xmm4, %xmm5 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7] +; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm4 +; AVX1-NEXT: vpcmpeqd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm5, %xmm6, %xmm5 +; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7] +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm5, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vmovq %xmm2, 16(%rdi) +; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v6i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuldq %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm3 +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = 
ymm3[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpsrad $31, %ymm1, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vmovq %xmm2, 16(%rdi) +; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v6i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpmuldq %ymm2, %ymm3, %ymm2 +; AVX512-NEXT: vpmuldq %ymm1, %ymm0, %ymm3 +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX512-NEXT: vpsrad $31, %ymm1, %ymm0 +; AVX512-NEXT: vpcmpneqd %ymm0, %ymm2, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vmovq %xmm2, 16(%rdi) +; AVX512-NEXT: vmovdqa %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<6 x i32>, <6 x i1>} @llvm.smul.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1) + %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0 + %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1 + %res = sext <6 x i1> %obit to <6 x i32> + store <6 x i32> %val, <6 x i32>* %p2 + ret <6 x i32> %res +} + +define <8 x i32> @smulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind { +; SSE2-LABEL: smulo_v8i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm5 +; SSE2-NEXT: pand %xmm0, %xmm5 +; SSE2-NEXT: pxor %xmm6, %xmm6 +; SSE2-NEXT: pcmpgtd %xmm0, %xmm6 +; SSE2-NEXT: pand %xmm2, %xmm6 +; SSE2-NEXT: paddd %xmm5, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm2, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm7, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] +; SSE2-NEXT: psubd %xmm6, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: movdqa %xmm0, (%rdi) +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm5, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtd %xmm3, %xmm5 +; SSE2-NEXT: pand %xmm1, %xmm5 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: paddd %xmm5, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm5, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] +; SSE2-NEXT: psubd %xmm4, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = 
xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-NEXT: movdqa %xmm1, 16(%rdi) +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm6, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v8i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSSE3-NEXT: pxor %xmm5, %xmm5 +; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5 +; SSSE3-NEXT: pand %xmm0, %xmm5 +; SSSE3-NEXT: pxor %xmm6, %xmm6 +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm6 +; SSSE3-NEXT: pand %xmm2, %xmm6 +; SSSE3-NEXT: paddd %xmm5, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm2, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm7, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] +; SSSE3-NEXT: psubd %xmm6, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSSE3-NEXT: movdqa %xmm0, (%rdi) +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSSE3-NEXT: pxor %xmm5, %xmm5 +; SSSE3-NEXT: pcmpgtd %xmm3, %xmm5 +; SSSE3-NEXT: pand %xmm1, %xmm5 +; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4 +; SSSE3-NEXT: pand %xmm3, %xmm4 +; SSSE3-NEXT: paddd %xmm5, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm5, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1] +; SSSE3-NEXT: psubd %xmm4, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSSE3-NEXT: movdqa %xmm1, 16(%rdi) +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm6, %xmm1 +; SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v8i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: pmuldq %xmm2, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pmulld %xmm2, %xmm0 +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: pxor %xmm2, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: pmuldq %xmm3, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pmulld %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, 16(%rdi) +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm1 +; SSE41-NEXT: pxor %xmm2, %xmm1 +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v8i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] +; 
AVX1-NEXT: vpmuldq %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpmuldq %xmm2, %xmm4, %xmm5 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7] +; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpsrad $31, %xmm2, %xmm4 +; AVX1-NEXT: vpcmpeqd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm5, %xmm6, %xmm5 +; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7] +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm5, %xmm1 +; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 +; AVX1-NEXT: vpmovsxwd %xmm1, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %ymm2, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuldq %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm3 +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpsrad $31, %ymm1, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm0, %ymm2, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 +; AVX2-NEXT: vpackssdw %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vmovdqa %ymm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpmuldq %ymm2, %ymm3, %ymm2 +; AVX512-NEXT: vpmuldq %ymm1, %ymm0, %ymm3 +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX512-NEXT: vpsrad $31, %ymm1, %ymm0 +; AVX512-NEXT: vpcmpneqd %ymm0, %ymm2, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %ymm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<8 x i32>, <8 x i1>} @llvm.smul.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1) + %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0 + %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1 + %res = sext <8 x i1> %obit to <8 x i32> + store <8 x i32> %val, <8 x i32>* %p2 + ret <8 x i32> %res +} + +define <16 x i32> @smulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) nounwind { +; SSE2-LABEL: smulo_v16i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pxor %xmm8, %xmm8 +; SSE2-NEXT: pxor %xmm9, %xmm9 +; SSE2-NEXT: pcmpgtd %xmm4, %xmm9 +; SSE2-NEXT: pand %xmm0, %xmm9 +; SSE2-NEXT: pxor %xmm10, %xmm10 +; SSE2-NEXT: pcmpgtd %xmm0, %xmm10 +; SSE2-NEXT: pand %xmm4, %xmm10 +; SSE2-NEXT: paddd %xmm9, %xmm10 +; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm4, %xmm0 +; 
SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm4[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm11, %xmm12 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1] +; SSE2-NEXT: psubd %xmm10, %xmm9 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE2-NEXT: movdqa %xmm0, (%rdi) +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm9, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm9, %xmm9 +; SSE2-NEXT: pxor %xmm9, %xmm0 +; SSE2-NEXT: pxor %xmm10, %xmm10 +; SSE2-NEXT: pcmpgtd %xmm5, %xmm10 +; SSE2-NEXT: pand %xmm1, %xmm10 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: paddd %xmm10, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm5, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm1[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm10, %xmm12 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1] +; SSE2-NEXT: psubd %xmm4, %xmm11 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSE2-NEXT: movdqa %xmm1, 16(%rdi) +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm11, %xmm1 +; SSE2-NEXT: pxor %xmm9, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm2, %xmm4 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtd %xmm2, %xmm5 +; SSE2-NEXT: pand %xmm6, %xmm5 +; SSE2-NEXT: paddd %xmm4, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm2[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm6, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm10, %xmm11 +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm11[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] +; SSE2-NEXT: psubd %xmm5, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm11[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; SSE2-NEXT: movdqa %xmm2, 32(%rdi) +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm2 +; SSE2-NEXT: pxor %xmm9, %xmm2 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm7, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: pcmpgtd %xmm3, %xmm8 +; SSE2-NEXT: pand %xmm7, %xmm8 +; SSE2-NEXT: paddd %xmm4, %xmm8 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm7, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm4, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] +; SSE2-NEXT: psubd %xmm8, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE2-NEXT: movdqa %xmm3, 48(%rdi) +; SSE2-NEXT: psrad $31, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm5, %xmm3 +; SSE2-NEXT: pxor %xmm9, %xmm3 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v16i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pxor %xmm8, %xmm8 +; SSSE3-NEXT: pxor %xmm9, %xmm9 +; SSSE3-NEXT: pcmpgtd %xmm4, 
%xmm9 +; SSSE3-NEXT: pand %xmm0, %xmm9 +; SSSE3-NEXT: pxor %xmm10, %xmm10 +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm10 +; SSSE3-NEXT: pand %xmm4, %xmm10 +; SSSE3-NEXT: paddd %xmm9, %xmm10 +; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm4, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm4[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm11, %xmm12 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm12[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1] +; SSSE3-NEXT: psubd %xmm10, %xmm9 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSSE3-NEXT: movdqa %xmm0, (%rdi) +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm9, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm9, %xmm9 +; SSSE3-NEXT: pxor %xmm9, %xmm0 +; SSSE3-NEXT: pxor %xmm10, %xmm10 +; SSSE3-NEXT: pcmpgtd %xmm5, %xmm10 +; SSSE3-NEXT: pand %xmm1, %xmm10 +; SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm1, %xmm4 +; SSSE3-NEXT: pand %xmm5, %xmm4 +; SSSE3-NEXT: paddd %xmm10, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm5, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm1[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm10, %xmm12 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1] +; SSSE3-NEXT: psubd %xmm4, %xmm11 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSSE3-NEXT: movdqa %xmm1, 16(%rdi) +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm11, %xmm1 +; SSSE3-NEXT: pxor %xmm9, %xmm1 +; SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 +; SSSE3-NEXT: pxor %xmm5, %xmm5 +; SSSE3-NEXT: pcmpgtd %xmm2, %xmm5 +; SSSE3-NEXT: pand %xmm6, %xmm5 +; SSSE3-NEXT: paddd %xmm4, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm2[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm6, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm10, %xmm11 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm11[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] +; SSSE3-NEXT: psubd %xmm5, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm11[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; SSSE3-NEXT: movdqa %xmm2, 32(%rdi) +; SSSE3-NEXT: psrad $31, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2 +; SSSE3-NEXT: pxor %xmm9, %xmm2 +; SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm7, %xmm4 +; SSSE3-NEXT: pand %xmm3, %xmm4 +; SSSE3-NEXT: pcmpgtd %xmm3, %xmm8 +; SSSE3-NEXT: pand %xmm7, %xmm8 +; SSSE3-NEXT: paddd %xmm4, %xmm8 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm7, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm7[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm4, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] +; SSSE3-NEXT: psubd %xmm8, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm6[0,2,2,3] +; SSSE3-NEXT: punpckldq 
{{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSSE3-NEXT: movdqa %xmm3, 48(%rdi) +; SSSE3-NEXT: psrad $31, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm3 +; SSSE3-NEXT: pxor %xmm9, %xmm3 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v16i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm8, %xmm9 +; SSE41-NEXT: movdqa %xmm0, %xmm8 +; SSE41-NEXT: pmuldq %xmm4, %xmm8 +; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7] +; SSE41-NEXT: pmulld %xmm4, %xmm0 +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm8, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm8, %xmm8 +; SSE41-NEXT: pxor %xmm8, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm9, %xmm10 +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: pmuldq %xmm5, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm10[2,3],xmm4[4,5],xmm10[6,7] +; SSE41-NEXT: pmulld %xmm5, %xmm1 +; SSE41-NEXT: movdqa %xmm1, 16(%rdi) +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm1 +; SSE41-NEXT: pxor %xmm8, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: pmuldq %xmm6, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pmulld %xmm6, %xmm2 +; SSE41-NEXT: movdqa %xmm2, 32(%rdi) +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm2 +; SSE41-NEXT: pxor %xmm8, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pmuldq %xmm7, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pmulld %xmm7, %xmm3 +; SSE41-NEXT: movdqa %xmm3, 48(%rdi) +; SSE41-NEXT: psrad $31, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm4, %xmm3 +; SSE41-NEXT: pxor %xmm8, %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v16i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm5, %xmm7, %xmm5 +; AVX1-NEXT: vpmuldq %xmm4, %xmm6, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; AVX1-NEXT: vpmulld %xmm4, %xmm6, %xmm9 +; AVX1-NEXT: vpsrad $31, %xmm9, %xmm6 +; AVX1-NEXT: vpcmpeqd %xmm6, %xmm5, %xmm6 +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm8, %xmm8 +; AVX1-NEXT: vpxor %xmm8, %xmm6, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm3[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmuldq %xmm3, %xmm1, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm3 +; AVX1-NEXT: vpsrad $31, %xmm3, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm5, %xmm1 +; AVX1-NEXT: vpxor %xmm8, %xmm1, %xmm1 +; AVX1-NEXT: vpackssdw %xmm6, 
%xmm1, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5 +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm7[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpmuldq %xmm5, %xmm7, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3],xmm6[4,5],xmm4[6,7] +; AVX1-NEXT: vpmulld %xmm5, %xmm7, %xmm5 +; AVX1-NEXT: vpsrad $31, %xmm5, %xmm6 +; AVX1-NEXT: vpcmpeqd %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpxor %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm6, %xmm7, %xmm6 +; AVX1-NEXT: vpmuldq %xmm2, %xmm0, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7] +; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm6, %xmm2 +; AVX1-NEXT: vpxor %xmm8, %xmm2, %xmm2 +; AVX1-NEXT: vpackssdw %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm4 +; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm3, %ymm3 +; AVX1-NEXT: vpmovsxwd %xmm2, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vpmovsxwd %xmm1, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vmovaps %ymm3, 32(%rdi) +; AVX1-NEXT: vmovaps %ymm4, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v16i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuldq %ymm4, %ymm5, %ymm4 +; AVX2-NEXT: vpmuldq %ymm3, %ymm1, %ymm5 +; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7] +; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm3 +; AVX2-NEXT: vpsrad $31, %ymm3, %ymm1 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4 +; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5 +; AVX2-NEXT: vpackssdw %xmm5, %xmm1, %xmm1 +; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm2[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm0[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuldq %ymm5, %ymm6, %ymm5 +; AVX2-NEXT: vpmuldq %ymm2, %ymm0, %ymm6 +; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7] +; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm2 +; AVX2-NEXT: vpsrad $31, %ymm2, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm0, %ymm5, %ymm0 +; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4 +; AVX2-NEXT: vpackssdw %xmm4, %xmm0, %xmm0 +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1 +; AVX2-NEXT: vmovdqa %ymm3, 32(%rdi) +; AVX2-NEXT: vmovdqa %ymm2, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v16i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmuldq %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vpshufd {{.*#+}} zmm3 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; AVX512-NEXT: vpshufd {{.*#+}} zmm4 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; AVX512-NEXT: vpmuldq %zmm3, %zmm4, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [1,17,3,19,5,21,7,23,9,25,11,27,13,29,15,31] +; 
AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4 +; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1 +; AVX512-NEXT: vpsrad $31, %zmm1, %zmm0 +; AVX512-NEXT: vpcmpneqd %zmm0, %zmm4, %k1 +; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<16 x i32>, <16 x i1>} @llvm.smul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1) + %val = extractvalue {<16 x i32>, <16 x i1>} %t, 0 + %obit = extractvalue {<16 x i32>, <16 x i1>} %t, 1 + %res = sext <16 x i1> %obit to <16 x i32> + store <16 x i32> %val, <16 x i32>* %p2 + ret <16 x i32> %res +} + +define <16 x i32> @smulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind { +; SSE2-LABEL: smulo_v16i8: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] +; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: pmullw %xmm1, %xmm0 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSE2-NEXT: pmullw %xmm2, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255] +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm4 +; SSE2-NEXT: packuswb %xmm0, %xmm4 +; SSE2-NEXT: pxor %xmm0, %xmm0 +; SSE2-NEXT: pcmpgtb %xmm4, %xmm0 +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: psraw $8, %xmm6 +; SSE2-NEXT: pmullw %xmm3, %xmm6 +; SSE2-NEXT: psrlw $8, %xmm6 +; SSE2-NEXT: psraw $8, %xmm5 +; SSE2-NEXT: psraw $8, %xmm7 +; SSE2-NEXT: pmullw %xmm5, %xmm7 +; SSE2-NEXT: psrlw $8, %xmm7 +; SSE2-NEXT: packuswb %xmm6, %xmm7 +; SSE2-NEXT: pcmpeqb %xmm0, %xmm7 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm7, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: pslld $31, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: pslld $31, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; 
SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-NEXT: pslld $31, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: pslld $31, %xmm3 +; SSE2-NEXT: psrad $31, %xmm3 +; SSE2-NEXT: movdqa %xmm4, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v16i8: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7] +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSSE3-NEXT: movdqa %xmm0, %xmm4 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSSE3-NEXT: pmullw %xmm1, %xmm0 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSSE3-NEXT: pmullw %xmm2, %xmm4 +; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255] +; SSSE3-NEXT: pand %xmm1, %xmm0 +; SSSE3-NEXT: pand %xmm1, %xmm4 +; SSSE3-NEXT: packuswb %xmm0, %xmm4 +; SSSE3-NEXT: pxor %xmm0, %xmm0 +; SSSE3-NEXT: pcmpgtb %xmm4, %xmm0 +; SSSE3-NEXT: psraw $8, %xmm3 +; SSSE3-NEXT: psraw $8, %xmm6 +; SSSE3-NEXT: pmullw %xmm3, %xmm6 +; SSSE3-NEXT: psrlw $8, %xmm6 +; SSSE3-NEXT: psraw $8, %xmm5 +; SSSE3-NEXT: psraw $8, %xmm7 +; SSSE3-NEXT: pmullw %xmm5, %xmm7 +; SSSE3-NEXT: psrlw $8, %xmm7 +; SSSE3-NEXT: packuswb %xmm6, %xmm7 +; SSSE3-NEXT: pcmpeqb %xmm0, %xmm7 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: pxor %xmm7, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm1 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSSE3-NEXT: pslld $31, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: pslld $31, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSSE3-NEXT: pslld $31, %xmm2 +; SSSE3-NEXT: psrad $31, %xmm2 +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; 
SSSE3-NEXT: pslld $31, %xmm3 +; SSSE3-NEXT: psrad $31, %xmm3 +; SSSE3-NEXT: movdqa %xmm4, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v16i8: +; SSE41: # %bb.0: +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; SSE41-NEXT: pmovsxbw %xmm1, %xmm3 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,0,1] +; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; SSE41-NEXT: pmovsxbw %xmm0, %xmm6 +; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,0,1] +; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE41-NEXT: pmullw %xmm1, %xmm0 +; SSE41-NEXT: pmullw %xmm2, %xmm4 +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm1, %xmm0 +; SSE41-NEXT: pand %xmm1, %xmm4 +; SSE41-NEXT: packuswb %xmm0, %xmm4 +; SSE41-NEXT: pxor %xmm0, %xmm0 +; SSE41-NEXT: pcmpgtb %xmm4, %xmm0 +; SSE41-NEXT: pmullw %xmm3, %xmm6 +; SSE41-NEXT: psrlw $8, %xmm6 +; SSE41-NEXT: pmovsxbw %xmm5, %xmm1 +; SSE41-NEXT: pmovsxbw %xmm7, %xmm2 +; SSE41-NEXT: pmullw %xmm1, %xmm2 +; SSE41-NEXT: psrlw $8, %xmm2 +; SSE41-NEXT: packuswb %xmm2, %xmm6 +; SSE41-NEXT: pcmpeqb %xmm0, %xmm6 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm6, %xmm3 +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,2,3] +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm3 +; SSE41-NEXT: psrad $31, %xmm3 +; SSE41-NEXT: movdqa %xmm4, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v16i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm3 +; AVX1-NEXT: 
vpmovsxbw %xmm1, %xmm4 +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm5 +; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0 +; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm4, %xmm0 +; AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX1-NEXT: vpsrad $31, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] +; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX1-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX1-NEXT: vpsrad $31, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 +; AVX1-NEXT: vmovdqa %xmm2, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v16i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovsxbw %xmm1, %ymm2 +; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3 +; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpgtb %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqb %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = 
xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa %xmm3, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v16i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX512-NEXT: vpmovwb %ymm2, %xmm2 +; AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vpcmpgtb %xmm2, %xmm3, %xmm3 +; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1 +; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0 +; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX512-NEXT: vpmovwb %ymm0, %xmm0 +; AVX512-NEXT: vpcmpneqb %xmm3, %xmm0, %k1 +; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %xmm2, (%rdi) +; AVX512-NEXT: retq + %t = call {<16 x i8>, <16 x i1>} @llvm.smul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1) + %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0 + %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1 + %res = sext <16 x i1> %obit to <16 x i32> + store <16 x i8> %val, <16 x i8>* %p2 + ret <16 x i32> %res +} + +define <8 x i32> @smulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind { +; SSE2-LABEL: smulo_v8i16: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: pmulhw %xmm1, %xmm2 +; SSE2-NEXT: pmullw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, (%rdi) +; SSE2-NEXT: psraw $15, %xmm0 +; SSE2-NEXT: pcmpeqw %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: pslld $31, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: pslld $31, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v8i16: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: pmulhw %xmm1, %xmm2 +; SSSE3-NEXT: pmullw %xmm1, %xmm0 +; SSSE3-NEXT: movdqa %xmm0, (%rdi) +; SSSE3-NEXT: psraw $15, %xmm0 +; SSSE3-NEXT: pcmpeqw %xmm2, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 +; SSSE3-NEXT: pxor %xmm0, %xmm1 +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSSE3-NEXT: pslld $31, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: pslld $31, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v8i16: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmulhw %xmm1, %xmm2 +; SSE41-NEXT: pmullw %xmm1, %xmm0 +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: psraw $15, %xmm0 +; SSE41-NEXT: pcmpeqw %xmm2, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE41-NEXT: pxor %xmm0, %xmm1 +; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; SSE41-NEXT: pslld $31, %xmm0 +; SSE41-NEXT: psrad $31, 
%xmm0 +; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE41-NEXT: pslld $31, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v8i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpmulhw %xmm1, %xmm0, %xmm2 +; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsraw $15, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqw %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v8i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhw %xmm1, %xmm0, %xmm2 +; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpsraw $15, %xmm1, %xmm0 +; AVX2-NEXT: vpcmpeqw %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v8i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmulhw %xmm1, %xmm0, %xmm2 +; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpsraw $15, %xmm1, %xmm0 +; AVX512-NEXT: vpcmpneqw %xmm0, %xmm2, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<8 x i16>, <8 x i1>} @llvm.smul.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1) + %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0 + %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1 + %res = sext <8 x i1> %obit to <8 x i32> + store <8 x i16> %val, <8 x i16>* %p2 + ret <8 x i32> %res +} + +define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind { +; SSE2-LABEL: smulo_v2i64: +; SSE2: # %bb.0: +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %r8 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %rcx +; SSE2-NEXT: movq %xmm1, %rdx +; SSE2-NEXT: movq %xmm0, %rsi +; SSE2-NEXT: xorl %eax, %eax +; SSE2-NEXT: imulq %rdx, %rsi +; SSE2-NEXT: seto %al +; SSE2-NEXT: movq %rax, %xmm0 +; SSE2-NEXT: xorl %eax, %eax +; SSE2-NEXT: imulq %r8, %rcx +; SSE2-NEXT: seto %al +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: movq %rsi, %xmm1 +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: movdqa %xmm1, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v2i64: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %r8 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %rcx +; SSSE3-NEXT: movq %xmm1, %rdx +; SSSE3-NEXT: movq %xmm0, %rsi +; SSSE3-NEXT: xorl %eax, %eax +; SSSE3-NEXT: imulq %rdx, %rsi +; SSSE3-NEXT: seto %al +; SSSE3-NEXT: movq %rax, %xmm0 +; SSSE3-NEXT: xorl %eax, %eax +; SSSE3-NEXT: imulq %r8, %rcx +; SSSE3-NEXT: seto %al +; SSSE3-NEXT: movq %rax, %xmm1 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSSE3-NEXT: movq %rsi, %xmm1 +; SSSE3-NEXT: movq %rcx, %xmm2 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSSE3-NEXT: movdqa %xmm1, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v2i64: +; SSE41: # %bb.0: +; SSE41-NEXT: movq %xmm1, %r8 +; SSE41-NEXT: movq %xmm0, %rcx +; SSE41-NEXT: pextrq $1, %xmm1, %rdx +; SSE41-NEXT: pextrq $1, %xmm0, 
%rsi +; SSE41-NEXT: xorl %eax, %eax +; SSE41-NEXT: imulq %rdx, %rsi +; SSE41-NEXT: seto %al +; SSE41-NEXT: movq %rax, %xmm1 +; SSE41-NEXT: xorl %eax, %eax +; SSE41-NEXT: imulq %r8, %rcx +; SSE41-NEXT: seto %al +; SSE41-NEXT: movq %rax, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE41-NEXT: movq %rsi, %xmm1 +; SSE41-NEXT: movq %rcx, %xmm2 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE41-NEXT: movdqa %xmm2, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v2i64: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovq %xmm1, %r8 +; AVX1-NEXT: vmovq %xmm0, %rcx +; AVX1-NEXT: vpextrq $1, %xmm1, %rdx +; AVX1-NEXT: vpextrq $1, %xmm0, %rsi +; AVX1-NEXT: xorl %eax, %eax +; AVX1-NEXT: imulq %rdx, %rsi +; AVX1-NEXT: seto %al +; AVX1-NEXT: vmovq %rax, %xmm0 +; AVX1-NEXT: xorl %eax, %eax +; AVX1-NEXT: imulq %r8, %rcx +; AVX1-NEXT: seto %al +; AVX1-NEXT: vmovq %rax, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: vmovq %rsi, %xmm1 +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v2i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq %xmm1, %r8 +; AVX2-NEXT: vmovq %xmm0, %rcx +; AVX2-NEXT: vpextrq $1, %xmm1, %rdx +; AVX2-NEXT: vpextrq $1, %xmm0, %rsi +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: imulq %rdx, %rsi +; AVX2-NEXT: seto %al +; AVX2-NEXT: vmovq %rax, %xmm0 +; AVX2-NEXT: xorl %eax, %eax +; AVX2-NEXT: imulq %r8, %rcx +; AVX2-NEXT: seto %al +; AVX2-NEXT: vmovq %rax, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: vmovq %rsi, %xmm1 +; AVX2-NEXT: vmovq %rcx, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v2i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovq %xmm1, %rax +; AVX512-NEXT: vmovq %xmm0, %rcx +; AVX512-NEXT: vpextrq $1, %xmm1, %rdx +; AVX512-NEXT: vpextrq $1, %xmm0, %rsi +; AVX512-NEXT: imulq %rdx, %rsi +; AVX512-NEXT: seto %dl +; AVX512-NEXT: kmovd %edx, %k0 +; AVX512-NEXT: kshiftlw $1, %k0, %k0 +; AVX512-NEXT: imulq %rax, %rcx +; AVX512-NEXT: seto %al +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: kmovw %eax, %k1 +; AVX512-NEXT: korw %k0, %k1, %k1 +; AVX512-NEXT: vmovq %rsi, %xmm0 +; AVX512-NEXT: vmovq %rcx, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vmovdqa %xmm0, (%rdi) +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %t = call {<2 x i64>, <2 x i1>} @llvm.smul.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1) + %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i64> %val, <2 x i64>* %p2 + ret <2 x i32> %res +} + +define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind { +; SSE2-LABEL: smulo_v4i24: +; SSE2: # %bb.0: +; SSE2-NEXT: pslld $8, %xmm0 +; SSE2-NEXT: psrad $8, %xmm0 +; SSE2-NEXT: pslld $8, %xmm1 +; SSE2-NEXT: psrad $8, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: pcmpgtd %xmm0, %xmm3 +; SSE2-NEXT: pand %xmm1, %xmm3 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq 
%xmm4, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSE2-NEXT: psubd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: pslld $8, %xmm1 +; SSE2-NEXT: psrad $8, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,3] +; SSE2-NEXT: psrad $31, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm2, %xmm3 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: por %xmm3, %xmm1 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movw %ax, (%rdi) +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movd %xmm4, %eax +; SSE2-NEXT: movw %ax, 9(%rdi) +; SSE2-NEXT: movd %xmm5, %ecx +; SSE2-NEXT: movw %cx, 6(%rdi) +; SSE2-NEXT: movd %xmm6, %edx +; SSE2-NEXT: movw %dx, 3(%rdi) +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: movb %al, 11(%rdi) +; SSE2-NEXT: shrl $16, %ecx +; SSE2-NEXT: movb %cl, 8(%rdi) +; SSE2-NEXT: shrl $16, %edx +; SSE2-NEXT: movb %dl, 5(%rdi) +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v4i24: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pslld $8, %xmm0 +; SSSE3-NEXT: psrad $8, %xmm0 +; SSSE3-NEXT: pslld $8, %xmm1 +; SSSE3-NEXT: psrad $8, %xmm1 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 +; SSSE3-NEXT: pand %xmm0, %xmm2 +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3 +; SSSE3-NEXT: pand %xmm1, %xmm3 +; SSSE3-NEXT: paddd %xmm2, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm4, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSSE3-NEXT: psubd %xmm3, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSSE3-NEXT: movdqa %xmm3, %xmm1 +; SSSE3-NEXT: pslld $8, %xmm1 +; SSSE3-NEXT: psrad $8, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,1,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,3] +; SSSE3-NEXT: psrad $31, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm2, %xmm3 +; SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSSE3-NEXT: por %xmm3, %xmm1 +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: movw %ax, (%rdi) +; SSSE3-NEXT: shrl $16, %eax +; SSSE3-NEXT: movb %al, 2(%rdi) +; SSSE3-NEXT: movd %xmm4, %eax +; SSSE3-NEXT: movw %ax, 9(%rdi) +; SSSE3-NEXT: movd %xmm5, %ecx +; SSSE3-NEXT: movw %cx, 6(%rdi) +; SSSE3-NEXT: movd %xmm6, %edx +; SSSE3-NEXT: movw %dx, 3(%rdi) +; SSSE3-NEXT: shrl $16, %eax +; SSSE3-NEXT: movb %al, 11(%rdi) +; SSSE3-NEXT: shrl $16, %ecx +; SSSE3-NEXT: movb %cl, 8(%rdi) +; SSSE3-NEXT: shrl $16, %edx +; SSSE3-NEXT: movb %dl, 5(%rdi) +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v4i24: +; SSE41: # %bb.0: +; SSE41-NEXT: pslld $8, %xmm0 +; SSE41-NEXT: psrad $8, %xmm0 +; SSE41-NEXT: pslld $8, %xmm1 +; 
SSE41-NEXT: psrad $8, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuldq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] +; SSE41-NEXT: pmulld %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: pslld $8, %xmm3 +; SSE41-NEXT: psrad $8, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm3 +; SSE41-NEXT: pextrd $3, %xmm1, %eax +; SSE41-NEXT: pextrd $2, %xmm1, %ecx +; SSE41-NEXT: pextrd $1, %xmm1, %edx +; SSE41-NEXT: movd %xmm1, %esi +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: pxor %xmm0, %xmm1 +; SSE41-NEXT: pxor %xmm3, %xmm0 +; SSE41-NEXT: por %xmm1, %xmm0 +; SSE41-NEXT: movw %ax, 9(%rdi) +; SSE41-NEXT: movw %cx, 6(%rdi) +; SSE41-NEXT: movw %dx, 3(%rdi) +; SSE41-NEXT: movw %si, (%rdi) +; SSE41-NEXT: shrl $16, %eax +; SSE41-NEXT: movb %al, 11(%rdi) +; SSE41-NEXT: shrl $16, %ecx +; SSE41-NEXT: movb %cl, 8(%rdi) +; SSE41-NEXT: shrl $16, %edx +; SSE41-NEXT: movb %dl, 5(%rdi) +; SSE41-NEXT: shrl $16, %esi +; SSE41-NEXT: movb %sil, 2(%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v4i24: +; AVX1: # %bb.0: +; AVX1-NEXT: vpslld $8, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $8, %xmm0, %xmm0 +; AVX1-NEXT: vpslld $8, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $8, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpslld $8, %xmm1, %xmm3 +; AVX1-NEXT: vpsrad $8, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpextrd $3, %xmm1, %eax +; AVX1-NEXT: movw %ax, 9(%rdi) +; AVX1-NEXT: vpextrd $2, %xmm1, %ecx +; AVX1-NEXT: movw %cx, 6(%rdi) +; AVX1-NEXT: vpextrd $1, %xmm1, %edx +; AVX1-NEXT: movw %dx, 3(%rdi) +; AVX1-NEXT: vmovd %xmm1, %esi +; AVX1-NEXT: movw %si, (%rdi) +; AVX1-NEXT: shrl $16, %eax +; AVX1-NEXT: movb %al, 11(%rdi) +; AVX1-NEXT: shrl $16, %ecx +; AVX1-NEXT: movb %cl, 8(%rdi) +; AVX1-NEXT: shrl $16, %edx +; AVX1-NEXT: movb %dl, 5(%rdi) +; AVX1-NEXT: shrl $16, %esi +; AVX1-NEXT: movb %sil, 2(%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v4i24: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $8, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $8, %xmm0, %xmm0 +; AVX2-NEXT: vpslld $8, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $8, %xmm1, %xmm1 +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpslld $8, %xmm1, %xmm3 +; AVX2-NEXT: vpsrad $8, %xmm3, %xmm3 +; 
AVX2-NEXT: vpcmpeqd %xmm1, %xmm3, %xmm3 +; AVX2-NEXT: vpxor %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpextrd $3, %xmm1, %eax +; AVX2-NEXT: movw %ax, 9(%rdi) +; AVX2-NEXT: vpextrd $2, %xmm1, %ecx +; AVX2-NEXT: movw %cx, 6(%rdi) +; AVX2-NEXT: vpextrd $1, %xmm1, %edx +; AVX2-NEXT: movw %dx, 3(%rdi) +; AVX2-NEXT: vmovd %xmm1, %esi +; AVX2-NEXT: movw %si, (%rdi) +; AVX2-NEXT: shrl $16, %eax +; AVX2-NEXT: movb %al, 11(%rdi) +; AVX2-NEXT: shrl $16, %ecx +; AVX2-NEXT: movb %cl, 8(%rdi) +; AVX2-NEXT: shrl $16, %edx +; AVX2-NEXT: movb %dl, 5(%rdi) +; AVX2-NEXT: shrl $16, %esi +; AVX2-NEXT: movb %sil, 2(%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v4i24: +; AVX512: # %bb.0: +; AVX512-NEXT: vpslld $8, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $8, %xmm0, %xmm0 +; AVX512-NEXT: vpslld $8, %xmm1, %xmm1 +; AVX512-NEXT: vpsrad $8, %xmm1, %xmm1 +; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX512-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX512-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpsrad $31, %xmm1, %xmm0 +; AVX512-NEXT: vpcmpneqd %xmm0, %xmm2, %k0 +; AVX512-NEXT: vpslld $8, %xmm1, %xmm0 +; AVX512-NEXT: vpsrad $8, %xmm0, %xmm0 +; AVX512-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 +; AVX512-NEXT: korw %k0, %k1, %k1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vpextrd $3, %xmm1, %eax +; AVX512-NEXT: movw %ax, 9(%rdi) +; AVX512-NEXT: vpextrd $2, %xmm1, %ecx +; AVX512-NEXT: movw %cx, 6(%rdi) +; AVX512-NEXT: vpextrd $1, %xmm1, %edx +; AVX512-NEXT: movw %dx, 3(%rdi) +; AVX512-NEXT: vmovd %xmm1, %esi +; AVX512-NEXT: movw %si, (%rdi) +; AVX512-NEXT: shrl $16, %eax +; AVX512-NEXT: movb %al, 11(%rdi) +; AVX512-NEXT: shrl $16, %ecx +; AVX512-NEXT: movb %cl, 8(%rdi) +; AVX512-NEXT: shrl $16, %edx +; AVX512-NEXT: movb %dl, 5(%rdi) +; AVX512-NEXT: shrl $16, %esi +; AVX512-NEXT: movb %sil, 2(%rdi) +; AVX512-NEXT: retq + %t = call {<4 x i24>, <4 x i1>} @llvm.smul.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1) + %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i24> %val, <4 x i24>* %p2 + ret <4 x i32> %res +} + +define <4 x i32> @smulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind { +; SSE2-LABEL: smulo_v4i1: +; SSE2: # %bb.0: +; SSE2-NEXT: pslld $31, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pslld $31, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm1, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: pcmpgtd %xmm0, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: paddd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE2-NEXT: psubd %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; 
SSE2-NEXT: pcmpeqd %xmm4, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: pslld $31, %xmm3 +; SSE2-NEXT: psrad $31, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: movmskps %xmm3, %eax +; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v4i1: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pslld $31, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pslld $31, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm1, %xmm3 +; SSSE3-NEXT: pand %xmm0, %xmm3 +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm2 +; SSSE3-NEXT: pand %xmm1, %xmm2 +; SSSE3-NEXT: paddd %xmm3, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSSE3-NEXT: psubd %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSSE3-NEXT: movdqa %xmm0, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSSE3-NEXT: movdqa %xmm0, %xmm3 +; SSSE3-NEXT: pslld $31, %xmm3 +; SSSE3-NEXT: psrad $31, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm0 +; SSSE3-NEXT: pxor %xmm2, %xmm0 +; SSSE3-NEXT: por %xmm1, %xmm0 +; SSSE3-NEXT: movmskps %xmm3, %eax +; SSSE3-NEXT: movb %al, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v4i1: +; SSE41: # %bb.0: +; SSE41-NEXT: pslld $31, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pslld $31, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuldq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuldq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] +; SSE41-NEXT: pmulld %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: psrad $31, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: pxor %xmm0, %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: pslld $31, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm1 +; SSE41-NEXT: pxor %xmm1, %xmm0 +; SSE41-NEXT: por %xmm3, %xmm0 +; SSE41-NEXT: movmskps %xmm2, %eax +; SSE41-NEXT: movb %al, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v4i1: +; AVX1: # %bb.0: +; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpxor %xmm2, %xmm1, 
%xmm1 +; AVX1-NEXT: vpslld $31, %xmm0, %xmm3 +; AVX1-NEXT: vpsrad $31, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm0, %xmm3, %xmm0 +; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovmskps %xmm3, %eax +; AVX1-NEXT: movb %al, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v4i1: +; AVX2: # %bb.0: +; AVX2-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm1 +; AVX2-NEXT: vpcmpeqd %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpslld $31, %xmm0, %xmm3 +; AVX2-NEXT: vpsrad $31, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %xmm0, %xmm3, %xmm0 +; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovmskps %xmm3, %eax +; AVX2-NEXT: movb %al, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v4i1: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX512-NEXT: vptestmd %xmm1, %xmm1, %k0 +; AVX512-NEXT: kshiftrw $3, %k0, %k1 +; AVX512-NEXT: kmovd %k1, %r9d +; AVX512-NEXT: andb $1, %r9b +; AVX512-NEXT: negb %r9b +; AVX512-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1 +; AVX512-NEXT: kshiftrw $3, %k1, %k2 +; AVX512-NEXT: kmovd %k2, %r10d +; AVX512-NEXT: andb $1, %r10b +; AVX512-NEXT: negb %r10b +; AVX512-NEXT: kshiftrw $2, %k1, %k2 +; AVX512-NEXT: kmovd %k2, %r11d +; AVX512-NEXT: andb $1, %r11b +; AVX512-NEXT: negb %r11b +; AVX512-NEXT: kshiftrw $2, %k0, %k2 +; AVX512-NEXT: kmovd %k2, %ebx +; AVX512-NEXT: andb $1, %bl +; AVX512-NEXT: negb %bl +; AVX512-NEXT: kshiftrw $1, %k0, %k2 +; AVX512-NEXT: kmovd %k2, %esi +; AVX512-NEXT: andb $1, %sil +; AVX512-NEXT: negb %sil +; AVX512-NEXT: kshiftrw $1, %k1, %k2 +; AVX512-NEXT: kmovd %k2, %edx +; AVX512-NEXT: andb $1, %dl +; AVX512-NEXT: negb %dl +; AVX512-NEXT: kmovd %k1, %eax +; AVX512-NEXT: andb $1, %al +; AVX512-NEXT: negb %al +; AVX512-NEXT: kmovd %k0, %ecx +; AVX512-NEXT: andb $1, %cl +; AVX512-NEXT: negb %cl +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: imulb %cl +; AVX512-NEXT: movl %eax, %r8d +; AVX512-NEXT: seto %al +; AVX512-NEXT: movl %r8d, %ecx +; AVX512-NEXT: andb $1, %cl +; AVX512-NEXT: negb %cl +; AVX512-NEXT: cmpb %r8b, %cl +; AVX512-NEXT: setne %cl +; AVX512-NEXT: orb %al, %cl +; AVX512-NEXT: kmovd %ecx, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k1 +; AVX512-NEXT: movl %edx, %eax +; AVX512-NEXT: imulb %sil +; AVX512-NEXT: movl %eax, %edx +; AVX512-NEXT: seto %al +; AVX512-NEXT: movl %edx, %ecx +; AVX512-NEXT: andb $1, %cl +; AVX512-NEXT: negb %cl +; AVX512-NEXT: cmpb %dl, %cl +; AVX512-NEXT: setne %cl +; AVX512-NEXT: orb %al, %cl +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $14, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k1 +; AVX512-NEXT: movl %r11d, %eax +; AVX512-NEXT: imulb %bl +; AVX512-NEXT: movl %eax, %esi +; AVX512-NEXT: seto %al +; AVX512-NEXT: movl %esi, %ecx +; AVX512-NEXT: andb $1, %cl +; 
AVX512-NEXT: negb %cl +; AVX512-NEXT: cmpb %sil, %cl +; AVX512-NEXT: setne %cl +; AVX512-NEXT: orb %al, %cl +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $13, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftlw $13, %k0, %k0 +; AVX512-NEXT: kshiftrw $13, %k0, %k0 +; AVX512-NEXT: movl %r10d, %eax +; AVX512-NEXT: imulb %r9b +; AVX512-NEXT: # kill: def $al killed $al def $eax +; AVX512-NEXT: seto %cl +; AVX512-NEXT: movl %eax, %ebx +; AVX512-NEXT: andb $1, %bl +; AVX512-NEXT: negb %bl +; AVX512-NEXT: cmpb %al, %bl +; AVX512-NEXT: setne %bl +; AVX512-NEXT: orb %cl, %bl +; AVX512-NEXT: kmovd %ebx, %k1 +; AVX512-NEXT: kshiftlw $3, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: kmovd %r8d, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k1 +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $14, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k1 +; AVX512-NEXT: kmovd %esi, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $13, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $3, %k0, %k1 +; AVX512-NEXT: kmovd %eax, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $12, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, (%rdi) +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: retq + %t = call {<4 x i1>, <4 x i1>} @llvm.smul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1) + %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i1> %val, <4 x i1>* %p2 + ret <4 x i32> %res +} + +define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind { +; SSE2-LABEL: smulo_v2i128: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r15 +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %r13 +; SSE2-NEXT: pushq %r12 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: subq $24, %rsp +; SSE2-NEXT: movq %r8, %rax +; SSE2-NEXT: movq %rcx, %r14 +; SSE2-NEXT: movq %rdx, %rbx +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; SSE2-NEXT: movq $0, {{[0-9]+}}(%rsp) +; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; SSE2-NEXT: movq %rax, %rdx +; SSE2-NEXT: movq %r9, %rcx +; SSE2-NEXT: callq __muloti4 +; SSE2-NEXT: movq %rax, %r13 +; SSE2-NEXT: movq %rdx, %rbp +; SSE2-NEXT: movq $0, {{[0-9]+}}(%rsp) +; SSE2-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; SSE2-NEXT: movq %rbx, %rdi +; SSE2-NEXT: movq %r14, %rsi +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE2-NEXT: movq %r12, %rcx +; SSE2-NEXT: callq __muloti4 +; SSE2-NEXT: xorl %ecx, %ecx +; SSE2-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; SSE2-NEXT: setne %cl +; SSE2-NEXT: xorl %esi, %esi +; SSE2-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; SSE2-NEXT: setne %sil +; SSE2-NEXT: movd %esi, %xmm0 +; SSE2-NEXT: pinsrw $4, %ecx, %xmm0 +; SSE2-NEXT: movq %rdx, 24(%r15) +; SSE2-NEXT: movq %rax, 16(%r15) +; SSE2-NEXT: movq %rbp, 8(%r15) +; SSE2-NEXT: movq %r13, (%r15) +; SSE2-NEXT: psllq $63, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-NEXT: addq $24, %rsp +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r12 +; SSE2-NEXT: popq %r13 +; 
SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %r15 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; SSSE3-LABEL: smulo_v2i128: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pushq %rbp +; SSSE3-NEXT: pushq %r15 +; SSSE3-NEXT: pushq %r14 +; SSSE3-NEXT: pushq %r13 +; SSSE3-NEXT: pushq %r12 +; SSSE3-NEXT: pushq %rbx +; SSSE3-NEXT: subq $24, %rsp +; SSSE3-NEXT: movq %r8, %rax +; SSSE3-NEXT: movq %rcx, %r14 +; SSSE3-NEXT: movq %rdx, %rbx +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; SSSE3-NEXT: movq $0, {{[0-9]+}}(%rsp) +; SSSE3-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; SSSE3-NEXT: movq %rax, %rdx +; SSSE3-NEXT: movq %r9, %rcx +; SSSE3-NEXT: callq __muloti4 +; SSSE3-NEXT: movq %rax, %r13 +; SSSE3-NEXT: movq %rdx, %rbp +; SSSE3-NEXT: movq $0, {{[0-9]+}}(%rsp) +; SSSE3-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; SSSE3-NEXT: movq %rbx, %rdi +; SSSE3-NEXT: movq %r14, %rsi +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSSE3-NEXT: movq %r12, %rcx +; SSSE3-NEXT: callq __muloti4 +; SSSE3-NEXT: xorl %ecx, %ecx +; SSSE3-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; SSSE3-NEXT: setne %cl +; SSSE3-NEXT: xorl %esi, %esi +; SSSE3-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; SSSE3-NEXT: setne %sil +; SSSE3-NEXT: movd %esi, %xmm0 +; SSSE3-NEXT: pinsrw $4, %ecx, %xmm0 +; SSSE3-NEXT: movq %rdx, 24(%r15) +; SSSE3-NEXT: movq %rax, 16(%r15) +; SSSE3-NEXT: movq %rbp, 8(%r15) +; SSSE3-NEXT: movq %r13, (%r15) +; SSSE3-NEXT: psllq $63, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSSE3-NEXT: addq $24, %rsp +; SSSE3-NEXT: popq %rbx +; SSSE3-NEXT: popq %r12 +; SSSE3-NEXT: popq %r13 +; SSSE3-NEXT: popq %r14 +; SSSE3-NEXT: popq %r15 +; SSSE3-NEXT: popq %rbp +; SSSE3-NEXT: retq +; +; SSE41-LABEL: smulo_v2i128: +; SSE41: # %bb.0: +; SSE41-NEXT: pushq %rbp +; SSE41-NEXT: pushq %r15 +; SSE41-NEXT: pushq %r14 +; SSE41-NEXT: pushq %r13 +; SSE41-NEXT: pushq %r12 +; SSE41-NEXT: pushq %rbx +; SSE41-NEXT: subq $24, %rsp +; SSE41-NEXT: movq %r8, %rax +; SSE41-NEXT: movq %rcx, %r14 +; SSE41-NEXT: movq %rdx, %rbx +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; SSE41-NEXT: movq $0, {{[0-9]+}}(%rsp) +; SSE41-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; SSE41-NEXT: movq %rax, %rdx +; SSE41-NEXT: movq %r9, %rcx +; SSE41-NEXT: callq __muloti4 +; SSE41-NEXT: movq %rax, %r13 +; SSE41-NEXT: movq %rdx, %rbp +; SSE41-NEXT: movq $0, {{[0-9]+}}(%rsp) +; SSE41-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; SSE41-NEXT: movq %rbx, %rdi +; SSE41-NEXT: movq %r14, %rsi +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; SSE41-NEXT: movq %r12, %rcx +; SSE41-NEXT: callq __muloti4 +; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; SSE41-NEXT: setne %cl +; SSE41-NEXT: xorl %esi, %esi +; SSE41-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; SSE41-NEXT: setne %sil +; SSE41-NEXT: movd %esi, %xmm0 +; SSE41-NEXT: pinsrb $8, %ecx, %xmm0 +; SSE41-NEXT: movq %rdx, 24(%r15) +; SSE41-NEXT: movq %rax, 16(%r15) +; SSE41-NEXT: movq %rbp, 8(%r15) +; SSE41-NEXT: movq %r13, (%r15) +; SSE41-NEXT: psllq $63, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: addq $24, %rsp +; SSE41-NEXT: popq %rbx +; SSE41-NEXT: popq %r12 +; SSE41-NEXT: popq %r13 +; SSE41-NEXT: popq %r14 +; SSE41-NEXT: popq %r15 +; SSE41-NEXT: popq %rbp +; SSE41-NEXT: retq +; +; AVX1-LABEL: smulo_v2i128: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: pushq %r15 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: pushq %r13 +; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: subq $24, 
%rsp +; AVX1-NEXT: movq %r8, %rax +; AVX1-NEXT: movq %rcx, %r14 +; AVX1-NEXT: movq %rdx, %rbx +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX1-NEXT: movq $0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; AVX1-NEXT: movq %rax, %rdx +; AVX1-NEXT: movq %r9, %rcx +; AVX1-NEXT: callq __muloti4 +; AVX1-NEXT: movq %rax, %r13 +; AVX1-NEXT: movq %rdx, %rbp +; AVX1-NEXT: movq $0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; AVX1-NEXT: movq %rbx, %rdi +; AVX1-NEXT: movq %r14, %rsi +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; AVX1-NEXT: movq %r12, %rcx +; AVX1-NEXT: callq __muloti4 +; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: setne %cl +; AVX1-NEXT: xorl %esi, %esi +; AVX1-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; AVX1-NEXT: setne %sil +; AVX1-NEXT: vmovd %esi, %xmm0 +; AVX1-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX1-NEXT: movq %rdx, 24(%r15) +; AVX1-NEXT: movq %rax, 16(%r15) +; AVX1-NEXT: movq %rbp, 8(%r15) +; AVX1-NEXT: movq %r13, (%r15) +; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: addq $24, %rsp +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: smulo_v2i128: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: subq $24, %rsp +; AVX2-NEXT: movq %r8, %rax +; AVX2-NEXT: movq %rcx, %r14 +; AVX2-NEXT: movq %rdx, %rbx +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12 +; AVX2-NEXT: movq $0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; AVX2-NEXT: movq %rax, %rdx +; AVX2-NEXT: movq %r9, %rcx +; AVX2-NEXT: callq __muloti4 +; AVX2-NEXT: movq %rax, %r13 +; AVX2-NEXT: movq %rdx, %rbp +; AVX2-NEXT: movq $0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: leaq {{[0-9]+}}(%rsp), %r8 +; AVX2-NEXT: movq %rbx, %rdi +; AVX2-NEXT: movq %r14, %rsi +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; AVX2-NEXT: movq %r12, %rcx +; AVX2-NEXT: callq __muloti4 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: setne %cl +; AVX2-NEXT: xorl %esi, %esi +; AVX2-NEXT: cmpq $0, {{[0-9]+}}(%rsp) +; AVX2-NEXT: setne %sil +; AVX2-NEXT: vmovd %esi, %xmm0 +; AVX2-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0 +; AVX2-NEXT: movq %rdx, 24(%r15) +; AVX2-NEXT: movq %rax, 16(%r15) +; AVX2-NEXT: movq %rbp, 8(%r15) +; AVX2-NEXT: movq %r13, (%r15) +; AVX2-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: addq $24, %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: smulo_v2i128: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: subq $40, %rsp +; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: movq %r8, %r15 +; AVX512-NEXT: movq %rdx, %rax +; AVX512-NEXT: movq %rsi, %r12 +; AVX512-NEXT: movq %rdi, %rbx +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9 +; AVX512-NEXT: movq $0, 
{{[0-9]+}}(%rsp)
+; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: movq %rax, %rdi
+; AVX512-NEXT: movq %rcx, %rsi
+; AVX512-NEXT: movq %r9, %rcx
+; AVX512-NEXT: callq __muloti4
+; AVX512-NEXT: movq %rax, %r13
+; AVX512-NEXT: movq %rdx, %rbp
+; AVX512-NEXT: cmpq $0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: setne %al
+; AVX512-NEXT: movb %al, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: movq $0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: movq %rbx, %rdi
+; AVX512-NEXT: movq %r12, %rsi
+; AVX512-NEXT: movq %r15, %rdx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: callq __muloti4
+; AVX512-NEXT: cmpq $0, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: setne %cl
+; AVX512-NEXT: movb %cl, {{[0-9]+}}(%rsp)
+; AVX512-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
+; AVX512-NEXT: movq %rbp, 24(%r14)
+; AVX512-NEXT: movq %r13, 16(%r14)
+; AVX512-NEXT: movq %rdx, 8(%r14)
+; AVX512-NEXT: movq %rax, (%r14)
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; AVX512-NEXT: addq $40, %rsp
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %t = call {<2 x i128>, <2 x i1>} @llvm.smul.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1)
+ %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0
+ %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1
+ %res = sext <2 x i1> %obit to <2 x i32>
+ store <2 x i128> %val, <2 x i128>* %p2
+ ret <2 x i32> %res
+}
Index: llvm/trunk/test/CodeGen/X86/vec_umulo.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vec_umulo.ll
+++ llvm/trunk/test/CodeGen/X86/vec_umulo.ll
@@ -0,0 +1,2633 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+
+declare {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32>, <1 x i32>)
+declare {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32>, <2 x i32>)
+declare {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32>, <3 x i32>)
+declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>)
+declare {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32>, <6 x i32>)
+declare {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
+declare {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32>, <16 x i32>)
+
+declare {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8>, <16 x i8>)
+declare {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16>, <8 x i16>)
+declare {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64>, <2 x i64>)
+
+declare {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24>, <4 x i24>)
+declare {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1>, <4 x i1>)
+declare {<2 x i128>,
<2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128>, <2 x i128>) + +define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind { +; SSE-LABEL: umulo_v1i32: +; SSE: # %bb.0: +; SSE-NEXT: movq %rdx, %rcx +; SSE-NEXT: movl %edi, %eax +; SSE-NEXT: xorl %edi, %edi +; SSE-NEXT: mull %esi +; SSE-NEXT: seto %dil +; SSE-NEXT: negl %edi +; SSE-NEXT: movl %eax, (%rcx) +; SSE-NEXT: movl %edi, %eax +; SSE-NEXT: retq +; +; AVX-LABEL: umulo_v1i32: +; AVX: # %bb.0: +; AVX-NEXT: movq %rdx, %rcx +; AVX-NEXT: movl %edi, %eax +; AVX-NEXT: xorl %edi, %edi +; AVX-NEXT: mull %esi +; AVX-NEXT: seto %dil +; AVX-NEXT: negl %edi +; AVX-NEXT: movl %eax, (%rcx) +; AVX-NEXT: movl %edi, %eax +; AVX-NEXT: retq + %t = call {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1) + %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0 + %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1 + %res = sext <1 x i1> %obit to <1 x i32> + store <1 x i32> %val, <1 x i32>* %p2 + ret <1 x i32> %res +} + +define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind { +; SSE2-LABEL: umulo_v2i32: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm3, %r8 +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %r9 +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: movq %xmm1, %rcx +; SSE2-NEXT: xorl %esi, %esi +; SSE2-NEXT: mulq %rcx +; SSE2-NEXT: seto %sil +; SSE2-NEXT: movq %rax, %xmm0 +; SSE2-NEXT: xorl %ecx, %ecx +; SSE2-NEXT: movq %r8, %rax +; SSE2-NEXT: mulq %r9 +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] +; SSE2-NEXT: psrlq $32, %xmm0 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm0, %xmm2 +; SSE2-NEXT: movq %rsi, %xmm0 +; SSE2-NEXT: seto %cl +; SSE2-NEXT: movq %rcx, %xmm3 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: movq %xmm1, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v2i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,0,4294967295,0] +; SSSE3-NEXT: pand %xmm2, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm3, %r8 +; SSSE3-NEXT: pand %xmm2, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %r9 +; SSSE3-NEXT: movq %xmm0, %rax +; SSSE3-NEXT: movq %xmm1, %rcx +; SSSE3-NEXT: xorl %esi, %esi +; SSSE3-NEXT: mulq %rcx +; SSSE3-NEXT: seto %sil +; SSSE3-NEXT: movq %rax, %xmm0 +; SSSE3-NEXT: xorl %ecx, %ecx +; SSSE3-NEXT: movq %r8, %rax +; SSSE3-NEXT: mulq %r9 +; SSSE3-NEXT: movq %rax, %xmm1 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] +; SSSE3-NEXT: psrlq $32, %xmm0 +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm0, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,0,3,2] +; SSSE3-NEXT: pand %xmm2, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: pxor %xmm0, %xmm2 +; SSSE3-NEXT: movq %rsi, %xmm0 +; SSSE3-NEXT: seto %cl +; SSSE3-NEXT: movq %rcx, %xmm3 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSSE3-NEXT: por %xmm2, %xmm0 +; SSSE3-NEXT: movq %xmm1, (%rdi) +; SSSE3-NEXT: retq +; +; 
SSE41-LABEL: umulo_v2i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: movq %xmm0, %r9 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; SSE41-NEXT: movq %xmm1, %rsi +; SSE41-NEXT: pextrq $1, %xmm0, %rax +; SSE41-NEXT: pextrq $1, %xmm1, %rdx +; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: mulq %rdx +; SSE41-NEXT: movq %rax, %r8 +; SSE41-NEXT: seto %cl +; SSE41-NEXT: movq %rcx, %xmm0 +; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: movq %r9, %rax +; SSE41-NEXT: mulq %rsi +; SSE41-NEXT: seto %cl +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE41-NEXT: movq %r8, %xmm0 +; SSE41-NEXT: movq %rax, %xmm3 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3] +; SSE41-NEXT: psrlq $32, %xmm3 +; SSE41-NEXT: pcmpeqq %xmm2, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: pxor %xmm3, %xmm0 +; SSE41-NEXT: por %xmm1, %xmm0 +; SSE41-NEXT: movq %xmm4, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v2i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; AVX1-NEXT: vmovq %xmm0, %r9 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] +; AVX1-NEXT: vmovq %xmm1, %rsi +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vpextrq $1, %xmm1, %rdx +; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: mulq %rdx +; AVX1-NEXT: movq %rax, %r8 +; AVX1-NEXT: seto %cl +; AVX1-NEXT: vmovq %rcx, %xmm0 +; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: movq %r9, %rax +; AVX1-NEXT: mulq %rsi +; AVX1-NEXT: seto %cl +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: vmovq %r8, %xmm1 +; AVX1-NEXT: vmovq %rax, %xmm3 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3 +; AVX1-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; AVX1-NEXT: vmovq %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v2i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX2-NEXT: vmovq %xmm0, %r9 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX2-NEXT: vmovq %xmm1, %rsi +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vpextrq $1, %xmm1, %rdx +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: mulq %rdx +; AVX2-NEXT: movq %rax, %r8 +; AVX2-NEXT: seto %cl +; AVX2-NEXT: vmovq %rcx, %xmm0 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: movq %r9, %rax +; AVX2-NEXT: mulq %rsi +; AVX2-NEXT: seto %cl +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: vmovq %r8, %xmm1 +; AVX2-NEXT: vmovq %rax, %xmm3 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX2-NEXT: vpsrlq $32, %xmm1, %xmm3 +; AVX2-NEXT: vpcmpeqq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; AVX2-NEXT: vmovq %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v2i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = 
xmm0[0],xmm2[1],xmm0[2],xmm2[3] +; AVX512-NEXT: vmovq %xmm0, %rcx +; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] +; AVX512-NEXT: vmovq %xmm1, %rsi +; AVX512-NEXT: vpextrq $1, %xmm0, %rax +; AVX512-NEXT: vpextrq $1, %xmm1, %rdx +; AVX512-NEXT: mulq %rdx +; AVX512-NEXT: seto %r8b +; AVX512-NEXT: vmovq %rax, %xmm0 +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: mulq %rsi +; AVX512-NEXT: vmovq %rax, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vpsrlq $32, %xmm0, %xmm1 +; AVX512-NEXT: vptestmq %xmm1, %xmm1, %k0 +; AVX512-NEXT: kmovd %r8d, %k1 +; AVX512-NEXT: kshiftlw $1, %k1, %k1 +; AVX512-NEXT: seto %al +; AVX512-NEXT: andl $1, %eax +; AVX512-NEXT: kmovw %eax, %k2 +; AVX512-NEXT: korw %k1, %k2, %k1 +; AVX512-NEXT: korw %k1, %k0, %k1 +; AVX512-NEXT: vpmovqd %xmm0, (%rdi) +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %t = call {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1) + %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i32> %val, <2 x i32>* %p2 + ret <2 x i32> %res +} + +define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind { +; SSE2-LABEL: umulo_v3i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: movq %xmm0, (%rdi) +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, 8(%rdi) +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v3i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 +; SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSSE3-NEXT: movq %xmm0, (%rdi) +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, 8(%rdi) +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v3i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuludq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] +; SSE41-NEXT: pxor %xmm3, %xmm3 +; SSE41-NEXT: 
pcmpeqd %xmm2, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: pmulld %xmm1, %xmm0 +; SSE41-NEXT: pextrd $2, %xmm0, 8(%rdi) +; SSE41-NEXT: movq %xmm0, (%rdi) +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v3i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi) +; AVX1-NEXT: vmovq %xmm0, (%rdi) +; AVX1-NEXT: vmovdqa %xmm2, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v3i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrd $2, %xmm0, 8(%rdi) +; AVX2-NEXT: vmovq %xmm0, (%rdi) +; AVX2-NEXT: vmovdqa %xmm2, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v3i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX512-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX512-NEXT: vptestmd %xmm2, %xmm2, %k1 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vpextrd $2, %xmm1, 8(%rdi) +; AVX512-NEXT: vmovq %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1) + %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0 + %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1 + %res = sext <3 x i1> %obit to <3 x i32> + store <3 x i32> %val, <3 x i32>* %p2 + ret <3 x i32> %res +} + +define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind { +; SSE2-LABEL: umulo_v4i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: movdqa %xmm0, (%rdi) +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v4i32: +; SSSE3: # %bb.0: +; 
SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 +; SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSSE3-NEXT: movdqa %xmm0, (%rdi) +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v4i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuludq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] +; SSE41-NEXT: pxor %xmm3, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: pmulld %xmm1, %xmm0 +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v4i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, (%rdi) +; AVX1-NEXT: vmovdqa %xmm2, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v4i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovdqa %xmm0, (%rdi) +; AVX2-NEXT: vmovdqa %xmm2, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v4i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX512-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX512-NEXT: vptestmd %xmm2, %xmm2, %k1 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1) + %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1 
+ %res = sext <4 x i1> %obit to <4 x i32>
+ store <4 x i32> %val, <4 x i32>* %p2
+ ret <4 x i32> %res
+}
+
+define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind {
+; SSE2-LABEL: umulo_v6i32:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movq %rdi, %rax
+; SSE2-NEXT: movd {{.*#+}} xmm8 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSE2-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: movd %r8d, %xmm3
+; SSE2-NEXT: movd %ecx, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: movd %edx, %xmm2
+; SSE2-NEXT: movd %esi, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
+; SSE2-NEXT: pmuludq %xmm1, %xmm5
+; SSE2-NEXT: movd {{.*#+}} xmm7 = mem[0],zero,zero,zero
+; SSE2-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE2-NEXT: movd %r9d, %xmm0
+; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: pmuludq %xmm6, %xmm0
+; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3]
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm8[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0]
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm6
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
+; SSE2-NEXT: pmuludq %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT: pxor %xmm4, %xmm2
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movq %xmm0, 16(%rcx)
+; SSE2-NEXT: movdqa %xmm5, (%rcx)
+; SSE2-NEXT: movq %xmm2, 16(%rdi)
+; SSE2-NEXT: movdqa %xmm6, (%rdi)
+; SSE2-NEXT: retq
+;
+; SSSE3-LABEL: umulo_v6i32:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movq %rdi, %rax
+; SSSE3-NEXT: movd {{.*#+}} xmm8 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSSE3-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSSE3-NEXT: movd %r8d, %xmm3
+; SSSE3-NEXT: movd %ecx, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: movd %esi, %xmm5
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
+; SSSE3-NEXT: pmuludq %xmm1, %xmm5
+; SSSE3-NEXT: movd {{.*#+}} xmm7 =
mem[0],zero,zero,zero +; SSSE3-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] +; SSSE3-NEXT: movd %r9d, %xmm0 +; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSSE3-NEXT: pmuludq %xmm6, %xmm0 +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3] +; SSSE3-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0],xmm8[0,0] +; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm3[0,0] +; SSSE3-NEXT: pmuludq %xmm4, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1] +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm6 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4 +; SSSE3-NEXT: pxor %xmm4, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSSE3-NEXT: pmuludq %xmm7, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1] +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 +; SSSE3-NEXT: pxor %xmm4, %xmm2 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSSE3-NEXT: movq %xmm0, 16(%rcx) +; SSSE3-NEXT: movdqa %xmm5, (%rcx) +; SSSE3-NEXT: movq %xmm2, 16(%rdi) +; SSSE3-NEXT: movdqa %xmm6, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v6i32: +; SSE41: # %bb.0: +; SSE41-NEXT: movq %rdi, %rax +; SSE41-NEXT: movd %esi, %xmm0 +; SSE41-NEXT: pinsrd $1, %edx, %xmm0 +; SSE41-NEXT: pinsrd $2, %ecx, %xmm0 +; SSE41-NEXT: pinsrd $3, %r8d, %xmm0 +; SSE41-NEXT: movd %r9d, %xmm1 +; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm1 +; SSE41-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm2 +; SSE41-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero +; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm3 +; SSE41-NEXT: pinsrd $2, {{[0-9]+}}(%rsp), %xmm3 +; SSE41-NEXT: pinsrd $3, {{[0-9]+}}(%rsp), %xmm3 +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rcx +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: pmuludq %xmm3, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pxor %xmm8, %xmm8 +; SSE41-NEXT: pcmpeqd %xmm8, %xmm4 +; SSE41-NEXT: pcmpeqd %xmm6, %xmm6 +; SSE41-NEXT: pxor %xmm6, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm7, %xmm5 +; SSE41-NEXT: movdqa %xmm1, %xmm7 +; SSE41-NEXT: pmuludq %xmm2, %xmm7 +; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; SSE41-NEXT: pcmpeqd %xmm8, %xmm7 +; SSE41-NEXT: pxor %xmm6, %xmm7 +; SSE41-NEXT: pmulld %xmm0, %xmm3 +; SSE41-NEXT: pmulld %xmm1, %xmm2 +; SSE41-NEXT: movq %xmm2, 16(%rcx) +; SSE41-NEXT: movdqa %xmm3, (%rcx) +; SSE41-NEXT: movq %xmm7, 16(%rdi) +; SSE41-NEXT: movdqa %xmm4, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v6i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: 
vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm5 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7] +; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6 +; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5 +; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vpackssdw %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpmovsxwd %xmm3, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 +; AVX1-NEXT: vmovq %xmm2, 16(%rdi) +; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v6i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm3 +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2 +; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3 +; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpmovsxwd %xmm2, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vmovq %xmm2, 16(%rdi) +; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v6i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpmuludq %ymm2, %ymm3, %ymm2 +; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm3 +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX512-NEXT: vptestmd %ymm2, %ymm2, %k1 +; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vmovq %xmm2, 16(%rdi) +; AVX512-NEXT: vmovdqa %xmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1) + %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0 + %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1 + %res = sext <6 x i1> %obit to <6 x i32> + store <6 x i32> %val, <6 x i32>* %p2 + ret <6 x i32> %res +} + +define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind { +; SSE2-LABEL: umulo_v8i32: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm2, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm5, 
%xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: pxor %xmm8, %xmm8 +; SSE2-NEXT: pcmpeqd %xmm8, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm7, %xmm7 +; SSE2-NEXT: pxor %xmm7, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm5, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; SSE2-NEXT: pcmpeqd %xmm8, %xmm2 +; SSE2-NEXT: pxor %xmm7, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-NEXT: movdqa %xmm1, 16(%rdi) +; SSE2-NEXT: movdqa %xmm4, (%rdi) +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v8i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa %xmm0, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm2, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm5, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSSE3-NEXT: pxor %xmm8, %xmm8 +; SSSE3-NEXT: pcmpeqd %xmm8, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm7, %xmm7 +; SSSE3-NEXT: pxor %xmm7, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm5, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; SSSE3-NEXT: pcmpeqd %xmm8, %xmm2 +; SSSE3-NEXT: pxor %xmm7, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSSE3-NEXT: movdqa %xmm1, 16(%rdi) +; SSSE3-NEXT: movdqa %xmm4, (%rdi) +; SSSE3-NEXT: movdqa %xmm2, %xmm1 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v8i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm4, %xmm5 +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: pmuludq %xmm2, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7] +; SSE41-NEXT: pxor %xmm8, %xmm8 +; SSE41-NEXT: pcmpeqd %xmm8, %xmm4 +; SSE41-NEXT: pcmpeqd %xmm7, %xmm7 +; SSE41-NEXT: pxor %xmm7, %xmm4 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm5, %xmm6 +; SSE41-NEXT: movdqa %xmm1, %xmm5 +; SSE41-NEXT: pmuludq %xmm3, %xmm5 +; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7] +; SSE41-NEXT: pcmpeqd %xmm8, %xmm5 +; SSE41-NEXT: 
pxor %xmm7, %xmm5 +; SSE41-NEXT: pmulld %xmm2, %xmm0 +; SSE41-NEXT: pmulld %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, 16(%rdi) +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: movdqa %xmm4, %xmm0 +; SSE41-NEXT: movdqa %xmm5, %xmm1 +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v8i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm5 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7] +; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6 +; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5 +; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vpackssdw %xmm3, %xmm5, %xmm3 +; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm1 +; AVX1-NEXT: vpmovsxwd %xmm3, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %ymm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v8i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm3 +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2 +; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3 +; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX2-NEXT: vpmovsxwd %xmm2, %ymm0 +; AVX2-NEXT: vmovdqa %ymm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v8i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpmuludq %ymm2, %ymm3, %ymm2 +; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm3 +; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7] +; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7] +; AVX512-NEXT: vptestmd %ymm2, %ymm2, %k1 +; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %ymm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1) + %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0 + %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1 + %res = sext <8 x i1> %obit to <8 x i32> + store <8 x i32> %val, <8 x i32>* %p2 + ret <8 x i32> %res +} + +define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) 
nounwind { +; SSE2-LABEL: umulo_v16i32: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm8 +; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm4, %xmm8 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm10, %xmm9 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE2-NEXT: pxor %xmm10, %xmm10 +; SSE2-NEXT: pcmpeqd %xmm10, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm11, %xmm11 +; SSE2-NEXT: pxor %xmm11, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm5, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm15 = xmm1[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm13, %xmm12 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1] +; SSE2-NEXT: pcmpeqd %xmm10, %xmm15 +; SSE2-NEXT: pxor %xmm11, %xmm15 +; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm2[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm6, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm6[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm14, %xmm13 +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; SSE2-NEXT: pcmpeqd %xmm10, %xmm5 +; SSE2-NEXT: pxor %xmm11, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm7, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm14, %xmm7 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] +; SSE2-NEXT: pcmpeqd %xmm10, %xmm6 +; SSE2-NEXT: pxor %xmm11, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSE2-NEXT: movdqa %xmm3, 48(%rdi) +; SSE2-NEXT: movdqa %xmm2, 32(%rdi) +; SSE2-NEXT: movdqa %xmm1, 16(%rdi) +; SSE2-NEXT: movdqa %xmm8, (%rdi) +; SSE2-NEXT: movdqa %xmm15, %xmm1 +; SSE2-NEXT: movdqa %xmm5, %xmm2 +; SSE2-NEXT: movdqa %xmm6, %xmm3 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v16i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa %xmm0, %xmm8 +; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm4, %xmm8 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm10, %xmm9 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSSE3-NEXT: pxor %xmm10, %xmm10 +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm11, %xmm11 +; SSSE3-NEXT: pxor %xmm11, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm5, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm15 = xmm1[1,3,2,3] +; SSSE3-NEXT: 
pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm13, %xmm12 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1] +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm15 +; SSSE3-NEXT: pxor %xmm11, %xmm15 +; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm2[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm6, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm6[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm14, %xmm13 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm5 +; SSSE3-NEXT: pxor %xmm11, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm7, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm14, %xmm7 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1] +; SSSE3-NEXT: pcmpeqd %xmm10, %xmm6 +; SSSE3-NEXT: pxor %xmm11, %xmm6 +; SSSE3-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; SSSE3-NEXT: movdqa %xmm3, 48(%rdi) +; SSSE3-NEXT: movdqa %xmm2, 32(%rdi) +; SSSE3-NEXT: movdqa %xmm1, 16(%rdi) +; SSSE3-NEXT: movdqa %xmm8, (%rdi) +; SSSE3-NEXT: movdqa %xmm15, %xmm1 +; SSSE3-NEXT: movdqa %xmm5, %xmm2 +; SSSE3-NEXT: movdqa %xmm6, %xmm3 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v16i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm8, %xmm9 +; SSE41-NEXT: movdqa %xmm0, %xmm8 +; SSE41-NEXT: pmuludq %xmm4, %xmm8 +; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7] +; SSE41-NEXT: pxor %xmm12, %xmm12 +; SSE41-NEXT: pcmpeqd %xmm12, %xmm8 +; SSE41-NEXT: pcmpeqd %xmm13, %xmm13 +; SSE41-NEXT: pxor %xmm13, %xmm8 +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm9, %xmm10 +; SSE41-NEXT: movdqa %xmm1, %xmm9 +; SSE41-NEXT: pmuludq %xmm5, %xmm9 +; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2,3],xmm9[4,5],xmm10[6,7] +; SSE41-NEXT: pcmpeqd %xmm12, %xmm9 +; SSE41-NEXT: pxor %xmm13, %xmm9 +; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm6[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm2[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm10, %xmm11 +; SSE41-NEXT: movdqa %xmm2, %xmm10 +; SSE41-NEXT: pmuludq %xmm6, %xmm10 +; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3],xmm10[4,5],xmm11[6,7] +; SSE41-NEXT: pcmpeqd %xmm12, %xmm10 +; SSE41-NEXT: pxor %xmm13, %xmm10 +; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm7[1,1,3,3] 
+; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm11, %xmm14 +; SSE41-NEXT: movdqa %xmm3, %xmm11 +; SSE41-NEXT: pmuludq %xmm7, %xmm11 +; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1],xmm14[2,3],xmm11[4,5],xmm14[6,7] +; SSE41-NEXT: pcmpeqd %xmm12, %xmm11 +; SSE41-NEXT: pxor %xmm13, %xmm11 +; SSE41-NEXT: pmulld %xmm4, %xmm0 +; SSE41-NEXT: pmulld %xmm5, %xmm1 +; SSE41-NEXT: pmulld %xmm6, %xmm2 +; SSE41-NEXT: pmulld %xmm7, %xmm3 +; SSE41-NEXT: movdqa %xmm3, 48(%rdi) +; SSE41-NEXT: movdqa %xmm2, 32(%rdi) +; SSE41-NEXT: movdqa %xmm1, 16(%rdi) +; SSE41-NEXT: movdqa %xmm0, (%rdi) +; SSE41-NEXT: movdqa %xmm8, %xmm0 +; SSE41-NEXT: movdqa %xmm9, %xmm1 +; SSE41-NEXT: movdqa %xmm10, %xmm2 +; SSE41-NEXT: movdqa %xmm11, %xmm3 +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v16i32: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10 +; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm10[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm12 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm12[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm6, %xmm7, %xmm6 +; AVX1-NEXT: vpmuludq %xmm10, %xmm12, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7] +; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm6, %xmm6 +; AVX1-NEXT: vpcmpeqd %xmm9, %xmm9, %xmm9 +; AVX1-NEXT: vpxor %xmm9, %xmm6, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm3[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm7, %xmm4, %xmm4 +; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2,3],xmm7[4,5],xmm4[6,7] +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpxor %xmm9, %xmm4, %xmm4 +; AVX1-NEXT: vpackssdw %xmm6, %xmm4, %xmm11 +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm4[1,1,3,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6 +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm6[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5 +; AVX1-NEXT: vpxor %xmm9, %xmm5, %xmm13 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5 +; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm7 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7] +; AVX1-NEXT: vpcmpeqd %xmm8, %xmm5, %xmm5 +; AVX1-NEXT: vpxor %xmm9, %xmm5, %xmm5 +; AVX1-NEXT: vpackssdw %xmm13, %xmm5, %xmm5 +; AVX1-NEXT: vpmulld %xmm4, %xmm6, %xmm4 +; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm2 +; AVX1-NEXT: vpmulld %xmm10, %xmm12, %xmm0 +; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm3 +; AVX1-NEXT: vpmovsxwd %xmm5, %xmm0 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vpmovsxwd %xmm11, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm11[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1 +; AVX1-NEXT: vmovaps %ymm3, 32(%rdi) +; AVX1-NEXT: vmovaps %ymm2, (%rdi) +; 
AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v16i32: +; AVX2: # %bb.0: +; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuludq %ymm4, %ymm5, %ymm4 +; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm5 +; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7] +; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5 +; AVX2-NEXT: vpcmpeqd %ymm5, %ymm4, %ymm4 +; AVX2-NEXT: vpcmpeqd %ymm6, %ymm6, %ymm6 +; AVX2-NEXT: vpxor %ymm6, %ymm4, %ymm4 +; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm7 +; AVX2-NEXT: vpackssdw %xmm7, %xmm4, %xmm4 +; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm2[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpmuludq %ymm7, %ymm8, %ymm7 +; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm8 +; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[1,1,3,3,5,5,7,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2],ymm7[3],ymm8[4],ymm7[5],ymm8[6],ymm7[7] +; AVX2-NEXT: vpcmpeqd %ymm5, %ymm7, %ymm5 +; AVX2-NEXT: vpxor %ymm6, %ymm5, %ymm5 +; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6 +; AVX2-NEXT: vpackssdw %xmm6, %xmm5, %xmm5 +; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm2 +; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm3 +; AVX2-NEXT: vpmovsxwd %xmm5, %ymm0 +; AVX2-NEXT: vpmovsxwd %xmm4, %ymm1 +; AVX2-NEXT: vmovdqa %ymm3, 32(%rdi) +; AVX2-NEXT: vmovdqa %ymm2, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v16i32: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vpshufd {{.*#+}} zmm3 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; AVX512-NEXT: vpshufd {{.*#+}} zmm4 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; AVX512-NEXT: vpmuludq %zmm3, %zmm4, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [1,17,3,19,5,21,7,23,9,25,11,27,13,29,15,31] +; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4 +; AVX512-NEXT: vptestmd %zmm4, %zmm4, %k1 +; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1 +; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1) + %val = extractvalue {<16 x i32>, <16 x i1>} %t, 0 + %obit = extractvalue {<16 x i32>, <16 x i1>} %t, 1 + %res = sext <16 x i1> %obit to <16 x i32> + store <16 x i32> %val, <16 x i32>* %p2 + ret <16 x i32> %res +} + +define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind { +; SSE2-LABEL: umulo_v16i8: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: pmullw %xmm2, %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] +; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm0, %xmm4 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = 
xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSE2-NEXT: pmullw %xmm5, %xmm4 +; SSE2-NEXT: pand %xmm2, %xmm4 +; SSE2-NEXT: packuswb %xmm3, %xmm4 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] +; SSE2-NEXT: movdqa %xmm0, %xmm5 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] +; SSE2-NEXT: pmullw %xmm3, %xmm5 +; SSE2-NEXT: psrlw $8, %xmm5 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-NEXT: pmullw %xmm1, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: packuswb %xmm5, %xmm0 +; SSE2-NEXT: pcmpeqb %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm0, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: pslld $31, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: pslld $31, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm3, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSE2-NEXT: pslld $31, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSE2-NEXT: pslld $31, %xmm3 +; SSE2-NEXT: psrad $31, %xmm3 +; SSE2-NEXT: movdqa %xmm4, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v16i8: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa %xmm1, %xmm2 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSSE3-NEXT: movdqa %xmm0, %xmm3 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSSE3-NEXT: pmullw %xmm2, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] +; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: movdqa %xmm1, %xmm5 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] +; SSSE3-NEXT: movdqa %xmm0, %xmm4 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSSE3-NEXT: pmullw %xmm5, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 +; SSSE3-NEXT: packuswb %xmm3, %xmm4 
+; SSSE3-NEXT: pxor %xmm2, %xmm2 +; SSSE3-NEXT: movdqa %xmm1, %xmm3 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] +; SSSE3-NEXT: movdqa %xmm0, %xmm5 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15] +; SSSE3-NEXT: pmullw %xmm3, %xmm5 +; SSSE3-NEXT: psrlw $8, %xmm5 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSSE3-NEXT: pmullw %xmm1, %xmm0 +; SSSE3-NEXT: psrlw $8, %xmm0 +; SSSE3-NEXT: packuswb %xmm5, %xmm0 +; SSSE3-NEXT: pcmpeqb %xmm2, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: pxor %xmm0, %xmm3 +; SSSE3-NEXT: movdqa %xmm3, %xmm1 +; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSSE3-NEXT: pslld $31, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: pslld $31, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSSE3-NEXT: movdqa %xmm3, %xmm2 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; SSSE3-NEXT: pslld $31, %xmm2 +; SSSE3-NEXT: psrad $31, %xmm2 +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] +; SSSE3-NEXT: pslld $31, %xmm3 +; SSSE3-NEXT: psrad $31, %xmm3 +; SSSE3-NEXT: movdqa %xmm4, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v16i8: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm3 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; SSE41-NEXT: pmullw %xmm2, %xmm5 +; SSE41-NEXT: pand %xmm5, %xmm4 +; SSE41-NEXT: packuswb %xmm3, %xmm4 +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] +; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = 
xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] +; SSE41-NEXT: pmullw %xmm1, %xmm0 +; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: psrlw $8, %xmm5 +; SSE41-NEXT: packuswb %xmm0, %xmm5 +; SSE41-NEXT: pcmpeqb %xmm2, %xmm5 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm5, %xmm3 +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,2,3] +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1] +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,1,2,3] +; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero +; SSE41-NEXT: pslld $31, %xmm3 +; SSE41-NEXT: psrad $31, %xmm3 +; SSE41-NEXT: movdqa %xmm4, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v16i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15] +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15] +; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm1 +; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX1-NEXT: vpsrad $31, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] +; AVX1-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = 
xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX1-NEXT: vpslld $31, %xmm3, %xmm3 +; AVX1-NEXT: vpsrad $31, %xmm3, %xmm3 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX1-NEXT: vpsrad $31, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 +; AVX1-NEXT: vmovdqa %xmm2, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v16i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm2 +; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX2-NEXT: vpslld $31, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpslld $31, %ymm1, %ymm1 +; AVX2-NEXT: vpsrad $31, %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa %xmm2, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v16i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero +; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm1 +; AVX512-NEXT: vpsrlw $8, %ymm1, %ymm0 +; AVX512-NEXT: vpmovwb %ymm0, %xmm0 +; AVX512-NEXT: vptestmb %xmm0, %xmm0, %k1 +; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} +; AVX512-NEXT: vpmovwb %ymm1, (%rdi) +; AVX512-NEXT: retq + %t = call {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1) + %val = extractvalue {<16 x i8>, <16 x i1>} %t, 0 + %obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1 + %res = sext <16 x i1> %obit to <16 x i32> + store <16 
x i8> %val, <16 x i8>* %p2 + ret <16 x i32> %res +} + +define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind { +; SSE2-LABEL: umulo_v8i16: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: pmullw %xmm1, %xmm2 +; SSE2-NEXT: pmulhuw %xmm1, %xmm0 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpeqw %xmm0, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: pslld $31, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: pslld $31, %xmm1 +; SSE2-NEXT: psrad $31, %xmm1 +; SSE2-NEXT: movdqa %xmm2, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v8i16: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: pmullw %xmm1, %xmm2 +; SSSE3-NEXT: pmulhuw %xmm1, %xmm0 +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpeqw %xmm0, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 +; SSSE3-NEXT: pxor %xmm3, %xmm1 +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSSE3-NEXT: pslld $31, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSSE3-NEXT: pslld $31, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm1 +; SSSE3-NEXT: movdqa %xmm2, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v8i16: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmullw %xmm1, %xmm2 +; SSE41-NEXT: pmulhuw %xmm1, %xmm0 +; SSE41-NEXT: pxor %xmm3, %xmm3 +; SSE41-NEXT: pcmpeqw %xmm0, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE41-NEXT: pxor %xmm3, %xmm1 +; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; SSE41-NEXT: pslld $31, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE41-NEXT: pslld $31, %xmm1 +; SSE41-NEXT: psrad $31, %xmm1 +; SSE41-NEXT: movdqa %xmm2, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v8i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm2 +; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vmovdqa %xmm2, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v8i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm2 +; AVX2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vmovdqa %xmm2, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v8i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm2 +; AVX512-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vptestmw %xmm0, %xmm0, %k1 +; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0 +; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; AVX512-NEXT: vmovdqa %xmm2, (%rdi) +; AVX512-NEXT: retq + %t = call {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1) + %val = extractvalue {<8 x i16>, <8 
x i1>} %t, 0 + %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1 + %res = sext <8 x i1> %obit to <8 x i32> + store <8 x i16> %val, <8 x i16>* %p2 + ret <8 x i32> %res +} + +define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind { +; SSE2-LABEL: umulo_v2i64: +; SSE2: # %bb.0: +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %r9 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm2, %rsi +; SSE2-NEXT: movq %xmm0, %rax +; SSE2-NEXT: movq %xmm1, %rdx +; SSE2-NEXT: xorl %ecx, %ecx +; SSE2-NEXT: mulq %rdx +; SSE2-NEXT: movq %rax, %r8 +; SSE2-NEXT: seto %cl +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: xorl %ecx, %ecx +; SSE2-NEXT: movq %r9, %rax +; SSE2-NEXT: mulq %rsi +; SSE2-NEXT: seto %cl +; SSE2-NEXT: movq %rcx, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: movq %r8, %xmm1 +; SSE2-NEXT: movq %rax, %xmm2 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: movdqa %xmm1, (%rdi) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v2i64: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %r9 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm2, %rsi +; SSSE3-NEXT: movq %xmm0, %rax +; SSSE3-NEXT: movq %xmm1, %rdx +; SSSE3-NEXT: xorl %ecx, %ecx +; SSSE3-NEXT: mulq %rdx +; SSSE3-NEXT: movq %rax, %r8 +; SSSE3-NEXT: seto %cl +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: xorl %ecx, %ecx +; SSSE3-NEXT: movq %r9, %rax +; SSSE3-NEXT: mulq %rsi +; SSSE3-NEXT: seto %cl +; SSSE3-NEXT: movq %rcx, %xmm1 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSSE3-NEXT: movq %r8, %xmm1 +; SSSE3-NEXT: movq %rax, %xmm2 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSSE3-NEXT: movdqa %xmm1, (%rdi) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v2i64: +; SSE41: # %bb.0: +; SSE41-NEXT: movq %xmm0, %r9 +; SSE41-NEXT: movq %xmm1, %rsi +; SSE41-NEXT: pextrq $1, %xmm0, %rax +; SSE41-NEXT: pextrq $1, %xmm1, %rdx +; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: mulq %rdx +; SSE41-NEXT: movq %rax, %r8 +; SSE41-NEXT: seto %cl +; SSE41-NEXT: movq %rcx, %xmm1 +; SSE41-NEXT: xorl %ecx, %ecx +; SSE41-NEXT: movq %r9, %rax +; SSE41-NEXT: mulq %rsi +; SSE41-NEXT: seto %cl +; SSE41-NEXT: movq %rcx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE41-NEXT: movq %r8, %xmm1 +; SSE41-NEXT: movq %rax, %xmm2 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; SSE41-NEXT: movdqa %xmm2, (%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v2i64: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovq %xmm0, %r9 +; AVX1-NEXT: vmovq %xmm1, %rsi +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vpextrq $1, %xmm1, %rdx +; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: mulq %rdx +; AVX1-NEXT: movq %rax, %r8 +; AVX1-NEXT: seto %cl +; AVX1-NEXT: vmovq %rcx, %xmm0 +; AVX1-NEXT: xorl %ecx, %ecx +; AVX1-NEXT: movq %r9, %rax +; AVX1-NEXT: mulq %rsi +; AVX1-NEXT: seto %cl +; AVX1-NEXT: vmovq %rcx, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: vmovq %r8, %xmm1 +; AVX1-NEXT: vmovq %rax, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-NEXT: vmovdqa %xmm1, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v2i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq %xmm0, %r9 +; AVX2-NEXT: vmovq %xmm1, %rsi +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vpextrq $1, %xmm1, %rdx +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: mulq %rdx +; AVX2-NEXT: movq %rax, %r8 +; AVX2-NEXT: seto %cl +; AVX2-NEXT: vmovq 
%rcx, %xmm0 +; AVX2-NEXT: xorl %ecx, %ecx +; AVX2-NEXT: movq %r9, %rax +; AVX2-NEXT: mulq %rsi +; AVX2-NEXT: seto %cl +; AVX2-NEXT: vmovq %rcx, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: vmovq %r8, %xmm1 +; AVX2-NEXT: vmovq %rax, %xmm2 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX2-NEXT: vmovdqa %xmm1, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v2i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovq %xmm0, %rcx +; AVX512-NEXT: vmovq %xmm1, %rsi +; AVX512-NEXT: vpextrq $1, %xmm0, %rax +; AVX512-NEXT: vpextrq $1, %xmm1, %rdx +; AVX512-NEXT: mulq %rdx +; AVX512-NEXT: movq %rax, %r8 +; AVX512-NEXT: seto %al +; AVX512-NEXT: kmovd %eax, %k0 +; AVX512-NEXT: kshiftlw $1, %k0, %k0 +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: mulq %rsi +; AVX512-NEXT: seto %cl +; AVX512-NEXT: andl $1, %ecx +; AVX512-NEXT: kmovw %ecx, %k1 +; AVX512-NEXT: korw %k0, %k1, %k1 +; AVX512-NEXT: vmovq %r8, %xmm0 +; AVX512-NEXT: vmovq %rax, %xmm1 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vmovdqa %xmm0, (%rdi) +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: retq + %t = call {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1) + %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i64> %val, <2 x i64>* %p2 + ret <2 x i32> %res +} + +define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind { +; SSE2-LABEL: umulo_v4i24: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,2,3] +; SSE2-NEXT: movdqa %xmm4, %xmm1 +; SSE2-NEXT: psrld $24, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm3, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movw %ax, (%rdi) +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: movb %al, 2(%rdi) +; SSE2-NEXT: movd %xmm5, %eax +; SSE2-NEXT: movw %ax, 9(%rdi) +; SSE2-NEXT: movd %xmm6, %ecx +; SSE2-NEXT: movw %cx, 6(%rdi) +; SSE2-NEXT: movd %xmm7, %edx +; SSE2-NEXT: movw %dx, 3(%rdi) +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: movb %al, 11(%rdi) +; SSE2-NEXT: shrl $16, %ecx +; SSE2-NEXT: movb %cl, 8(%rdi) +; SSE2-NEXT: shrl $16, %edx +; SSE2-NEXT: movb %dl, 5(%rdi) +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v4i24: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] +; SSSE3-NEXT: pand %xmm2, %xmm1 +; SSSE3-NEXT: pand %xmm2, %xmm0 +; SSSE3-NEXT: pshufd 
{{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm4[1,1,2,3] +; SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSSE3-NEXT: psrld $24, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: pxor %xmm3, %xmm2 +; SSSE3-NEXT: pxor %xmm3, %xmm1 +; SSSE3-NEXT: por %xmm2, %xmm1 +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: movw %ax, (%rdi) +; SSSE3-NEXT: shrl $16, %eax +; SSSE3-NEXT: movb %al, 2(%rdi) +; SSSE3-NEXT: movd %xmm5, %eax +; SSSE3-NEXT: movw %ax, 9(%rdi) +; SSSE3-NEXT: movd %xmm6, %ecx +; SSSE3-NEXT: movw %cx, 6(%rdi) +; SSSE3-NEXT: movd %xmm7, %edx +; SSSE3-NEXT: movw %dx, 3(%rdi) +; SSSE3-NEXT: shrl $16, %eax +; SSSE3-NEXT: movb %al, 11(%rdi) +; SSSE3-NEXT: shrl $16, %ecx +; SSSE3-NEXT: movb %cl, 8(%rdi) +; SSSE3-NEXT: shrl $16, %edx +; SSSE3-NEXT: movb %dl, 5(%rdi) +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v4i24: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] +; SSE41-NEXT: pand %xmm0, %xmm2 +; SSE41-NEXT: pand %xmm0, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm0, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: pmuludq %xmm1, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7] +; SSE41-NEXT: pxor %xmm0, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm4 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm3, %xmm4 +; SSE41-NEXT: pmulld %xmm2, %xmm1 +; SSE41-NEXT: pextrd $3, %xmm1, %eax +; SSE41-NEXT: pextrd $2, %xmm1, %ecx +; SSE41-NEXT: pextrd $1, %xmm1, %edx +; SSE41-NEXT: movd %xmm1, %esi +; SSE41-NEXT: psrld $24, %xmm1 +; SSE41-NEXT: pcmpeqd %xmm1, %xmm0 +; SSE41-NEXT: pxor %xmm3, %xmm0 +; SSE41-NEXT: por %xmm4, %xmm0 +; SSE41-NEXT: movw %ax, 9(%rdi) +; SSE41-NEXT: movw %cx, 6(%rdi) +; SSE41-NEXT: movw %dx, 3(%rdi) +; SSE41-NEXT: movw %si, (%rdi) +; SSE41-NEXT: shrl $16, %eax +; SSE41-NEXT: movb %al, 11(%rdi) +; SSE41-NEXT: shrl $16, %ecx +; SSE41-NEXT: movb %cl, 8(%rdi) +; SSE41-NEXT: shrl $16, %edx +; SSE41-NEXT: movb %dl, 5(%rdi) +; SSE41-NEXT: shrl $16, %esi +; SSE41-NEXT: movb %sil, 2(%rdi) +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v4i24: +; AVX1: # %bb.0: +; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [2.35098856E-38,2.35098856E-38,2.35098856E-38,2.35098856E-38] +; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; 
AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrld $24, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpextrd $3, %xmm1, %eax +; AVX1-NEXT: movw %ax, 9(%rdi) +; AVX1-NEXT: vpextrd $2, %xmm1, %ecx +; AVX1-NEXT: movw %cx, 6(%rdi) +; AVX1-NEXT: vpextrd $1, %xmm1, %edx +; AVX1-NEXT: movw %dx, 3(%rdi) +; AVX1-NEXT: vmovd %xmm1, %esi +; AVX1-NEXT: movw %si, (%rdi) +; AVX1-NEXT: shrl $16, %eax +; AVX1-NEXT: movb %al, 11(%rdi) +; AVX1-NEXT: shrl $16, %ecx +; AVX1-NEXT: movb %cl, 8(%rdi) +; AVX1-NEXT: shrl $16, %edx +; AVX1-NEXT: movb %dl, 5(%rdi) +; AVX1-NEXT: shrl $16, %esi +; AVX1-NEXT: movb %sil, 2(%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v4i24: +; AVX2: # %bb.0: +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215] +; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 +; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpsrld $24, %xmm1, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpextrd $3, %xmm1, %eax +; AVX2-NEXT: movw %ax, 9(%rdi) +; AVX2-NEXT: vpextrd $2, %xmm1, %ecx +; AVX2-NEXT: movw %cx, 6(%rdi) +; AVX2-NEXT: vpextrd $1, %xmm1, %edx +; AVX2-NEXT: movw %dx, 3(%rdi) +; AVX2-NEXT: vmovd %xmm1, %esi +; AVX2-NEXT: movw %si, (%rdi) +; AVX2-NEXT: shrl $16, %eax +; AVX2-NEXT: movb %al, 11(%rdi) +; AVX2-NEXT: shrl $16, %ecx +; AVX2-NEXT: movb %cl, 8(%rdi) +; AVX2-NEXT: shrl $16, %edx +; AVX2-NEXT: movb %dl, 5(%rdi) +; AVX2-NEXT: shrl $16, %esi +; AVX2-NEXT: movb %sil, 2(%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v4i24: +; AVX512: # %bb.0: +; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215] +; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX512-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1 +; AVX512-NEXT: vpsrld $24, %xmm1, %xmm0 +; AVX512-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: vpextrd $3, %xmm1, %eax +; AVX512-NEXT: movw %ax, 9(%rdi) +; AVX512-NEXT: vpextrd $2, %xmm1, %ecx +; AVX512-NEXT: movw %cx, 6(%rdi) +; AVX512-NEXT: vpextrd $1, %xmm1, %edx +; AVX512-NEXT: movw %dx, 3(%rdi) +; AVX512-NEXT: vmovd %xmm1, %esi +; AVX512-NEXT: movw %si, (%rdi) +; AVX512-NEXT: shrl $16, %eax +; AVX512-NEXT: movb %al, 11(%rdi) +; AVX512-NEXT: shrl $16, %ecx +; AVX512-NEXT: movb %cl, 
8(%rdi) +; AVX512-NEXT: shrl $16, %edx +; AVX512-NEXT: movb %dl, 5(%rdi) +; AVX512-NEXT: shrl $16, %esi +; AVX512-NEXT: movb %sil, 2(%rdi) +; AVX512-NEXT: retq + %t = call {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1) + %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i24> %val, <4 x i24>* %p2 + ret <4 x i32> %res +} + +define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind { +; SSE2-LABEL: umulo_v4i1: +; SSE2: # %bb.0: +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: pmuludq %xmm1, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm3, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm4, %xmm2 +; SSE2-NEXT: pmaddwd %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrld $1, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 +; SSE2-NEXT: pslld $31, %xmm0 +; SSE2-NEXT: movmskps %xmm0, %eax +; SSE2-NEXT: movb %al, (%rdi) +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v4i1: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] +; SSSE3-NEXT: pand %xmm2, %xmm1 +; SSSE3-NEXT: pand %xmm2, %xmm0 +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: pmuludq %xmm1, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm3, %xmm4 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSSE3-NEXT: pxor %xmm3, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4 +; SSSE3-NEXT: pxor %xmm4, %xmm2 +; SSSE3-NEXT: pmaddwd %xmm1, %xmm0 +; SSSE3-NEXT: movdqa %xmm0, %xmm1 +; SSSE3-NEXT: psrld $1, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1 +; SSSE3-NEXT: pxor %xmm4, %xmm1 +; SSSE3-NEXT: por %xmm2, %xmm1 +; SSSE3-NEXT: pslld $31, %xmm0 +; SSSE3-NEXT: movmskps %xmm0, %eax +; SSSE3-NEXT: movb %al, (%rdi) +; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v4i1: +; SSE41: # %bb.0: +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] +; SSE41-NEXT: pand %xmm2, %xmm0 +; SSE41-NEXT: pand %xmm2, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE41-NEXT: pmuludq %xmm2, %xmm3 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pmuludq %xmm1, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3] +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7] +; SSE41-NEXT: pxor %xmm2, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm4 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: pxor %xmm3, %xmm4 +; SSE41-NEXT: pmaddwd %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psrld $1, %xmm0 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm2 +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: por %xmm4, %xmm2 +; SSE41-NEXT: pslld $31, %xmm1 +; SSE41-NEXT: movmskps %xmm1, %eax +; SSE41-NEXT: movb %al, 
(%rdi) +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v4i1: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1] +; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7] +; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 +; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrld $1, %xmm1, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX1-NEXT: vmovmskps %xmm1, %eax +; AVX1-NEXT: movb %al, (%rdi) +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v4i1: +; AVX2: # %bb.0: +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1] +; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2 +; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] +; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3] +; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4 +; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1 +; AVX2-NEXT: vpsrld $1, %xmm1, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0 +; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpslld $31, %xmm1, %xmm1 +; AVX2-NEXT: vmovmskps %xmm1, %eax +; AVX2-NEXT: movb %al, (%rdi) +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v4i1: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: vpslld $31, %xmm0, %xmm0 +; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0 +; AVX512-NEXT: kshiftrw $3, %k0, %k1 +; AVX512-NEXT: kmovd %k1, %r9d +; AVX512-NEXT: andb $1, %r9b +; AVX512-NEXT: vpslld $31, %xmm1, %xmm0 +; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1 +; AVX512-NEXT: kshiftrw $3, %k1, %k2 +; AVX512-NEXT: kmovd %k2, %r10d +; AVX512-NEXT: andb $1, %r10b +; AVX512-NEXT: kshiftrw $2, %k0, %k2 +; AVX512-NEXT: kmovd %k2, %r11d +; AVX512-NEXT: andb $1, %r11b +; AVX512-NEXT: kshiftrw $2, %k1, %k2 +; AVX512-NEXT: kmovd %k2, %ebx +; AVX512-NEXT: andb $1, %bl +; AVX512-NEXT: kshiftrw $1, %k0, %k2 +; AVX512-NEXT: kmovd %k2, %edx +; AVX512-NEXT: andb $1, %dl +; AVX512-NEXT: kshiftrw $1, %k1, %k2 +; AVX512-NEXT: kmovd %k2, %esi +; AVX512-NEXT: andb $1, %sil +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: andb $1, %al +; AVX512-NEXT: kmovd %k1, %ecx +; AVX512-NEXT: andb $1, %cl +; AVX512-NEXT: # kill: def $al killed $al killed $eax +; AVX512-NEXT: mulb %cl +; AVX512-NEXT: movl %eax, %r8d +; AVX512-NEXT: seto %al +; AVX512-NEXT: testb $-2, %r8b +; AVX512-NEXT: setne %cl +; AVX512-NEXT: orb %al, %cl +; AVX512-NEXT: kmovd %ecx, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k1 +; AVX512-NEXT: movl %edx, %eax +; AVX512-NEXT: mulb %sil +; AVX512-NEXT: movl %eax, %edx +; AVX512-NEXT: seto %al +; AVX512-NEXT: testb $-2, %dl +; AVX512-NEXT: setne %cl +; AVX512-NEXT: orb %al, %cl +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: 
kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $14, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k1 +; AVX512-NEXT: movl %r11d, %eax +; AVX512-NEXT: mulb %bl +; AVX512-NEXT: movl %eax, %esi +; AVX512-NEXT: seto %al +; AVX512-NEXT: testb $-2, %sil +; AVX512-NEXT: setne %cl +; AVX512-NEXT: orb %al, %cl +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $13, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftlw $13, %k0, %k0 +; AVX512-NEXT: kshiftrw $13, %k0, %k0 +; AVX512-NEXT: movl %r9d, %eax +; AVX512-NEXT: mulb %r10b +; AVX512-NEXT: # kill: def $al killed $al def $eax +; AVX512-NEXT: seto %cl +; AVX512-NEXT: testb $-2, %al +; AVX512-NEXT: setne %bl +; AVX512-NEXT: orb %cl, %bl +; AVX512-NEXT: kmovd %ebx, %k1 +; AVX512-NEXT: kshiftlw $3, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k1 +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: kmovd %r8d, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k1 +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $14, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $2, %k0, %k1 +; AVX512-NEXT: kmovd %esi, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $13, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $3, %k0, %k1 +; AVX512-NEXT: kmovd %eax, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $12, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kmovd %k0, %eax +; AVX512-NEXT: movb %al, (%rdi) +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: retq + %t = call {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1) + %val = extractvalue {<4 x i1>, <4 x i1>} %t, 0 + %obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1 + %res = sext <4 x i1> %obit to <4 x i32> + store <4 x i1> %val, <4 x i1>* %p2 + ret <4 x i32> %res +} + +define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind { +; SSE2-LABEL: umulo_v2i128: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r15 +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %r13 +; SSE2-NEXT: pushq %r12 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: movq %rdx, %r12 +; SSE2-NEXT: movq %rdi, %r11 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; SSE2-NEXT: testq %r10, %r10 +; SSE2-NEXT: setne %dl +; SSE2-NEXT: testq %rcx, %rcx +; SSE2-NEXT: setne %r13b +; SSE2-NEXT: andb %dl, %r13b +; SSE2-NEXT: mulq %r15 +; SSE2-NEXT: movq %rax, %rdi +; SSE2-NEXT: seto %bpl +; SSE2-NEXT: movq %r10, %rax +; SSE2-NEXT: mulq %r12 +; SSE2-NEXT: movq %rax, %rbx +; SSE2-NEXT: seto %cl +; SSE2-NEXT: orb %bpl, %cl +; SSE2-NEXT: addq %rdi, %rbx +; SSE2-NEXT: movq %r12, %rax +; SSE2-NEXT: mulq %r15 +; SSE2-NEXT: movq %rax, %r10 +; SSE2-NEXT: movq %rdx, %r15 +; SSE2-NEXT: addq %rbx, %r15 +; SSE2-NEXT: setb %al +; SSE2-NEXT: orb %cl, %al +; SSE2-NEXT: orb %r13b, %al +; SSE2-NEXT: movzbl %al, %ebp +; SSE2-NEXT: testq %r9, %r9 +; SSE2-NEXT: setne %al +; SSE2-NEXT: testq %rsi, %rsi +; SSE2-NEXT: setne %r13b +; SSE2-NEXT: andb %al, %r13b +; SSE2-NEXT: movq %rsi, %rax +; SSE2-NEXT: mulq %r8 +; SSE2-NEXT: movq %rax, %rsi +; SSE2-NEXT: seto %r12b +; 
SSE2-NEXT: movq %r9, %rax +; SSE2-NEXT: mulq %r11 +; SSE2-NEXT: movq %rax, %rdi +; SSE2-NEXT: seto %bl +; SSE2-NEXT: orb %r12b, %bl +; SSE2-NEXT: addq %rsi, %rdi +; SSE2-NEXT: movq %r11, %rax +; SSE2-NEXT: mulq %r8 +; SSE2-NEXT: addq %rdi, %rdx +; SSE2-NEXT: setb %cl +; SSE2-NEXT: orb %bl, %cl +; SSE2-NEXT: orb %r13b, %cl +; SSE2-NEXT: movzbl %cl, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: pinsrw $4, %ebp, %xmm0 +; SSE2-NEXT: movq %r10, 16(%r14) +; SSE2-NEXT: movq %rax, (%r14) +; SSE2-NEXT: movq %r15, 24(%r14) +; SSE2-NEXT: movq %rdx, 8(%r14) +; SSE2-NEXT: psllq $63, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r12 +; SSE2-NEXT: popq %r13 +; SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %r15 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; SSSE3-LABEL: umulo_v2i128: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pushq %rbp +; SSSE3-NEXT: pushq %r15 +; SSSE3-NEXT: pushq %r14 +; SSSE3-NEXT: pushq %r13 +; SSSE3-NEXT: pushq %r12 +; SSSE3-NEXT: pushq %rbx +; SSSE3-NEXT: movq %rcx, %rax +; SSSE3-NEXT: movq %rdx, %r12 +; SSSE3-NEXT: movq %rdi, %r11 +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; SSSE3-NEXT: testq %r10, %r10 +; SSSE3-NEXT: setne %dl +; SSSE3-NEXT: testq %rcx, %rcx +; SSSE3-NEXT: setne %r13b +; SSSE3-NEXT: andb %dl, %r13b +; SSSE3-NEXT: mulq %r15 +; SSSE3-NEXT: movq %rax, %rdi +; SSSE3-NEXT: seto %bpl +; SSSE3-NEXT: movq %r10, %rax +; SSSE3-NEXT: mulq %r12 +; SSSE3-NEXT: movq %rax, %rbx +; SSSE3-NEXT: seto %cl +; SSSE3-NEXT: orb %bpl, %cl +; SSSE3-NEXT: addq %rdi, %rbx +; SSSE3-NEXT: movq %r12, %rax +; SSSE3-NEXT: mulq %r15 +; SSSE3-NEXT: movq %rax, %r10 +; SSSE3-NEXT: movq %rdx, %r15 +; SSSE3-NEXT: addq %rbx, %r15 +; SSSE3-NEXT: setb %al +; SSSE3-NEXT: orb %cl, %al +; SSSE3-NEXT: orb %r13b, %al +; SSSE3-NEXT: movzbl %al, %ebp +; SSSE3-NEXT: testq %r9, %r9 +; SSSE3-NEXT: setne %al +; SSSE3-NEXT: testq %rsi, %rsi +; SSSE3-NEXT: setne %r13b +; SSSE3-NEXT: andb %al, %r13b +; SSSE3-NEXT: movq %rsi, %rax +; SSSE3-NEXT: mulq %r8 +; SSSE3-NEXT: movq %rax, %rsi +; SSSE3-NEXT: seto %r12b +; SSSE3-NEXT: movq %r9, %rax +; SSSE3-NEXT: mulq %r11 +; SSSE3-NEXT: movq %rax, %rdi +; SSSE3-NEXT: seto %bl +; SSSE3-NEXT: orb %r12b, %bl +; SSSE3-NEXT: addq %rsi, %rdi +; SSSE3-NEXT: movq %r11, %rax +; SSSE3-NEXT: mulq %r8 +; SSSE3-NEXT: addq %rdi, %rdx +; SSSE3-NEXT: setb %cl +; SSSE3-NEXT: orb %bl, %cl +; SSSE3-NEXT: orb %r13b, %cl +; SSSE3-NEXT: movzbl %cl, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: pinsrw $4, %ebp, %xmm0 +; SSSE3-NEXT: movq %r10, 16(%r14) +; SSSE3-NEXT: movq %rax, (%r14) +; SSSE3-NEXT: movq %r15, 24(%r14) +; SSSE3-NEXT: movq %rdx, 8(%r14) +; SSSE3-NEXT: psllq $63, %xmm0 +; SSSE3-NEXT: psrad $31, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSSE3-NEXT: popq %rbx +; SSSE3-NEXT: popq %r12 +; SSSE3-NEXT: popq %r13 +; SSSE3-NEXT: popq %r14 +; SSSE3-NEXT: popq %r15 +; SSSE3-NEXT: popq %rbp +; SSSE3-NEXT: retq +; +; SSE41-LABEL: umulo_v2i128: +; SSE41: # %bb.0: +; SSE41-NEXT: pushq %rbp +; SSE41-NEXT: pushq %r15 +; SSE41-NEXT: pushq %r14 +; SSE41-NEXT: pushq %r13 +; SSE41-NEXT: pushq %r12 +; SSE41-NEXT: pushq %rbx +; SSE41-NEXT: movq %rcx, %rax +; SSE41-NEXT: movq %rdx, %r12 +; SSE41-NEXT: movq %rdi, %r11 +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; SSE41-NEXT: testq %r10, %r10 +; SSE41-NEXT: setne %dl +; SSE41-NEXT: testq 
%rcx, %rcx +; SSE41-NEXT: setne %r13b +; SSE41-NEXT: andb %dl, %r13b +; SSE41-NEXT: mulq %r15 +; SSE41-NEXT: movq %rax, %rdi +; SSE41-NEXT: seto %bpl +; SSE41-NEXT: movq %r10, %rax +; SSE41-NEXT: mulq %r12 +; SSE41-NEXT: movq %rax, %rbx +; SSE41-NEXT: seto %cl +; SSE41-NEXT: orb %bpl, %cl +; SSE41-NEXT: addq %rdi, %rbx +; SSE41-NEXT: movq %r12, %rax +; SSE41-NEXT: mulq %r15 +; SSE41-NEXT: movq %rax, %r10 +; SSE41-NEXT: movq %rdx, %r15 +; SSE41-NEXT: addq %rbx, %r15 +; SSE41-NEXT: setb %al +; SSE41-NEXT: orb %cl, %al +; SSE41-NEXT: orb %r13b, %al +; SSE41-NEXT: movzbl %al, %ebp +; SSE41-NEXT: testq %r9, %r9 +; SSE41-NEXT: setne %al +; SSE41-NEXT: testq %rsi, %rsi +; SSE41-NEXT: setne %r13b +; SSE41-NEXT: andb %al, %r13b +; SSE41-NEXT: movq %rsi, %rax +; SSE41-NEXT: mulq %r8 +; SSE41-NEXT: movq %rax, %rsi +; SSE41-NEXT: seto %r12b +; SSE41-NEXT: movq %r9, %rax +; SSE41-NEXT: mulq %r11 +; SSE41-NEXT: movq %rax, %rdi +; SSE41-NEXT: seto %bl +; SSE41-NEXT: orb %r12b, %bl +; SSE41-NEXT: addq %rsi, %rdi +; SSE41-NEXT: movq %r11, %rax +; SSE41-NEXT: mulq %r8 +; SSE41-NEXT: addq %rdi, %rdx +; SSE41-NEXT: setb %cl +; SSE41-NEXT: orb %bl, %cl +; SSE41-NEXT: orb %r13b, %cl +; SSE41-NEXT: movzbl %cl, %ecx +; SSE41-NEXT: movd %ecx, %xmm0 +; SSE41-NEXT: pinsrb $8, %ebp, %xmm0 +; SSE41-NEXT: movq %r10, 16(%r14) +; SSE41-NEXT: movq %rax, (%r14) +; SSE41-NEXT: movq %r15, 24(%r14) +; SSE41-NEXT: movq %rdx, 8(%r14) +; SSE41-NEXT: psllq $63, %xmm0 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] +; SSE41-NEXT: popq %rbx +; SSE41-NEXT: popq %r12 +; SSE41-NEXT: popq %r13 +; SSE41-NEXT: popq %r14 +; SSE41-NEXT: popq %r15 +; SSE41-NEXT: popq %rbp +; SSE41-NEXT: retq +; +; AVX1-LABEL: umulo_v2i128: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: pushq %r15 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: pushq %r13 +; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: movq %rcx, %rax +; AVX1-NEXT: movq %rdx, %r12 +; AVX1-NEXT: movq %rdi, %r11 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX1-NEXT: testq %r10, %r10 +; AVX1-NEXT: setne %dl +; AVX1-NEXT: testq %rcx, %rcx +; AVX1-NEXT: setne %r13b +; AVX1-NEXT: andb %dl, %r13b +; AVX1-NEXT: mulq %r15 +; AVX1-NEXT: movq %rax, %rdi +; AVX1-NEXT: seto %bpl +; AVX1-NEXT: movq %r10, %rax +; AVX1-NEXT: mulq %r12 +; AVX1-NEXT: movq %rax, %rbx +; AVX1-NEXT: seto %cl +; AVX1-NEXT: orb %bpl, %cl +; AVX1-NEXT: addq %rdi, %rbx +; AVX1-NEXT: movq %r12, %rax +; AVX1-NEXT: mulq %r15 +; AVX1-NEXT: movq %rax, %r10 +; AVX1-NEXT: movq %rdx, %r15 +; AVX1-NEXT: addq %rbx, %r15 +; AVX1-NEXT: setb %al +; AVX1-NEXT: orb %cl, %al +; AVX1-NEXT: orb %r13b, %al +; AVX1-NEXT: movzbl %al, %ebp +; AVX1-NEXT: testq %r9, %r9 +; AVX1-NEXT: setne %al +; AVX1-NEXT: testq %rsi, %rsi +; AVX1-NEXT: setne %r13b +; AVX1-NEXT: andb %al, %r13b +; AVX1-NEXT: movq %rsi, %rax +; AVX1-NEXT: mulq %r8 +; AVX1-NEXT: movq %rax, %rsi +; AVX1-NEXT: seto %r12b +; AVX1-NEXT: movq %r9, %rax +; AVX1-NEXT: mulq %r11 +; AVX1-NEXT: movq %rax, %rdi +; AVX1-NEXT: seto %cl +; AVX1-NEXT: orb %r12b, %cl +; AVX1-NEXT: addq %rsi, %rdi +; AVX1-NEXT: movq %r11, %rax +; AVX1-NEXT: mulq %r8 +; AVX1-NEXT: addq %rdi, %rdx +; AVX1-NEXT: setb %bl +; AVX1-NEXT: orb %cl, %bl +; AVX1-NEXT: orb %r13b, %bl +; AVX1-NEXT: movzbl %bl, %ecx +; AVX1-NEXT: vmovd %ecx, %xmm0 +; AVX1-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0 +; AVX1-NEXT: movq %r10, 16(%r14) +; AVX1-NEXT: movq %rax, (%r14) +; AVX1-NEXT: movq %r15, 24(%r14) +; 
AVX1-NEXT: movq %rdx, 8(%r14) +; AVX1-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: umulo_v2i128: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: movq %rcx, %rax +; AVX2-NEXT: movq %rdx, %r12 +; AVX2-NEXT: movq %rdi, %r11 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX2-NEXT: testq %r10, %r10 +; AVX2-NEXT: setne %dl +; AVX2-NEXT: testq %rcx, %rcx +; AVX2-NEXT: setne %r13b +; AVX2-NEXT: andb %dl, %r13b +; AVX2-NEXT: mulq %r15 +; AVX2-NEXT: movq %rax, %rdi +; AVX2-NEXT: seto %bpl +; AVX2-NEXT: movq %r10, %rax +; AVX2-NEXT: mulq %r12 +; AVX2-NEXT: movq %rax, %rbx +; AVX2-NEXT: seto %cl +; AVX2-NEXT: orb %bpl, %cl +; AVX2-NEXT: addq %rdi, %rbx +; AVX2-NEXT: movq %r12, %rax +; AVX2-NEXT: mulq %r15 +; AVX2-NEXT: movq %rax, %r10 +; AVX2-NEXT: movq %rdx, %r15 +; AVX2-NEXT: addq %rbx, %r15 +; AVX2-NEXT: setb %al +; AVX2-NEXT: orb %cl, %al +; AVX2-NEXT: orb %r13b, %al +; AVX2-NEXT: movzbl %al, %ebp +; AVX2-NEXT: testq %r9, %r9 +; AVX2-NEXT: setne %al +; AVX2-NEXT: testq %rsi, %rsi +; AVX2-NEXT: setne %r13b +; AVX2-NEXT: andb %al, %r13b +; AVX2-NEXT: movq %rsi, %rax +; AVX2-NEXT: mulq %r8 +; AVX2-NEXT: movq %rax, %rsi +; AVX2-NEXT: seto %r12b +; AVX2-NEXT: movq %r9, %rax +; AVX2-NEXT: mulq %r11 +; AVX2-NEXT: movq %rax, %rdi +; AVX2-NEXT: seto %cl +; AVX2-NEXT: orb %r12b, %cl +; AVX2-NEXT: addq %rsi, %rdi +; AVX2-NEXT: movq %r11, %rax +; AVX2-NEXT: mulq %r8 +; AVX2-NEXT: addq %rdi, %rdx +; AVX2-NEXT: setb %bl +; AVX2-NEXT: orb %cl, %bl +; AVX2-NEXT: orb %r13b, %bl +; AVX2-NEXT: movzbl %bl, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm0 +; AVX2-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0 +; AVX2-NEXT: movq %r10, 16(%r14) +; AVX2-NEXT: movq %rax, (%r14) +; AVX2-NEXT: movq %r15, 24(%r14) +; AVX2-NEXT: movq %rdx, 8(%r14) +; AVX2-NEXT: vpsllq $63, %xmm0, %xmm0 +; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: retq +; +; AVX512-LABEL: umulo_v2i128: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: movq %rcx, %rax +; AVX512-NEXT: movq %rdx, %r12 +; AVX512-NEXT: movq %rdi, %r11 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 +; AVX512-NEXT: testq %r10, %r10 +; AVX512-NEXT: setne %dl +; AVX512-NEXT: testq %rcx, %rcx +; AVX512-NEXT: setne %r13b +; AVX512-NEXT: andb %dl, %r13b +; AVX512-NEXT: mulq %r15 +; AVX512-NEXT: movq %rax, %rdi +; AVX512-NEXT: seto %bpl +; AVX512-NEXT: movq %r10, %rax +; AVX512-NEXT: mulq %r12 +; AVX512-NEXT: movq %rax, %rbx +; AVX512-NEXT: seto %cl +; AVX512-NEXT: orb %bpl, %cl +; AVX512-NEXT: addq %rdi, %rbx +; AVX512-NEXT: movq %r12, %rax +; AVX512-NEXT: mulq %r15 +; AVX512-NEXT: movq %rax, %r10 +; AVX512-NEXT: movq %rdx, %r15 +; AVX512-NEXT: addq %rbx, %r15 +; AVX512-NEXT: setb %al +; AVX512-NEXT: orb %cl, %al +; AVX512-NEXT: 
orb %r13b, %al +; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: testq %r9, %r9 +; AVX512-NEXT: setne %al +; AVX512-NEXT: testq %rsi, %rsi +; AVX512-NEXT: setne %cl +; AVX512-NEXT: andb %al, %cl +; AVX512-NEXT: movq %rsi, %rax +; AVX512-NEXT: mulq %r8 +; AVX512-NEXT: movq %rax, %rsi +; AVX512-NEXT: seto %bpl +; AVX512-NEXT: movq %r9, %rax +; AVX512-NEXT: mulq %r11 +; AVX512-NEXT: movq %rax, %rdi +; AVX512-NEXT: seto %bl +; AVX512-NEXT: orb %bpl, %bl +; AVX512-NEXT: addq %rsi, %rdi +; AVX512-NEXT: movq %r11, %rax +; AVX512-NEXT: mulq %r8 +; AVX512-NEXT: addq %rdi, %rdx +; AVX512-NEXT: setb %sil +; AVX512-NEXT: orb %bl, %sil +; AVX512-NEXT: orb %cl, %sil +; AVX512-NEXT: movb %sil, -{{[0-9]+}}(%rsp) +; AVX512-NEXT: kmovw -{{[0-9]+}}(%rsp), %k1 +; AVX512-NEXT: movq %r10, 16(%r14) +; AVX512-NEXT: movq %rax, (%r14) +; AVX512-NEXT: movq %r15, 24(%r14) +; AVX512-NEXT: movq %rdx, 8(%r14) +; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: retq + %t = call {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1) + %val = extractvalue {<2 x i128>, <2 x i1>} %t, 0 + %obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1 + %res = sext <2 x i1> %obit to <2 x i32> + store <2 x i128> %val, <2 x i128>* %p2 + ret <2 x i32> %res +}
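
For readers skimming the new checks: the <2 x i64> and <2 x i128> cases above are lowered lane-by-lane on every subtarget (a scalar mulq plus seto per element), i.e. the vector intrinsic degenerates to per-element unrolling when no vector expansion applies. A minimal IR sketch of that unrolled form, assuming the hypothetical name @umulo_v2i64_unrolled (it does not appear in the test file), is semantically equivalent to calling @llvm.umul.with.overflow.v2i64:

    define {<2 x i64>, <2 x i1>} @umulo_v2i64_unrolled(<2 x i64> %a, <2 x i64> %b) {
      ; Lane 0: scalar overflow multiply.
      %a0 = extractelement <2 x i64> %a, i32 0
      %b0 = extractelement <2 x i64> %b, i32 0
      %m0 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %a0, i64 %b0)
      %r0 = extractvalue {i64, i1} %m0, 0
      %o0 = extractvalue {i64, i1} %m0, 1
      ; Lane 1: scalar overflow multiply.
      %a1 = extractelement <2 x i64> %a, i32 1
      %b1 = extractelement <2 x i64> %b, i32 1
      %m1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %a1, i64 %b1)
      %r1 = extractvalue {i64, i1} %m1, 0
      %o1 = extractvalue {i64, i1} %m1, 1
      ; Rebuild the product vector and the overflow mask.
      %v0 = insertelement <2 x i64> undef, i64 %r0, i32 0
      %v  = insertelement <2 x i64> %v0, i64 %r1, i32 1
      %w0 = insertelement <2 x i1> undef, i1 %o0, i32 0
      %w  = insertelement <2 x i1> %w0, i1 %o1, i32 1
      %s0 = insertvalue {<2 x i64>, <2 x i1>} undef, <2 x i64> %v, 0
      %s  = insertvalue {<2 x i64>, <2 x i1>} %s0, <2 x i1> %w, 1
      ret {<2 x i64>, <2 x i1>} %s
    }

    declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64)

The wider-element tests stay vectorized instead: in the checks above, umulo_v8i16 compares the pmulhuw high half against zero, umulo_v16i8 takes psrlw $8 of a widened pmullw, and umulo_v16i32 tests the odd lanes produced by pmuludq, so overflow detection reduces to "is the high half of the widened product nonzero" with no scalarization.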