Index: lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -602,7 +602,7 @@
   SDLoc dl(N);
   SDValue Op1 = N->getOperand(0);
   SDValue Op2 = N->getOperand(1);
-  unsigned OldBits = Op1.getValueSizeInBits();
+  unsigned OldBits = Op1.getScalarValueSizeInBits();
 
   unsigned Opcode = N->getOpcode();
   unsigned ShiftOp;
@@ -624,7 +624,7 @@
   SDValue Op2Promoted = GetPromotedInteger(Op2);
   EVT PromotedType = Op1Promoted.getValueType();
-  unsigned NewBits = Op1Promoted.getValueSizeInBits();
+  unsigned NewBits = PromotedType.getScalarSizeInBits();
 
   unsigned SHLAmount = NewBits - OldBits;
   EVT SHVT = TLI.getShiftAmountTy(PromotedType, DAG.getDataLayout());
   SDValue ShiftAmount = DAG.getConstant(SHLAmount, dl, SHVT);
Index: lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2460,6 +2460,10 @@
   case ISD::SMAX:
   case ISD::UMIN:
   case ISD::UMAX:
+  case ISD::UADDSAT:
+  case ISD::SADDSAT:
+  case ISD::USUBSAT:
+  case ISD::SSUBSAT:
     Res = WidenVecRes_Binary(N);
     break;
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h
+++ lib/Target/X86/X86ISelLowering.h
@@ -226,10 +226,6 @@
       SCALEF,
       SCALEFS,
 
-      // Integer add/sub with unsigned saturation.
-      ADDUS,
-      SUBUS,
-
       // Integer add/sub with signed saturation.
       ADDS,
       SUBS,
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -829,6 +829,17 @@
       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
     }
 
+    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
+    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
+    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
+    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
+    // Use widening instead of promotion.
+    for (auto VT : { MVT::v8i8, MVT::v4i8, MVT::v2i8,
+                     MVT::v4i16, MVT::v2i16 }) {
+      setOperationAction(ISD::UADDSAT, VT, Custom);
+      setOperationAction(ISD::USUBSAT, VT, Custom);
+    }
+
     setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
     setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
     setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
@@ -1200,6 +1211,11 @@
     setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
     setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
 
+    setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
+    setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
+
     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
       setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
       setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
@@ -1657,6 +1673,8 @@
       setOperationAction(ISD::SMIN, VT, Legal);
       setOperationAction(ISD::UMIN, VT, Legal);
       setOperationAction(ISD::SETCC, VT, Custom);
+      setOperationAction(ISD::UADDSAT, VT, Legal);
+      setOperationAction(ISD::USUBSAT, VT, Legal);
 
       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
       // setcc all the way to isel and prefer SETGT in some isel patterns.
@@ -19169,7 +19187,7 @@
       break;
     }
 
-    SDValue Result = DAG.getNode(X86ISD::SUBUS, dl, VT, Op0, Op1);
+    SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
     return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                        DAG.getConstant(0, dl, VT));
   }
@@ -23388,6 +23406,13 @@
   return split256IntArith(Op, DAG);
 }
 
+static SDValue LowerUADDSAT_USUBSAT(SDValue Op, SelectionDAG &DAG) {
+  assert(Op.getSimpleValueType().is256BitVector() &&
+         Op.getSimpleValueType().isInteger() &&
+         "Only handle AVX 256-bit vector integer operation");
+  return split256IntArith(Op, DAG);
+}
+
 static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
@@ -26169,6 +26194,8 @@
   case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
   case ISD::ADD:
   case ISD::SUB:                return LowerADD_SUB(Op, DAG);
+  case ISD::UADDSAT:
+  case ISD::USUBSAT:            return LowerUADDSAT_USUBSAT(Op, DAG);
   case ISD::SMAX:
   case ISD::SMIN:
   case ISD::UMAX:
@@ -26250,11 +26277,12 @@
       }
       return;
     }
+  case ISD::UADDSAT:
+  case ISD::USUBSAT:
   case X86ISD::VPMADDWD:
-  case X86ISD::ADDUS:
-  case X86ISD::SUBUS:
   case X86ISD::AVG: {
-    // Legalize types for X86ISD::AVG/ADDUS/SUBUS/VPMADDWD by widening.
+    // Legalize types for ISD::UADDSAT/USUBSAT and X86ISD::AVG/VPMADDWD
+    // by widening.
     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
 
     EVT VT = N->getValueType(0);
@@ -26988,8 +27016,6 @@
   case X86ISD::ANDNP:              return "X86ISD::ANDNP";
   case X86ISD::BLENDI:             return "X86ISD::BLENDI";
   case X86ISD::SHRUNKBLEND:        return "X86ISD::SHRUNKBLEND";
-  case X86ISD::ADDUS:              return "X86ISD::ADDUS";
-  case X86ISD::SUBUS:              return "X86ISD::SUBUS";
   case X86ISD::HADD:               return "X86ISD::HADD";
   case X86ISD::HSUB:               return "X86ISD::HSUB";
   case X86ISD::FHADD:              return "X86ISD::FHADD";
@@ -34008,9 +34034,9 @@
     SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
     SDValue CondRHS = Cond->getOperand(1);
 
-    auto SUBUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
-                           ArrayRef<SDValue> Ops) {
-      return DAG.getNode(X86ISD::SUBUS, DL, Ops[0].getValueType(), Ops);
+    auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+                             ArrayRef<SDValue> Ops) {
+      return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops);
     };
 
     // Look for a general sub with unsigned saturation first.
@@ -34019,22 +34045,22 @@
     if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
         Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
       return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                              SUBUSBuilder);
+                              USUBSATBuilder);
 
     if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
       if (isa<ConstantSDNode>(CondRHS)) {
         // If the RHS is a constant we have to reverse the const
        // canonicalization.
        // x > C-1 ? x+-C : 0 --> subus x, C
-       auto MatchSUBUS = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
+       auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
          return Cond->getAPIntValue() == (-Op->getAPIntValue() - 1);
        };
        if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
-           ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchSUBUS)) {
+           ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT)) {
          OpRHS = DAG.getNode(ISD::SUB, DL, VT,
                              DAG.getConstant(0, DL, VT), OpRHS);
          return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                 SUBUSBuilder);
+                                 USUBSATBuilder);
        }
 
        // Another special case: If C was a sign bit, the sub has been
@@ -34050,7 +34076,7 @@
          // Note that we have to rebuild the RHS constant here to ensure we
          // don't rely on particular values of undef lanes.
          return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                                 SUBUSBuilder);
+                                 USUBSATBuilder);
        }
      }
    }
@@ -34083,9 +34109,9 @@
   if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
     SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
 
-    auto ADDUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
-                           ArrayRef<SDValue> Ops) {
-      return DAG.getNode(X86ISD::ADDUS, DL, Ops[0].getValueType(), Ops);
+    auto UADDSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+                             ArrayRef<SDValue> Ops) {
+      return DAG.getNode(ISD::UADDSAT, DL, Ops[0].getValueType(), Ops);
     };
 
     // Canonicalize condition operands.
@@ -34100,20 +34126,20 @@
     if (CC == ISD::SETULE && Other == CondRHS &&
         (OpLHS == CondLHS || OpRHS == CondLHS))
       return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                              ADDUSBuilder);
+                              UADDSATBuilder);
 
     if (isa<ConstantSDNode>(OpRHS) && isa<ConstantSDNode>(CondRHS) &&
        CondLHS == OpLHS) {
      // If the RHS is a constant we have to reverse the const
      // canonicalization.
      // x > ~C ? x+C : ~0 --> addus x, C
-     auto MatchADDUS = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
+     auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
        return Cond->getAPIntValue() == ~Op->getAPIntValue();
      };
      if (CC == ISD::SETULE &&
-         ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchADDUS))
+         ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
        return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
-                               ADDUSBuilder);
+                               UADDSATBuilder);
     }
   }
 }
@@ -40696,16 +40722,16 @@
   } else
     return SDValue();
 
-  auto SUBUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
-                         ArrayRef<SDValue> Ops) {
-    return DAG.getNode(X86ISD::SUBUS, DL, Ops[0].getValueType(), Ops);
+  auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+                           ArrayRef<SDValue> Ops) {
+    return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops);
   };
 
   // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
   // special preprocessing in some cases.
   if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64)
     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
-                            { SubusLHS, SubusRHS }, SUBUSBuilder);
+                            { SubusLHS, SubusRHS }, USUBSATBuilder);
 
   // Special preprocessing case can be only applied
   // if the value was zero extended from 16 bit,
@@ -40737,7 +40763,7 @@
   SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
   SDValue Psubus = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType,
-                                    { NewSubusLHS, NewSubusRHS }, SUBUSBuilder);
+                                    { NewSubusLHS, NewSubusRHS }, USUBSATBuilder);
   // Zero extend the result, it may be used somewhere as 32 bit,
   // if not zext and following trunc will shrink.
return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType); Index: lib/Target/X86/X86InstrAVX512.td =================================================================== --- lib/Target/X86/X86InstrAVX512.td +++ lib/Target/X86/X86InstrAVX512.td @@ -4834,9 +4834,9 @@ SchedWriteVecALU, HasBWI, 1>; defm VPSUBS : avx512_binop_rm_vl_bw<0xE8, 0xE9, "vpsubs", X86subs, SchedWriteVecALU, HasBWI, 0>; -defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", X86addus, +defm VPADDUS : avx512_binop_rm_vl_bw<0xDC, 0xDD, "vpaddus", uaddsat, SchedWriteVecALU, HasBWI, 1>; -defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", X86subus, +defm VPSUBUS : avx512_binop_rm_vl_bw<0xD8, 0xD9, "vpsubus", usubsat, SchedWriteVecALU, HasBWI, 0>; defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmulld", mul, SchedWritePMULLD, HasAVX512, 1>, T8PD; Index: lib/Target/X86/X86InstrFragmentsSIMD.td =================================================================== --- lib/Target/X86/X86InstrFragmentsSIMD.td +++ lib/Target/X86/X86InstrFragmentsSIMD.td @@ -227,8 +227,6 @@ SDTCisVec<1>, SDTCisSameAs<2, 1>]>; -def X86addus : SDNode<"X86ISD::ADDUS", SDTIntBinOp, [SDNPCommutative]>; -def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>; def X86adds : SDNode<"X86ISD::ADDS", SDTIntBinOp, [SDNPCommutative]>; def X86subs : SDNode<"X86ISD::SUBS", SDTIntBinOp>; def X86mulhrs : SDNode<"X86ISD::MULHRS", SDTIntBinOp, [SDNPCommutative]>; Index: lib/Target/X86/X86InstrSSE.td =================================================================== --- lib/Target/X86/X86InstrSSE.td +++ lib/Target/X86/X86InstrSSE.td @@ -3627,9 +3627,9 @@ SchedWriteVecALU, 1, NoVLX_Or_NoBWI>; defm PADDSW : PDI_binop_all<0xED, "paddsw", X86adds, v8i16, v16i16, SchedWriteVecALU, 1, NoVLX_Or_NoBWI>; -defm PADDUSB : PDI_binop_all<0xDC, "paddusb", X86addus, v16i8, v32i8, +defm PADDUSB : PDI_binop_all<0xDC, "paddusb", uaddsat, v16i8, v32i8, SchedWriteVecALU, 1, NoVLX_Or_NoBWI>; -defm PADDUSW : PDI_binop_all<0xDD, "paddusw", X86addus, v8i16, v16i16, +defm PADDUSW : PDI_binop_all<0xDD, "paddusw", uaddsat, v8i16, v16i16, SchedWriteVecALU, 1, NoVLX_Or_NoBWI>; defm PMULLW : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16, SchedWriteVecIMul, 1, NoVLX_Or_NoBWI>; @@ -3649,9 +3649,9 @@ SchedWriteVecALU, 0, NoVLX_Or_NoBWI>; defm PSUBSW : PDI_binop_all<0xE9, "psubsw", X86subs, v8i16, v16i16, SchedWriteVecALU, 0, NoVLX_Or_NoBWI>; -defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8, +defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", usubsat, v16i8, v32i8, SchedWriteVecALU, 0, NoVLX_Or_NoBWI>; -defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16, +defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", usubsat, v8i16, v16i16, SchedWriteVecALU, 0, NoVLX_Or_NoBWI>; defm PMINUB : PDI_binop_all<0xDA, "pminub", umin, v16i8, v32i8, SchedWriteVecALU, 1, NoVLX_Or_NoBWI>; Index: test/CodeGen/X86/uadd_sat_vec.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/uadd_sat_vec.ll @@ -0,0 +1,1330 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1 +; RUN: 
llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512 + +declare <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8>, <1 x i8>) +declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>) +declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>) +declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>) +declare <12 x i8> @llvm.uadd.sat.v12i8(<12 x i8>, <12 x i8>) +declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>) +declare <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8>, <32 x i8>) +declare <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8>, <64 x i8>) + +declare <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16>, <1 x i16>) +declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>) +declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>) +declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) +declare <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16>, <12 x i16>) +declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>) +declare <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16>, <32 x i16>) + +declare <16 x i1> @llvm.uadd.sat.v16i1(<16 x i1>, <16 x i1>) +declare <16 x i4> @llvm.uadd.sat.v16i4(<16 x i4>, <16 x i4>) + +declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) +declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>) +declare <4 x i24> @llvm.uadd.sat.v4i24(<4 x i24>, <4 x i24>) +declare <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128>, <2 x i128>) + +; Legal types, depending on architecture. + +define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) { +; SSE-LABEL: v16i8: +; SSE: # %bb.0: +; SSE-NEXT: paddusb %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v16i8: +; AVX: # %bb.0: +; AVX-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y) + ret <16 x i8> %z +} + +define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) { +; SSE-LABEL: v32i8: +; SSE: # %bb.0: +; SSE-NEXT: paddusb %xmm2, %xmm0 +; SSE-NEXT: paddusb %xmm3, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: v32i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vpaddusb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v32i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v32i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: retq + %z = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %x, <32 x i8> %y) + ret <32 x i8> %z +} + +define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) { +; SSE-LABEL: v64i8: +; SSE: # %bb.0: +; SSE-NEXT: paddusb %xmm4, %xmm0 +; SSE-NEXT: paddusb %xmm5, %xmm1 +; SSE-NEXT: paddusb %xmm6, %xmm2 +; SSE-NEXT: paddusb %xmm7, %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: v64i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 +; AVX1-NEXT: vpaddusb %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpaddusb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 +; AVX1-NEXT: vpaddusb %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpaddusb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v64i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpaddusb %ymm2, %ymm0, 
%ymm0 +; AVX2-NEXT: vpaddusb %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v64i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpaddusb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %z = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %x, <64 x i8> %y) + ret <64 x i8> %z +} + +define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) { +; SSE-LABEL: v8i16: +; SSE: # %bb.0: +; SSE-NEXT: paddusw %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v8i16: +; AVX: # %bb.0: +; AVX-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %x, <8 x i16> %y) + ret <8 x i16> %z +} + +define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) { +; SSE-LABEL: v16i16: +; SSE: # %bb.0: +; SSE-NEXT: paddusw %xmm2, %xmm0 +; SSE-NEXT: paddusw %xmm3, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: v16i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vpaddusw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v16i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v16i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: retq + %z = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %x, <16 x i16> %y) + ret <16 x i16> %z +} + +define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) { +; SSE-LABEL: v32i16: +; SSE: # %bb.0: +; SSE-NEXT: paddusw %xmm4, %xmm0 +; SSE-NEXT: paddusw %xmm5, %xmm1 +; SSE-NEXT: paddusw %xmm6, %xmm2 +; SSE-NEXT: paddusw %xmm7, %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: v32i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 +; AVX1-NEXT: vpaddusw %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpaddusw %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 +; AVX1-NEXT: vpaddusw %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpaddusw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v32i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpaddusw %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpaddusw %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v32i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpaddusw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %z = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %x, <32 x i16> %y) + ret <32 x i16> %z +} + +; Too narrow vectors, legalized by widening. 
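+;
+; A minimal sketch of what "widening" means here (illustrative, not part of
+; the autogenerated checks): a sub-128-bit saturating add such as
+;   %z = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y)
+; is widened by the type legalizer to the legal 128-bit type, conceptually
+;   %zw = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %xw, <16 x i8> %yw)
+; where %xw/%yw stand for %x/%y with undef upper lanes, so the existing
+; paddusb pattern applies and only the low 8 lanes are stored back.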
+ +define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) { +; SSE-LABEL: v8i8: +; SSE: # %bb.0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: paddusb %xmm0, %xmm1 +; SSE-NEXT: movq %xmm1, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v8i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v8i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v8i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX512-NEXT: vpmovwb %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <8 x i8>, <8 x i8>* %px + %y = load <8 x i8>, <8 x i8>* %py + %z = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %x, <8 x i8> %y) + store <8 x i8> %z, <8 x i8>* %pz + ret void +} + +define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) { +; SSE-LABEL: v4i8: +; SSE: # %bb.0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE-NEXT: paddusb %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v4i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovd %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v4i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovd %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; AVX512-NEXT: vpmovdb %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <4 x i8>, <4 x i8>* %px + %y = load <4 x i8>, <4 x i8>* %py + %z = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %x, <4 x i8> %y) + store <4 x i8> %z, <4 x i8>* %pz + ret void +} + +define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) { +; SSE2-LABEL: v2i8: +; SSE2: # %bb.0: +; SSE2-NEXT: movzwl (%rdi), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzwl (%rsi), %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: paddusb %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: movw %ax, (%rdx) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v2i8: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movzwl (%rdi), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movzwl (%rsi), %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: paddusb %xmm0, %xmm1 +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: movw %ax, (%rdx) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v2i8: +; SSE41: # %bb.0: +; SSE41-NEXT: movzwl (%rdi), %eax +; SSE41-NEXT: movd %eax, %xmm0 +; SSE41-NEXT: movzwl (%rsi), %eax +; SSE41-NEXT: movd 
%eax, %xmm1 +; SSE41-NEXT: paddusb %xmm0, %xmm1 +; SSE41-NEXT: pextrw $0, %xmm1, (%rdx) +; SSE41-NEXT: retq +; +; AVX1-LABEL: v2i8: +; AVX1: # %bb.0: +; AVX1-NEXT: movzwl (%rdi), %eax +; AVX1-NEXT: vmovd %eax, %xmm0 +; AVX1-NEXT: movzwl (%rsi), %eax +; AVX1-NEXT: vmovd %eax, %xmm1 +; AVX1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrw $0, %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v2i8: +; AVX2: # %bb.0: +; AVX2-NEXT: movzwl (%rdi), %eax +; AVX2-NEXT: vmovd %eax, %xmm0 +; AVX2-NEXT: movzwl (%rsi), %eax +; AVX2-NEXT: vmovd %eax, %xmm1 +; AVX2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrw $0, %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v2i8: +; AVX512: # %bb.0: +; AVX512-NEXT: movzwl (%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm0 +; AVX512-NEXT: movzwl (%rsi), %eax +; AVX512-NEXT: vmovd %eax, %xmm1 +; AVX512-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vpmovqb %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <2 x i8>, <2 x i8>* %px + %y = load <2 x i8>, <2 x i8>* %py + %z = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %x, <2 x i8> %y) + store <2 x i8> %z, <2 x i8>* %pz + ret void +} + +define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) { +; SSE-LABEL: v4i16: +; SSE: # %bb.0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: paddusw %xmm0, %xmm1 +; SSE-NEXT: movq %xmm1, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v4i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v4i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX512-NEXT: vpmovdw %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <4 x i16>, <4 x i16>* %px + %y = load <4 x i16>, <4 x i16>* %py + %z = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %x, <4 x i16> %y) + store <4 x i16> %z, <4 x i16>* %pz + ret void +} + +define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) { +; SSE-LABEL: v2i16: +; SSE: # %bb.0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE-NEXT: paddusw %xmm0, %xmm1 +; SSE-NEXT: movd %xmm1, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v2i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovd %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v2i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovd %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v2i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; 
AVX512-NEXT: vpaddusw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; AVX512-NEXT: vpmovqw %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <2 x i16>, <2 x i16>* %px + %y = load <2 x i16>, <2 x i16>* %py + %z = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %x, <2 x i16> %y) + store <2 x i16> %z, <2 x i16>* %pz + ret void +} + +define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) { +; SSE-LABEL: v12i8: +; SSE: # %bb.0: +; SSE-NEXT: paddusb %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v12i8: +; AVX: # %bb.0: +; AVX-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <12 x i8> @llvm.uadd.sat.v12i8(<12 x i8> %x, <12 x i8> %y) + ret <12 x i8> %z +} + +define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) { +; SSE-LABEL: v12i16: +; SSE: # %bb.0: +; SSE-NEXT: movdqa (%rdi), %xmm0 +; SSE-NEXT: movdqa 16(%rdi), %xmm1 +; SSE-NEXT: paddusw (%rsi), %xmm0 +; SSE-NEXT: paddusw 16(%rsi), %xmm1 +; SSE-NEXT: movq %xmm1, 16(%rdx) +; SSE-NEXT: movdqa %xmm0, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v12i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vpaddusw (%rsi), %xmm0, %xmm0 +; AVX1-NEXT: vpaddusw 16(%rsi), %xmm1, %xmm1 +; AVX1-NEXT: vmovq %xmm1, 16(%rdx) +; AVX1-NEXT: vmovdqa %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v12i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vpaddusw (%rsi), %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vmovq %xmm1, 16(%rdx) +; AVX2-NEXT: vmovdqa %xmm0, (%rdx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v12i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vpaddusw (%rsi), %ymm0, %ymm0 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vmovq %xmm1, 16(%rdx) +; AVX512-NEXT: vmovdqa %xmm0, (%rdx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = load <12 x i16>, <12 x i16>* %px + %y = load <12 x i16>, <12 x i16>* %py + %z = call <12 x i16> @llvm.uadd.sat.v12i16(<12 x i16> %x, <12 x i16> %y) + store <12 x i16> %z, <12 x i16>* %pz + ret void +} + +; Scalarization + +define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) { +; SSE-LABEL: v1i8: +; SSE: # %bb.0: +; SSE-NEXT: movb (%rdi), %al +; SSE-NEXT: addb (%rsi), %al +; SSE-NEXT: movb $-1, %cl +; SSE-NEXT: jb .LBB13_2 +; SSE-NEXT: # %bb.1: +; SSE-NEXT: movl %eax, %ecx +; SSE-NEXT: .LBB13_2: +; SSE-NEXT: movb %cl, (%rdx) +; SSE-NEXT: retq +; +; AVX-LABEL: v1i8: +; AVX: # %bb.0: +; AVX-NEXT: movb (%rdi), %al +; AVX-NEXT: addb (%rsi), %al +; AVX-NEXT: movb $-1, %cl +; AVX-NEXT: jb .LBB13_2 +; AVX-NEXT: # %bb.1: +; AVX-NEXT: movl %eax, %ecx +; AVX-NEXT: .LBB13_2: +; AVX-NEXT: movb %cl, (%rdx) +; AVX-NEXT: retq + %x = load <1 x i8>, <1 x i8>* %px + %y = load <1 x i8>, <1 x i8>* %py + %z = call <1 x i8> @llvm.uadd.sat.v1i8(<1 x i8> %x, <1 x i8> %y) + store <1 x i8> %z, <1 x i8>* %pz + ret void +} + +define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) { +; SSE-LABEL: v1i16: +; SSE: # %bb.0: +; SSE-NEXT: movzwl (%rdi), %eax +; SSE-NEXT: addw (%rsi), %ax +; SSE-NEXT: movl $65535, %ecx # imm = 0xFFFF +; SSE-NEXT: cmovael %eax, %ecx +; SSE-NEXT: movw %cx, (%rdx) +; SSE-NEXT: retq +; +; AVX-LABEL: v1i16: +; AVX: # %bb.0: +; AVX-NEXT: movzwl (%rdi), %eax +; AVX-NEXT: addw (%rsi), %ax +; AVX-NEXT: movl $65535, %ecx # imm = 0xFFFF +; AVX-NEXT: cmovael %eax, %ecx +; AVX-NEXT: movw %cx, (%rdx) +; AVX-NEXT: retq + %x = load <1 x i16>, <1 x i16>* %px + 
%y = load <1 x i16>, <1 x i16>* %py + %z = call <1 x i16> @llvm.uadd.sat.v1i16(<1 x i16> %x, <1 x i16> %y) + store <1 x i16> %z, <1 x i16>* %pz + ret void +} + +; Promotion + +define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) { +; SSE-LABEL: v16i4: +; SSE: # %bb.0: +; SSE-NEXT: psllw $4, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: psllw $4, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: paddusb %xmm1, %xmm0 +; SSE-NEXT: psrlw $4, %xmm0 +; SSE-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v16i4: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <16 x i4> @llvm.uadd.sat.v16i4(<16 x i4> %x, <16 x i4> %y) + ret <16 x i4> %z +} + +define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) { +; SSE-LABEL: v16i1: +; SSE: # %bb.0: +; SSE-NEXT: psllw $7, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: psllw $7, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: paddusb %xmm1, %xmm0 +; SSE-NEXT: psrlw $7, %xmm0 +; SSE-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE-NEXT: retq +; +; AVX1-LABEL: v16i1: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v16i1: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpaddusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v16i1: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: .cfi_def_cfa_offset 16 +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: .cfi_def_cfa_offset 24 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: .cfi_def_cfa_offset 32 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: .cfi_def_cfa_offset 40 +; AVX512-NEXT: .cfi_offset %rbx, -40 +; AVX512-NEXT: .cfi_offset %r14, -32 +; AVX512-NEXT: .cfi_offset %r15, -24 +; AVX512-NEXT: .cfi_offset %rbp, -16 +; AVX512-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX512-NEXT: vpmovb2m %xmm0, %k0 +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm0 +; AVX512-NEXT: vpmovb2m %xmm0, %k1 +; AVX512-NEXT: kshiftrw $4, %k0, %k2 +; AVX512-NEXT: kshiftrw $4, %k1, %k3 +; AVX512-NEXT: kshiftrw $3, %k0, %k4 +; AVX512-NEXT: kmovd %k4, %r15d +; AVX512-NEXT: kshiftrw $3, %k1, %k4 +; AVX512-NEXT: kmovd %k4, %r9d +; AVX512-NEXT: kshiftrw $2, %k0, %k4 +; AVX512-NEXT: kmovd %k4, %eax +; AVX512-NEXT: kshiftrw $2, %k1, %k4 +; AVX512-NEXT: kmovd %k4, %ebp +; AVX512-NEXT: kmovd %k0, %ecx +; AVX512-NEXT: kmovd %k1, %esi +; AVX512-NEXT: kshiftrw $1, %k0, %k4 +; 
AVX512-NEXT: kmovd %k4, %edi +; AVX512-NEXT: kshiftrw $1, %k1, %k4 +; AVX512-NEXT: kmovd %k4, %edx +; AVX512-NEXT: shlb $7, %dl +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: addb %dl, %dil +; AVX512-NEXT: movb $-1, %r8b +; AVX512-NEXT: movb $-1, %bl +; AVX512-NEXT: jb .LBB16_2 +; AVX512-NEXT: # %bb.1: +; AVX512-NEXT: movl %edi, %ebx +; AVX512-NEXT: .LBB16_2: +; AVX512-NEXT: kshiftrw $5, %k0, %k4 +; AVX512-NEXT: kshiftrw $5, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %edi +; AVX512-NEXT: kmovd %k3, %r11d +; AVX512-NEXT: shrb $7, %bl +; AVX512-NEXT: kmovd %ebx, %k6 +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: addb %sil, %cl +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_4 +; AVX512-NEXT: # %bb.3: +; AVX512-NEXT: movl %ecx, %edx +; AVX512-NEXT: .LBB16_4: +; AVX512-NEXT: kshiftrw $6, %k0, %k2 +; AVX512-NEXT: kshiftrw $6, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %esi +; AVX512-NEXT: kmovd %k5, %r14d +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k4 +; AVX512-NEXT: kshiftrw $1, %k4, %k5 +; AVX512-NEXT: kxorw %k6, %k5, %k5 +; AVX512-NEXT: kshiftlw $15, %k5, %k5 +; AVX512-NEXT: kshiftrw $14, %k5, %k5 +; AVX512-NEXT: kxorw %k5, %k4, %k6 +; AVX512-NEXT: kshiftrw $2, %k6, %k7 +; AVX512-NEXT: shlb $7, %bpl +; AVX512-NEXT: shlb $7, %al +; AVX512-NEXT: addb %bpl, %al +; AVX512-NEXT: movb $-1, %cl +; AVX512-NEXT: jb .LBB16_6 +; AVX512-NEXT: # %bb.5: +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: .LBB16_6: +; AVX512-NEXT: kshiftrw $7, %k0, %k4 +; AVX512-NEXT: kshiftrw $7, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %eax +; AVX512-NEXT: kmovd %k3, %r10d +; AVX512-NEXT: shrb $7, %cl +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $13, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $3, %k6, %k7 +; AVX512-NEXT: shlb $7, %r9b +; AVX512-NEXT: shlb $7, %r15b +; AVX512-NEXT: addb %r9b, %r15b +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_8 +; AVX512-NEXT: # %bb.7: +; AVX512-NEXT: movl %r15d, %edx +; AVX512-NEXT: .LBB16_8: +; AVX512-NEXT: kshiftrw $8, %k0, %k2 +; AVX512-NEXT: kshiftrw $8, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %ecx +; AVX512-NEXT: kmovd %k5, %r9d +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $12, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k6 +; AVX512-NEXT: kshiftrw $4, %k6, %k7 +; AVX512-NEXT: shlb $7, %r11b +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: addb %r11b, %dil +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_10 +; AVX512-NEXT: # %bb.9: +; AVX512-NEXT: movl %edi, %edx +; AVX512-NEXT: .LBB16_10: +; AVX512-NEXT: kshiftrw $9, %k0, %k4 +; AVX512-NEXT: kshiftrw $9, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %edi +; AVX512-NEXT: kmovd %k3, %ebx +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $11, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $5, %k6, %k7 +; AVX512-NEXT: shlb $7, %r14b +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: addb %r14b, %sil +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_12 +; AVX512-NEXT: # %bb.11: +; AVX512-NEXT: movl %esi, %edx +; AVX512-NEXT: .LBB16_12: +; AVX512-NEXT: kshiftrw $10, %k0, %k2 +; AVX512-NEXT: kshiftrw $10, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %esi +; AVX512-NEXT: kmovd %k5, %r11d +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k4 +; AVX512-NEXT: 
kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $10, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k6 +; AVX512-NEXT: kshiftrw $6, %k6, %k7 +; AVX512-NEXT: shlb $7, %r10b +; AVX512-NEXT: shlb $7, %al +; AVX512-NEXT: addb %r10b, %al +; AVX512-NEXT: movb $-1, %bpl +; AVX512-NEXT: jb .LBB16_14 +; AVX512-NEXT: # %bb.13: +; AVX512-NEXT: movl %eax, %ebp +; AVX512-NEXT: .LBB16_14: +; AVX512-NEXT: kshiftrw $11, %k0, %k4 +; AVX512-NEXT: kshiftrw $11, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %r15d +; AVX512-NEXT: kmovd %k3, %r10d +; AVX512-NEXT: shrb $7, %bpl +; AVX512-NEXT: kmovd %ebp, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $9, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $7, %k6, %k7 +; AVX512-NEXT: shlb $7, %r9b +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: addb %r9b, %cl +; AVX512-NEXT: movb $-1, %al +; AVX512-NEXT: jb .LBB16_16 +; AVX512-NEXT: # %bb.15: +; AVX512-NEXT: movl %ecx, %eax +; AVX512-NEXT: .LBB16_16: +; AVX512-NEXT: kshiftrw $12, %k0, %k2 +; AVX512-NEXT: kshiftrw $12, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %ecx +; AVX512-NEXT: kmovd %k5, %r9d +; AVX512-NEXT: shrb $7, %al +; AVX512-NEXT: kmovd %eax, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $8, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k6 +; AVX512-NEXT: kshiftrw $8, %k6, %k7 +; AVX512-NEXT: shlb $7, %bl +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: addb %bl, %dil +; AVX512-NEXT: movb $-1, %bl +; AVX512-NEXT: jb .LBB16_18 +; AVX512-NEXT: # %bb.17: +; AVX512-NEXT: movl %edi, %ebx +; AVX512-NEXT: .LBB16_18: +; AVX512-NEXT: kshiftrw $13, %k0, %k4 +; AVX512-NEXT: kshiftrw $13, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %eax +; AVX512-NEXT: kmovd %k3, %r14d +; AVX512-NEXT: shrb $7, %bl +; AVX512-NEXT: kmovd %ebx, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $7, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $9, %k6, %k7 +; AVX512-NEXT: shlb $7, %r11b +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: addb %r11b, %sil +; AVX512-NEXT: movb $-1, %dil +; AVX512-NEXT: jb .LBB16_20 +; AVX512-NEXT: # %bb.19: +; AVX512-NEXT: movl %esi, %edi +; AVX512-NEXT: .LBB16_20: +; AVX512-NEXT: kshiftrw $14, %k0, %k2 +; AVX512-NEXT: kshiftrw $14, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %esi +; AVX512-NEXT: kmovd %k5, %r11d +; AVX512-NEXT: shrb $7, %dil +; AVX512-NEXT: kmovd %edi, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $6, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k4 +; AVX512-NEXT: kshiftrw $10, %k4, %k5 +; AVX512-NEXT: shlb $7, %r10b +; AVX512-NEXT: shlb $7, %r15b +; AVX512-NEXT: addb %r10b, %r15b +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_22 +; AVX512-NEXT: # %bb.21: +; AVX512-NEXT: movl %r15d, %edx +; AVX512-NEXT: .LBB16_22: +; AVX512-NEXT: kshiftrw $15, %k0, %k0 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k2, %ebx +; AVX512-NEXT: kmovd %k3, %edi +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k5, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $5, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k4, %k2 +; AVX512-NEXT: kshiftrw $11, %k2, %k3 +; AVX512-NEXT: shlb $7, %r9b +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: addb %r9b, %cl +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_24 +; AVX512-NEXT: # %bb.23: +; AVX512-NEXT: movl %ecx, %edx +; AVX512-NEXT: .LBB16_24: +; 
AVX512-NEXT: kmovd %k0, %ecx +; AVX512-NEXT: kmovd %k1, %ebp +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k0 +; AVX512-NEXT: kxorw %k0, %k3, %k0 +; AVX512-NEXT: kshiftlw $15, %k0, %k0 +; AVX512-NEXT: kshiftrw $4, %k0, %k0 +; AVX512-NEXT: kxorw %k0, %k2, %k0 +; AVX512-NEXT: kshiftrw $12, %k0, %k1 +; AVX512-NEXT: shlb $7, %r14b +; AVX512-NEXT: shlb $7, %al +; AVX512-NEXT: addb %r14b, %al +; AVX512-NEXT: movb $-1, %dl +; AVX512-NEXT: jb .LBB16_26 +; AVX512-NEXT: # %bb.25: +; AVX512-NEXT: movl %eax, %edx +; AVX512-NEXT: .LBB16_26: +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $3, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $13, %k0, %k1 +; AVX512-NEXT: shlb $7, %r11b +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: addb %r11b, %sil +; AVX512-NEXT: movb $-1, %al +; AVX512-NEXT: jb .LBB16_28 +; AVX512-NEXT: # %bb.27: +; AVX512-NEXT: movl %esi, %eax +; AVX512-NEXT: .LBB16_28: +; AVX512-NEXT: shrb $7, %al +; AVX512-NEXT: kmovd %eax, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $2, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $14, %k0, %k1 +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: shlb $7, %bl +; AVX512-NEXT: addb %dil, %bl +; AVX512-NEXT: movb $-1, %al +; AVX512-NEXT: jb .LBB16_30 +; AVX512-NEXT: # %bb.29: +; AVX512-NEXT: movl %ebx, %eax +; AVX512-NEXT: .LBB16_30: +; AVX512-NEXT: shrb $7, %al +; AVX512-NEXT: kmovd %eax, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $1, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftlw $1, %k0, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k0 +; AVX512-NEXT: shlb $7, %bpl +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: addb %bpl, %cl +; AVX512-NEXT: jb .LBB16_32 +; AVX512-NEXT: # %bb.31: +; AVX512-NEXT: movl %ecx, %r8d +; AVX512-NEXT: .LBB16_32: +; AVX512-NEXT: shrb $7, %r8b +; AVX512-NEXT: kmovd %r8d, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k0 +; AVX512-NEXT: vpmovm2b %k0, %xmm0 +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: .cfi_def_cfa_offset 32 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: .cfi_def_cfa_offset 24 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: .cfi_def_cfa_offset 16 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: .cfi_def_cfa_offset 8 +; AVX512-NEXT: retq + %z = call <16 x i1> @llvm.uadd.sat.v16i1(<16 x i1> %x, <16 x i1> %y) + ret <16 x i1> %z +} + +; Expanded + +define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) { +; SSE2-LABEL: v4i32: +; SSE2: # %bb.0: +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %ecx +; SSE2-NEXT: addl %eax, %ecx +; SSE2-NEXT: movl $-1, %eax +; SSE2-NEXT: cmovbl %eax, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %ecx +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %edx +; SSE2-NEXT: addl %ecx, %edx +; SSE2-NEXT: cmovbl %eax, %edx +; SSE2-NEXT: movd %edx, %xmm3 +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE2-NEXT: movd %xmm1, %ecx +; SSE2-NEXT: movd %xmm0, %edx +; SSE2-NEXT: addl %ecx, %edx +; SSE2-NEXT: cmovbl %eax, %edx +; SSE2-NEXT: movd %edx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSE2-NEXT: movd %xmm1, %ecx +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = 
xmm0[1,1,2,3] +; SSE2-NEXT: movd %xmm0, %edx +; SSE2-NEXT: addl %ecx, %edx +; SSE2-NEXT: cmovbl %eax, %edx +; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v4i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %ecx +; SSSE3-NEXT: addl %eax, %ecx +; SSSE3-NEXT: movl $-1, %eax +; SSSE3-NEXT: cmovbl %eax, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %edx +; SSSE3-NEXT: addl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm3 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSSE3-NEXT: movd %xmm1, %ecx +; SSSE3-NEXT: movd %xmm0, %edx +; SSSE3-NEXT: addl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSSE3-NEXT: movd %xmm1, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSSE3-NEXT: movd %xmm0, %edx +; SSSE3-NEXT: addl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v4i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pextrd $1, %xmm1, %eax +; SSE41-NEXT: pextrd $1, %xmm0, %ecx +; SSE41-NEXT: addl %eax, %ecx +; SSE41-NEXT: movl $-1, %eax +; SSE41-NEXT: cmovbl %eax, %ecx +; SSE41-NEXT: movd %xmm1, %edx +; SSE41-NEXT: movd %xmm0, %esi +; SSE41-NEXT: addl %edx, %esi +; SSE41-NEXT: cmovbl %eax, %esi +; SSE41-NEXT: movd %esi, %xmm2 +; SSE41-NEXT: pinsrd $1, %ecx, %xmm2 +; SSE41-NEXT: pextrd $2, %xmm1, %ecx +; SSE41-NEXT: pextrd $2, %xmm0, %edx +; SSE41-NEXT: addl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $2, %edx, %xmm2 +; SSE41-NEXT: pextrd $3, %xmm1, %ecx +; SSE41-NEXT: pextrd $3, %xmm0, %edx +; SSE41-NEXT: addl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $3, %edx, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: v4i32: +; AVX: # %bb.0: +; AVX-NEXT: vpextrd $1, %xmm1, %eax +; AVX-NEXT: vpextrd $1, %xmm0, %ecx +; AVX-NEXT: addl %eax, %ecx +; AVX-NEXT: movl $-1, %eax +; AVX-NEXT: cmovbl %eax, %ecx +; AVX-NEXT: vmovd %xmm1, %edx +; AVX-NEXT: vmovd %xmm0, %esi +; AVX-NEXT: addl %edx, %esi +; AVX-NEXT: cmovbl %eax, %esi +; AVX-NEXT: vmovd %esi, %xmm2 +; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $2, %xmm1, %ecx +; AVX-NEXT: vpextrd $2, %xmm0, %edx +; AVX-NEXT: addl %ecx, %edx +; AVX-NEXT: cmovbl %eax, %edx +; AVX-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $3, %xmm1, %ecx +; AVX-NEXT: vpextrd $3, %xmm0, %edx +; AVX-NEXT: addl %ecx, %edx +; AVX-NEXT: cmovbl %eax, %edx +; AVX-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0 +; AVX-NEXT: retq + %z = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + ret <4 x i32> %z +} + +define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) { +; SSE2-LABEL: v2i32: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $32, %xmm1 +; SSE2-NEXT: movq %xmm1, %rax +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: addq %rax, %rcx +; 
SSE2-NEXT: movq $-1, %rax +; SSE2-NEXT: cmovbq %rax, %rcx +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm1, %rcx +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rdx +; SSE2-NEXT: addq %rcx, %rdx +; SSE2-NEXT: cmovbq %rax, %rdx +; SSE2-NEXT: movq %rdx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSE2-NEXT: psrlq $32, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v2i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: psllq $32, %xmm1 +; SSSE3-NEXT: movq %xmm1, %rax +; SSSE3-NEXT: psllq $32, %xmm0 +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: addq %rax, %rcx +; SSSE3-NEXT: movq $-1, %rax +; SSSE3-NEXT: cmovbq %rax, %rcx +; SSSE3-NEXT: movq %rcx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm1, %rcx +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rdx +; SSSE3-NEXT: addq %rcx, %rdx +; SSSE3-NEXT: cmovbq %rax, %rdx +; SSSE3-NEXT: movq %rdx, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSSE3-NEXT: psrlq $32, %xmm2 +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v2i32: +; SSE41: # %bb.0: +; SSE41-NEXT: psllq $32, %xmm1 +; SSE41-NEXT: pextrq $1, %xmm1, %rax +; SSE41-NEXT: psllq $32, %xmm0 +; SSE41-NEXT: pextrq $1, %xmm0, %rcx +; SSE41-NEXT: addq %rax, %rcx +; SSE41-NEXT: movq $-1, %rax +; SSE41-NEXT: cmovbq %rax, %rcx +; SSE41-NEXT: movq %rcx, %xmm2 +; SSE41-NEXT: movq %xmm1, %rcx +; SSE41-NEXT: movq %xmm0, %rdx +; SSE41-NEXT: addq %rcx, %rdx +; SSE41-NEXT: cmovbq %rax, %rdx +; SSE41-NEXT: movq %rdx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE41-NEXT: psrlq $32, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: v2i32: +; AVX: # %bb.0: +; AVX-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX-NEXT: vpextrq $1, %xmm1, %rax +; AVX-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX-NEXT: vpextrq $1, %xmm0, %rcx +; AVX-NEXT: addq %rax, %rcx +; AVX-NEXT: movq $-1, %rax +; AVX-NEXT: cmovbq %rax, %rcx +; AVX-NEXT: vmovq %rcx, %xmm2 +; AVX-NEXT: vmovq %xmm1, %rcx +; AVX-NEXT: vmovq %xmm0, %rdx +; AVX-NEXT: addq %rcx, %rdx +; AVX-NEXT: cmovbq %rax, %rdx +; AVX-NEXT: vmovq %rdx, %xmm0 +; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %x, <2 x i32> %y) + ret <2 x i32> %z +} + +define <4 x i24> @v4i24(<4 x i24> %x, <4 x i24> %y) { +; SSE2-LABEL: v4i24: +; SSE2: # %bb.0: +; SSE2-NEXT: pslld $8, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pslld $8, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %ecx +; SSE2-NEXT: addl %eax, %ecx +; SSE2-NEXT: movl $-1, %eax +; SSE2-NEXT: cmovbl %eax, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %ecx +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %edx +; SSE2-NEXT: addl %ecx, %edx +; SSE2-NEXT: cmovbl %eax, %edx +; SSE2-NEXT: movd %edx, %xmm3 +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE2-NEXT: movd %xmm1, %ecx +; SSE2-NEXT: movd %xmm0, %edx +; SSE2-NEXT: addl %ecx, %edx +; SSE2-NEXT: cmovbl %eax, %edx +; SSE2-NEXT: movd %edx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSE2-NEXT: movd %xmm1, %ecx +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-NEXT: movd %xmm0, %edx +; SSE2-NEXT: addl 
%ecx, %edx +; SSE2-NEXT: cmovbl %eax, %edx +; SSE2-NEXT: movd %edx, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE2-NEXT: psrld $8, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v4i24: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pslld $8, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %eax +; SSSE3-NEXT: pslld $8, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %ecx +; SSSE3-NEXT: addl %eax, %ecx +; SSSE3-NEXT: movl $-1, %eax +; SSSE3-NEXT: cmovbl %eax, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %edx +; SSSE3-NEXT: addl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm3 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSSE3-NEXT: movd %xmm1, %ecx +; SSSE3-NEXT: movd %xmm0, %edx +; SSSE3-NEXT: addl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSSE3-NEXT: movd %xmm1, %ecx +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSSE3-NEXT: movd %xmm0, %edx +; SSSE3-NEXT: addl %ecx, %edx +; SSSE3-NEXT: cmovbl %eax, %edx +; SSSE3-NEXT: movd %edx, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSSE3-NEXT: psrld $8, %xmm2 +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v4i24: +; SSE41: # %bb.0: +; SSE41-NEXT: pslld $8, %xmm1 +; SSE41-NEXT: pextrd $1, %xmm1, %eax +; SSE41-NEXT: pslld $8, %xmm0 +; SSE41-NEXT: pextrd $1, %xmm0, %ecx +; SSE41-NEXT: addl %eax, %ecx +; SSE41-NEXT: movl $-1, %eax +; SSE41-NEXT: cmovbl %eax, %ecx +; SSE41-NEXT: movd %xmm1, %edx +; SSE41-NEXT: movd %xmm0, %esi +; SSE41-NEXT: addl %edx, %esi +; SSE41-NEXT: cmovbl %eax, %esi +; SSE41-NEXT: movd %esi, %xmm2 +; SSE41-NEXT: pinsrd $1, %ecx, %xmm2 +; SSE41-NEXT: pextrd $2, %xmm1, %ecx +; SSE41-NEXT: pextrd $2, %xmm0, %edx +; SSE41-NEXT: addl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $2, %edx, %xmm2 +; SSE41-NEXT: pextrd $3, %xmm1, %ecx +; SSE41-NEXT: pextrd $3, %xmm0, %edx +; SSE41-NEXT: addl %ecx, %edx +; SSE41-NEXT: cmovbl %eax, %edx +; SSE41-NEXT: pinsrd $3, %edx, %xmm2 +; SSE41-NEXT: psrld $8, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: v4i24: +; AVX: # %bb.0: +; AVX-NEXT: vpslld $8, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $1, %xmm1, %eax +; AVX-NEXT: vpslld $8, %xmm0, %xmm0 +; AVX-NEXT: vpextrd $1, %xmm0, %ecx +; AVX-NEXT: addl %eax, %ecx +; AVX-NEXT: movl $-1, %eax +; AVX-NEXT: cmovbl %eax, %ecx +; AVX-NEXT: vmovd %xmm1, %edx +; AVX-NEXT: vmovd %xmm0, %esi +; AVX-NEXT: addl %edx, %esi +; AVX-NEXT: cmovbl %eax, %esi +; AVX-NEXT: vmovd %esi, %xmm2 +; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $2, %xmm1, %ecx +; AVX-NEXT: vpextrd $2, %xmm0, %edx +; AVX-NEXT: addl %ecx, %edx +; AVX-NEXT: cmovbl %eax, %edx +; AVX-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $3, %xmm1, %ecx +; AVX-NEXT: vpextrd $3, %xmm0, %edx +; AVX-NEXT: addl %ecx, %edx +; AVX-NEXT: cmovbl %eax, %edx +; AVX-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0 +; AVX-NEXT: vpsrld $8, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <4 x i24> @llvm.uadd.sat.v4i24(<4 x i24> %x, <4 x i24> %y) + ret <4 x i24> 
%z +} + +define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) { +; SSE-LABEL: v2i128: +; SSE: # %bb.0: +; SSE-NEXT: movq %rdi, %rax +; SSE-NEXT: addq %r9, %rsi +; SSE-NEXT: adcq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: movq $-1, %rdi +; SSE-NEXT: cmovbq %rdi, %rsi +; SSE-NEXT: cmovbq %rdi, %rdx +; SSE-NEXT: addq {{[0-9]+}}(%rsp), %rcx +; SSE-NEXT: adcq {{[0-9]+}}(%rsp), %r8 +; SSE-NEXT: cmovbq %rdi, %r8 +; SSE-NEXT: cmovbq %rdi, %rcx +; SSE-NEXT: movq %r8, 24(%rax) +; SSE-NEXT: movq %rcx, 16(%rax) +; SSE-NEXT: movq %rdx, 8(%rax) +; SSE-NEXT: movq %rsi, (%rax) +; SSE-NEXT: retq +; +; AVX-LABEL: v2i128: +; AVX: # %bb.0: +; AVX-NEXT: movq %rdi, %rax +; AVX-NEXT: addq %r9, %rsi +; AVX-NEXT: adcq {{[0-9]+}}(%rsp), %rdx +; AVX-NEXT: movq $-1, %rdi +; AVX-NEXT: cmovbq %rdi, %rsi +; AVX-NEXT: cmovbq %rdi, %rdx +; AVX-NEXT: addq {{[0-9]+}}(%rsp), %rcx +; AVX-NEXT: adcq {{[0-9]+}}(%rsp), %r8 +; AVX-NEXT: cmovbq %rdi, %r8 +; AVX-NEXT: cmovbq %rdi, %rcx +; AVX-NEXT: movq %r8, 24(%rax) +; AVX-NEXT: movq %rcx, 16(%rax) +; AVX-NEXT: movq %rdx, 8(%rax) +; AVX-NEXT: movq %rsi, (%rax) +; AVX-NEXT: retq + %z = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %x, <2 x i128> %y) + ret <2 x i128> %z +} Index: test/CodeGen/X86/usub_sat_vec.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/usub_sat_vec.ll @@ -0,0 +1,1328 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512 + +declare <1 x i8> @llvm.usub.sat.v1i8(<1 x i8>, <1 x i8>) +declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>) +declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>) +declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>) +declare <12 x i8> @llvm.usub.sat.v12i8(<12 x i8>, <12 x i8>) +declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>) +declare <32 x i8> @llvm.usub.sat.v32i8(<32 x i8>, <32 x i8>) +declare <64 x i8> @llvm.usub.sat.v64i8(<64 x i8>, <64 x i8>) + +declare <1 x i16> @llvm.usub.sat.v1i16(<1 x i16>, <1 x i16>) +declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>) +declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) +declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) +declare <12 x i16> @llvm.usub.sat.v12i16(<12 x i16>, <12 x i16>) +declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>) +declare <32 x i16> @llvm.usub.sat.v32i16(<32 x i16>, <32 x i16>) + +declare <16 x i1> @llvm.usub.sat.v16i1(<16 x i1>, <16 x i1>) +declare <16 x i4> @llvm.usub.sat.v16i4(<16 x i4>, <16 x i4>) + +declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) +declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>) +declare <4 x i24> @llvm.usub.sat.v4i24(<4 x i24>, <4 x i24>) +declare <2 x i128> @llvm.usub.sat.v2i128(<2 x i128>, <2 x i128>) + +; Legal types, depending on architecture. 
+ +define <16 x i8> @v16i8(<16 x i8> %x, <16 x i8> %y) { +; SSE-LABEL: v16i8: +; SSE: # %bb.0: +; SSE-NEXT: psubusb %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v16i8: +; AVX: # %bb.0: +; AVX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %x, <16 x i8> %y) + ret <16 x i8> %z +} + +define <32 x i8> @v32i8(<32 x i8> %x, <32 x i8> %y) { +; SSE-LABEL: v32i8: +; SSE: # %bb.0: +; SSE-NEXT: psubusb %xmm2, %xmm0 +; SSE-NEXT: psubusb %xmm3, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: v32i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vpsubusb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v32i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v32i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: retq + %z = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %x, <32 x i8> %y) + ret <32 x i8> %z +} + +define <64 x i8> @v64i8(<64 x i8> %x, <64 x i8> %y) { +; SSE-LABEL: v64i8: +; SSE: # %bb.0: +; SSE-NEXT: psubusb %xmm4, %xmm0 +; SSE-NEXT: psubusb %xmm5, %xmm1 +; SSE-NEXT: psubusb %xmm6, %xmm2 +; SSE-NEXT: psubusb %xmm7, %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: v64i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 +; AVX1-NEXT: vpsubusb %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpsubusb %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 +; AVX1-NEXT: vpsubusb %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpsubusb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v64i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsubusb %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpsubusb %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v64i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsubusb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %z = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %x, <64 x i8> %y) + ret <64 x i8> %z +} + +define <8 x i16> @v8i16(<8 x i16> %x, <8 x i16> %y) { +; SSE-LABEL: v8i16: +; SSE: # %bb.0: +; SSE-NEXT: psubusw %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v8i16: +; AVX: # %bb.0: +; AVX-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %x, <8 x i16> %y) + ret <8 x i16> %z +} + +define <16 x i16> @v16i16(<16 x i16> %x, <16 x i16> %y) { +; SSE-LABEL: v16i16: +; SSE: # %bb.0: +; SSE-NEXT: psubusw %xmm2, %xmm0 +; SSE-NEXT: psubusw %xmm3, %xmm1 +; SSE-NEXT: retq +; +; AVX1-LABEL: v16i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 +; AVX1-NEXT: vpsubusw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v16i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v16i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: retq + %z = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %x, <16 x i16> %y) + ret <16 x i16> %z +} + +define <32 x i16> @v32i16(<32 x i16> %x, <32 x i16> %y) { +; SSE-LABEL: v32i16: +; SSE: # %bb.0: +; SSE-NEXT: psubusw %xmm4, %xmm0 +; SSE-NEXT: psubusw %xmm5, %xmm1 +; SSE-NEXT: psubusw 
%xmm6, %xmm2 +; SSE-NEXT: psubusw %xmm7, %xmm3 +; SSE-NEXT: retq +; +; AVX1-LABEL: v32i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 +; AVX1-NEXT: vpsubusw %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpsubusw %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 +; AVX1-NEXT: vpsubusw %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpsubusw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v32i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsubusw %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpsubusw %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v32i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsubusw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: retq + %z = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %x, <32 x i16> %y) + ret <32 x i16> %z +} + +; Too narrow vectors, legalized by widening. + +define void @v8i8(<8 x i8>* %px, <8 x i8>* %py, <8 x i8>* %pz) { +; SSE-LABEL: v8i8: +; SSE: # %bb.0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: psubusb %xmm1, %xmm0 +; SSE-NEXT: movq %xmm0, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v8i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v8i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v8i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX512-NEXT: vpmovwb %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <8 x i8>, <8 x i8>* %px + %y = load <8 x i8>, <8 x i8>* %py + %z = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %x, <8 x i8> %y) + store <8 x i8> %z, <8 x i8>* %pz + ret void +} + +define void @v4i8(<4 x i8>* %px, <4 x i8>* %py, <4 x i8>* %pz) { +; SSE-LABEL: v4i8: +; SSE: # %bb.0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE-NEXT: psubusb %xmm1, %xmm0 +; SSE-NEXT: movd %xmm0, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v4i8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovd %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v4i8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovd %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4i8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; AVX512-NEXT: vpmovdb %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <4 x i8>, 
<4 x i8>* %px + %y = load <4 x i8>, <4 x i8>* %py + %z = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %x, <4 x i8> %y) + store <4 x i8> %z, <4 x i8>* %pz + ret void +} + +define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) { +; SSE2-LABEL: v2i8: +; SSE2: # %bb.0: +; SSE2-NEXT: movzwl (%rdi), %eax +; SSE2-NEXT: movd %eax, %xmm0 +; SSE2-NEXT: movzwl (%rsi), %eax +; SSE2-NEXT: movd %eax, %xmm1 +; SSE2-NEXT: psubusb %xmm1, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: movw %ax, (%rdx) +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v2i8: +; SSSE3: # %bb.0: +; SSSE3-NEXT: movzwl (%rdi), %eax +; SSSE3-NEXT: movd %eax, %xmm0 +; SSSE3-NEXT: movzwl (%rsi), %eax +; SSSE3-NEXT: movd %eax, %xmm1 +; SSSE3-NEXT: psubusb %xmm1, %xmm0 +; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: movw %ax, (%rdx) +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v2i8: +; SSE41: # %bb.0: +; SSE41-NEXT: movzwl (%rdi), %eax +; SSE41-NEXT: movd %eax, %xmm0 +; SSE41-NEXT: movzwl (%rsi), %eax +; SSE41-NEXT: movd %eax, %xmm1 +; SSE41-NEXT: psubusb %xmm1, %xmm0 +; SSE41-NEXT: pextrw $0, %xmm0, (%rdx) +; SSE41-NEXT: retq +; +; AVX1-LABEL: v2i8: +; AVX1: # %bb.0: +; AVX1-NEXT: movzwl (%rdi), %eax +; AVX1-NEXT: vmovd %eax, %xmm0 +; AVX1-NEXT: movzwl (%rsi), %eax +; AVX1-NEXT: vmovd %eax, %xmm1 +; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpextrw $0, %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v2i8: +; AVX2: # %bb.0: +; AVX2-NEXT: movzwl (%rdi), %eax +; AVX2-NEXT: vmovd %eax, %xmm0 +; AVX2-NEXT: movzwl (%rsi), %eax +; AVX2-NEXT: vmovd %eax, %xmm1 +; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpextrw $0, %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v2i8: +; AVX512: # %bb.0: +; AVX512-NEXT: movzwl (%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm0 +; AVX512-NEXT: movzwl (%rsi), %eax +; AVX512-NEXT: vmovd %eax, %xmm1 +; AVX512-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero +; AVX512-NEXT: vpmovqb %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <2 x i8>, <2 x i8>* %px + %y = load <2 x i8>, <2 x i8>* %py + %z = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %x, <2 x i8> %y) + store <2 x i8> %z, <2 x i8>* %pz + ret void +} + +define void @v4i16(<4 x i16>* %px, <4 x i16>* %py, <4 x i16>* %pz) { +; SSE-LABEL: v4i16: +; SSE: # %bb.0: +; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: psubusw %xmm1, %xmm0 +; SSE-NEXT: movq %xmm0, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v4i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v4i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v4i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX512-NEXT: vpmovdw %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <4 x i16>, <4 x i16>* %px + %y = load <4 x i16>, <4 x i16>* %py + %z = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %x, <4 x i16> %y) + store <4 x i16> %z, <4 x i16>* %pz + ret void +} + 
+define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) { +; SSE-LABEL: v2i16: +; SSE: # %bb.0: +; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; SSE-NEXT: psubusw %xmm1, %xmm0 +; SSE-NEXT: movd %xmm0, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v2i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX1-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vmovd %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v2i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX2-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vmovd %xmm0, (%rdx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: v2i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX512-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; AVX512-NEXT: vpsubusw %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; AVX512-NEXT: vpmovqw %xmm0, (%rdx) +; AVX512-NEXT: retq + %x = load <2 x i16>, <2 x i16>* %px + %y = load <2 x i16>, <2 x i16>* %py + %z = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %x, <2 x i16> %y) + store <2 x i16> %z, <2 x i16>* %pz + ret void +} + +define <12 x i8> @v12i8(<12 x i8> %x, <12 x i8> %y) { +; SSE-LABEL: v12i8: +; SSE: # %bb.0: +; SSE-NEXT: psubusb %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v12i8: +; AVX: # %bb.0: +; AVX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <12 x i8> @llvm.usub.sat.v12i8(<12 x i8> %x, <12 x i8> %y) + ret <12 x i8> %z +} + +define void @v12i16(<12 x i16>* %px, <12 x i16>* %py, <12 x i16>* %pz) { +; SSE-LABEL: v12i16: +; SSE: # %bb.0: +; SSE-NEXT: movdqa (%rdi), %xmm0 +; SSE-NEXT: movdqa 16(%rdi), %xmm1 +; SSE-NEXT: psubusw (%rsi), %xmm0 +; SSE-NEXT: psubusw 16(%rsi), %xmm1 +; SSE-NEXT: movq %xmm1, 16(%rdx) +; SSE-NEXT: movdqa %xmm0, (%rdx) +; SSE-NEXT: retq +; +; AVX1-LABEL: v12i16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vpsubusw (%rsi), %xmm0, %xmm0 +; AVX1-NEXT: vpsubusw 16(%rsi), %xmm1, %xmm1 +; AVX1-NEXT: vmovq %xmm1, 16(%rdx) +; AVX1-NEXT: vmovdqa %xmm0, (%rdx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: v12i16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %ymm0 +; AVX2-NEXT: vpsubusw (%rsi), %ymm0, %ymm0 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vmovq %xmm1, 16(%rdx) +; AVX2-NEXT: vmovdqa %xmm0, (%rdx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: v12i16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vpsubusw (%rsi), %ymm0, %ymm0 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vmovq %xmm1, 16(%rdx) +; AVX512-NEXT: vmovdqa %xmm0, (%rdx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %x = load <12 x i16>, <12 x i16>* %px + %y = load <12 x i16>, <12 x i16>* %py + %z = call <12 x i16> @llvm.usub.sat.v12i16(<12 x i16> %x, <12 x i16> %y) + store <12 x i16> %z, <12 x i16>* %pz + ret void +} + +; Scalarization + +define void @v1i8(<1 x i8>* %px, <1 x i8>* %py, <1 x i8>* %pz) { +; SSE-LABEL: v1i8: +; SSE: # %bb.0: +; SSE-NEXT: movb (%rdi), %al +; SSE-NEXT: subb (%rsi), %al +; SSE-NEXT: jae .LBB13_2 +; SSE-NEXT: # %bb.1: +; SSE-NEXT: xorl %eax, %eax +; SSE-NEXT: .LBB13_2: +; SSE-NEXT: movb %al, (%rdx) +; SSE-NEXT: retq +; +; AVX-LABEL: v1i8: +; AVX: # %bb.0: +; AVX-NEXT: movb (%rdi), %al +; 
AVX-NEXT: subb (%rsi), %al +; AVX-NEXT: jae .LBB13_2 +; AVX-NEXT: # %bb.1: +; AVX-NEXT: xorl %eax, %eax +; AVX-NEXT: .LBB13_2: +; AVX-NEXT: movb %al, (%rdx) +; AVX-NEXT: retq + %x = load <1 x i8>, <1 x i8>* %px + %y = load <1 x i8>, <1 x i8>* %py + %z = call <1 x i8> @llvm.usub.sat.v1i8(<1 x i8> %x, <1 x i8> %y) + store <1 x i8> %z, <1 x i8>* %pz + ret void +} + +define void @v1i16(<1 x i16>* %px, <1 x i16>* %py, <1 x i16>* %pz) { +; SSE-LABEL: v1i16: +; SSE: # %bb.0: +; SSE-NEXT: movzwl (%rdi), %eax +; SSE-NEXT: xorl %ecx, %ecx +; SSE-NEXT: subw (%rsi), %ax +; SSE-NEXT: cmovbl %ecx, %eax +; SSE-NEXT: movw %ax, (%rdx) +; SSE-NEXT: retq +; +; AVX-LABEL: v1i16: +; AVX: # %bb.0: +; AVX-NEXT: movzwl (%rdi), %eax +; AVX-NEXT: xorl %ecx, %ecx +; AVX-NEXT: subw (%rsi), %ax +; AVX-NEXT: cmovbl %ecx, %eax +; AVX-NEXT: movw %ax, (%rdx) +; AVX-NEXT: retq + %x = load <1 x i16>, <1 x i16>* %px + %y = load <1 x i16>, <1 x i16>* %py + %z = call <1 x i16> @llvm.usub.sat.v1i16(<1 x i16> %x, <1 x i16> %y) + store <1 x i16> %z, <1 x i16>* %pz + ret void +} + +; Promotion + +define <16 x i4> @v16i4(<16 x i4> %x, <16 x i4> %y) { +; SSE-LABEL: v16i4: +; SSE: # %bb.0: +; SSE-NEXT: psllw $4, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: psllw $4, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: psubusb %xmm1, %xmm0 +; SSE-NEXT: psrlw $4, %xmm0 +; SSE-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: v16i4: +; AVX: # %bb.0: +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <16 x i4> @llvm.usub.sat.v16i4(<16 x i4> %x, <16 x i4> %y) + ret <16 x i4> %z +} + +define <16 x i1> @v16i1(<16 x i1> %x, <16 x i1> %y) { +; SSE-LABEL: v16i1: +; SSE: # %bb.0: +; SSE-NEXT: psllw $7, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; SSE-NEXT: pand %xmm2, %xmm1 +; SSE-NEXT: psllw $7, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: psubusb %xmm1, %xmm0 +; SSE-NEXT: psrlw $7, %xmm0 +; SSE-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE-NEXT: retq +; +; AVX1-LABEL: v16i1: +; AVX1: # %bb.0: +; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: v16i1: +; AVX2: # %bb.0: +; AVX2-NEXT: vpsllw $7, %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] +; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpsubusb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: v16i1: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: .cfi_def_cfa_offset 16 +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: .cfi_def_cfa_offset 24 +; 
AVX512-NEXT: pushq %r14 +; AVX512-NEXT: .cfi_def_cfa_offset 32 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: .cfi_def_cfa_offset 40 +; AVX512-NEXT: .cfi_offset %rbx, -40 +; AVX512-NEXT: .cfi_offset %r14, -32 +; AVX512-NEXT: .cfi_offset %r15, -24 +; AVX512-NEXT: .cfi_offset %rbp, -16 +; AVX512-NEXT: vpsllw $7, %xmm0, %xmm0 +; AVX512-NEXT: vpmovb2m %xmm0, %k0 +; AVX512-NEXT: vpsllw $7, %xmm1, %xmm0 +; AVX512-NEXT: vpmovb2m %xmm0, %k1 +; AVX512-NEXT: kshiftrw $4, %k0, %k2 +; AVX512-NEXT: kshiftrw $4, %k1, %k3 +; AVX512-NEXT: kshiftrw $3, %k0, %k4 +; AVX512-NEXT: kmovd %k4, %r15d +; AVX512-NEXT: kshiftrw $3, %k1, %k4 +; AVX512-NEXT: kmovd %k4, %r9d +; AVX512-NEXT: kshiftrw $2, %k0, %k4 +; AVX512-NEXT: kmovd %k4, %eax +; AVX512-NEXT: kshiftrw $2, %k1, %k4 +; AVX512-NEXT: kmovd %k4, %ebp +; AVX512-NEXT: kmovd %k0, %ecx +; AVX512-NEXT: kmovd %k1, %esi +; AVX512-NEXT: kshiftrw $1, %k0, %k4 +; AVX512-NEXT: kmovd %k4, %edi +; AVX512-NEXT: kshiftrw $1, %k1, %k4 +; AVX512-NEXT: kmovd %k4, %edx +; AVX512-NEXT: shlb $7, %dl +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: xorl %r8d, %r8d +; AVX512-NEXT: subb %dl, %dil +; AVX512-NEXT: movl $0, %ebx +; AVX512-NEXT: jb .LBB16_2 +; AVX512-NEXT: # %bb.1: +; AVX512-NEXT: movl %edi, %ebx +; AVX512-NEXT: .LBB16_2: +; AVX512-NEXT: kshiftrw $5, %k0, %k4 +; AVX512-NEXT: kshiftrw $5, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %edi +; AVX512-NEXT: kmovd %k3, %r11d +; AVX512-NEXT: shrb $7, %bl +; AVX512-NEXT: kmovd %ebx, %k6 +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: subb %sil, %cl +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_4 +; AVX512-NEXT: # %bb.3: +; AVX512-NEXT: movl %ecx, %edx +; AVX512-NEXT: .LBB16_4: +; AVX512-NEXT: kshiftrw $6, %k0, %k2 +; AVX512-NEXT: kshiftrw $6, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %esi +; AVX512-NEXT: kmovd %k5, %r14d +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k4 +; AVX512-NEXT: kshiftrw $1, %k4, %k5 +; AVX512-NEXT: kxorw %k6, %k5, %k5 +; AVX512-NEXT: kshiftlw $15, %k5, %k5 +; AVX512-NEXT: kshiftrw $14, %k5, %k5 +; AVX512-NEXT: kxorw %k5, %k4, %k6 +; AVX512-NEXT: kshiftrw $2, %k6, %k7 +; AVX512-NEXT: shlb $7, %bpl +; AVX512-NEXT: shlb $7, %al +; AVX512-NEXT: subb %bpl, %al +; AVX512-NEXT: movl $0, %ecx +; AVX512-NEXT: jb .LBB16_6 +; AVX512-NEXT: # %bb.5: +; AVX512-NEXT: movl %eax, %ecx +; AVX512-NEXT: .LBB16_6: +; AVX512-NEXT: kshiftrw $7, %k0, %k4 +; AVX512-NEXT: kshiftrw $7, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %eax +; AVX512-NEXT: kmovd %k3, %r10d +; AVX512-NEXT: shrb $7, %cl +; AVX512-NEXT: kmovd %ecx, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $13, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $3, %k6, %k7 +; AVX512-NEXT: shlb $7, %r9b +; AVX512-NEXT: shlb $7, %r15b +; AVX512-NEXT: subb %r9b, %r15b +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_8 +; AVX512-NEXT: # %bb.7: +; AVX512-NEXT: movl %r15d, %edx +; AVX512-NEXT: .LBB16_8: +; AVX512-NEXT: kshiftrw $8, %k0, %k2 +; AVX512-NEXT: kshiftrw $8, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %ecx +; AVX512-NEXT: kmovd %k5, %r9d +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $12, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k6 +; AVX512-NEXT: kshiftrw $4, %k6, %k7 +; AVX512-NEXT: shlb $7, %r11b +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: subb %r11b, %dil +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_10 +; AVX512-NEXT: # %bb.9: +; 
AVX512-NEXT: movl %edi, %edx +; AVX512-NEXT: .LBB16_10: +; AVX512-NEXT: kshiftrw $9, %k0, %k4 +; AVX512-NEXT: kshiftrw $9, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %edi +; AVX512-NEXT: kmovd %k3, %ebx +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $11, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $5, %k6, %k7 +; AVX512-NEXT: shlb $7, %r14b +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: subb %r14b, %sil +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_12 +; AVX512-NEXT: # %bb.11: +; AVX512-NEXT: movl %esi, %edx +; AVX512-NEXT: .LBB16_12: +; AVX512-NEXT: kshiftrw $10, %k0, %k2 +; AVX512-NEXT: kshiftrw $10, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %esi +; AVX512-NEXT: kmovd %k5, %r11d +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $10, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k6 +; AVX512-NEXT: kshiftrw $6, %k6, %k7 +; AVX512-NEXT: shlb $7, %r10b +; AVX512-NEXT: shlb $7, %al +; AVX512-NEXT: subb %r10b, %al +; AVX512-NEXT: movl $0, %ebp +; AVX512-NEXT: jb .LBB16_14 +; AVX512-NEXT: # %bb.13: +; AVX512-NEXT: movl %eax, %ebp +; AVX512-NEXT: .LBB16_14: +; AVX512-NEXT: kshiftrw $11, %k0, %k4 +; AVX512-NEXT: kshiftrw $11, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %r15d +; AVX512-NEXT: kmovd %k3, %r10d +; AVX512-NEXT: shrb $7, %bpl +; AVX512-NEXT: kmovd %ebp, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $9, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $7, %k6, %k7 +; AVX512-NEXT: shlb $7, %r9b +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: subb %r9b, %cl +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: jb .LBB16_16 +; AVX512-NEXT: # %bb.15: +; AVX512-NEXT: movl %ecx, %eax +; AVX512-NEXT: .LBB16_16: +; AVX512-NEXT: kshiftrw $12, %k0, %k2 +; AVX512-NEXT: kshiftrw $12, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %ecx +; AVX512-NEXT: kmovd %k5, %r9d +; AVX512-NEXT: shrb $7, %al +; AVX512-NEXT: kmovd %eax, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $8, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k6 +; AVX512-NEXT: kshiftrw $8, %k6, %k7 +; AVX512-NEXT: shlb $7, %bl +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: subb %bl, %dil +; AVX512-NEXT: movl $0, %ebx +; AVX512-NEXT: jb .LBB16_18 +; AVX512-NEXT: # %bb.17: +; AVX512-NEXT: movl %edi, %ebx +; AVX512-NEXT: .LBB16_18: +; AVX512-NEXT: kshiftrw $13, %k0, %k4 +; AVX512-NEXT: kshiftrw $13, %k1, %k5 +; AVX512-NEXT: kmovd %k2, %eax +; AVX512-NEXT: kmovd %k3, %r14d +; AVX512-NEXT: shrb $7, %bl +; AVX512-NEXT: kmovd %ebx, %k2 +; AVX512-NEXT: kxorw %k2, %k7, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $7, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k6, %k6 +; AVX512-NEXT: kshiftrw $9, %k6, %k7 +; AVX512-NEXT: shlb $7, %r11b +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: subb %r11b, %sil +; AVX512-NEXT: movl $0, %edi +; AVX512-NEXT: jb .LBB16_20 +; AVX512-NEXT: # %bb.19: +; AVX512-NEXT: movl %esi, %edi +; AVX512-NEXT: .LBB16_20: +; AVX512-NEXT: kshiftrw $14, %k0, %k2 +; AVX512-NEXT: kshiftrw $14, %k1, %k3 +; AVX512-NEXT: kmovd %k4, %esi +; AVX512-NEXT: kmovd %k5, %r11d +; AVX512-NEXT: shrb $7, %dil +; AVX512-NEXT: kmovd %edi, %k4 +; AVX512-NEXT: kxorw %k4, %k7, %k4 +; AVX512-NEXT: kshiftlw $15, %k4, %k4 +; AVX512-NEXT: kshiftrw $6, %k4, %k4 +; AVX512-NEXT: kxorw %k4, %k6, %k4 +; 
AVX512-NEXT: kshiftrw $10, %k4, %k5 +; AVX512-NEXT: shlb $7, %r10b +; AVX512-NEXT: shlb $7, %r15b +; AVX512-NEXT: subb %r10b, %r15b +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_22 +; AVX512-NEXT: # %bb.21: +; AVX512-NEXT: movl %r15d, %edx +; AVX512-NEXT: .LBB16_22: +; AVX512-NEXT: kshiftrw $15, %k0, %k0 +; AVX512-NEXT: kshiftrw $15, %k1, %k1 +; AVX512-NEXT: kmovd %k2, %ebx +; AVX512-NEXT: kmovd %k3, %edi +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k5, %k2 +; AVX512-NEXT: kshiftlw $15, %k2, %k2 +; AVX512-NEXT: kshiftrw $5, %k2, %k2 +; AVX512-NEXT: kxorw %k2, %k4, %k2 +; AVX512-NEXT: kshiftrw $11, %k2, %k3 +; AVX512-NEXT: shlb $7, %r9b +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: subb %r9b, %cl +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_24 +; AVX512-NEXT: # %bb.23: +; AVX512-NEXT: movl %ecx, %edx +; AVX512-NEXT: .LBB16_24: +; AVX512-NEXT: kmovd %k0, %ecx +; AVX512-NEXT: kmovd %k1, %ebp +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k0 +; AVX512-NEXT: kxorw %k0, %k3, %k0 +; AVX512-NEXT: kshiftlw $15, %k0, %k0 +; AVX512-NEXT: kshiftrw $4, %k0, %k0 +; AVX512-NEXT: kxorw %k0, %k2, %k0 +; AVX512-NEXT: kshiftrw $12, %k0, %k1 +; AVX512-NEXT: shlb $7, %r14b +; AVX512-NEXT: shlb $7, %al +; AVX512-NEXT: subb %r14b, %al +; AVX512-NEXT: movl $0, %edx +; AVX512-NEXT: jb .LBB16_26 +; AVX512-NEXT: # %bb.25: +; AVX512-NEXT: movl %eax, %edx +; AVX512-NEXT: .LBB16_26: +; AVX512-NEXT: shrb $7, %dl +; AVX512-NEXT: kmovd %edx, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $3, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $13, %k0, %k1 +; AVX512-NEXT: shlb $7, %r11b +; AVX512-NEXT: shlb $7, %sil +; AVX512-NEXT: subb %r11b, %sil +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: jb .LBB16_28 +; AVX512-NEXT: # %bb.27: +; AVX512-NEXT: movl %esi, %eax +; AVX512-NEXT: .LBB16_28: +; AVX512-NEXT: shrb $7, %al +; AVX512-NEXT: kmovd %eax, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $2, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftrw $14, %k0, %k1 +; AVX512-NEXT: shlb $7, %dil +; AVX512-NEXT: shlb $7, %bl +; AVX512-NEXT: subb %dil, %bl +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: jb .LBB16_30 +; AVX512-NEXT: # %bb.29: +; AVX512-NEXT: movl %ebx, %eax +; AVX512-NEXT: .LBB16_30: +; AVX512-NEXT: shrb $7, %al +; AVX512-NEXT: kmovd %eax, %k2 +; AVX512-NEXT: kxorw %k2, %k1, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: kshiftrw $1, %k1, %k1 +; AVX512-NEXT: kxorw %k1, %k0, %k0 +; AVX512-NEXT: kshiftlw $1, %k0, %k0 +; AVX512-NEXT: kshiftrw $1, %k0, %k0 +; AVX512-NEXT: shlb $7, %bpl +; AVX512-NEXT: shlb $7, %cl +; AVX512-NEXT: subb %bpl, %cl +; AVX512-NEXT: jb .LBB16_32 +; AVX512-NEXT: # %bb.31: +; AVX512-NEXT: movl %ecx, %r8d +; AVX512-NEXT: .LBB16_32: +; AVX512-NEXT: shrb $7, %r8b +; AVX512-NEXT: kmovd %r8d, %k1 +; AVX512-NEXT: kshiftlw $15, %k1, %k1 +; AVX512-NEXT: korw %k1, %k0, %k0 +; AVX512-NEXT: vpmovm2b %k0, %xmm0 +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: .cfi_def_cfa_offset 32 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: .cfi_def_cfa_offset 24 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: .cfi_def_cfa_offset 16 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: .cfi_def_cfa_offset 8 +; AVX512-NEXT: retq + %z = call <16 x i1> @llvm.usub.sat.v16i1(<16 x i1> %x, <16 x i1> %y) + ret <16 x i1> %z +} + +; Expanded + +define <4 x i32> @v4i32(<4 x i32> %x, <4 x i32> %y) { +; SSE2-LABEL: v4i32: +; 
SSE2: # %bb.0: +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %ecx +; SSE2-NEXT: xorl %edx, %edx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %ecx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v4i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %ecx +; SSSE3-NEXT: xorl %edx, %edx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %ecx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v4i32: +; SSE41: # %bb.0: +; SSE41-NEXT: pextrd $1, %xmm1, %eax +; SSE41-NEXT: pextrd $1, %xmm0, %ecx +; SSE41-NEXT: xorl %edx, %edx +; SSE41-NEXT: subl %eax, %ecx +; SSE41-NEXT: cmovbl %edx, %ecx +; SSE41-NEXT: movd %xmm1, %eax +; SSE41-NEXT: movd %xmm0, %esi +; SSE41-NEXT: subl %eax, %esi +; SSE41-NEXT: cmovbl %edx, %esi +; SSE41-NEXT: movd %esi, %xmm2 +; SSE41-NEXT: pinsrd $1, %ecx, %xmm2 +; SSE41-NEXT: pextrd $2, %xmm1, %eax +; SSE41-NEXT: pextrd $2, %xmm0, %ecx +; SSE41-NEXT: subl %eax, %ecx +; SSE41-NEXT: cmovbl %edx, %ecx +; SSE41-NEXT: pinsrd $2, %ecx, %xmm2 +; SSE41-NEXT: pextrd $3, %xmm1, %eax +; SSE41-NEXT: pextrd $3, %xmm0, %ecx +; SSE41-NEXT: subl %eax, %ecx +; SSE41-NEXT: cmovbl %edx, %ecx +; SSE41-NEXT: pinsrd $3, %ecx, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: v4i32: +; AVX: # %bb.0: +; AVX-NEXT: vpextrd $1, %xmm1, %eax +; AVX-NEXT: vpextrd $1, %xmm0, %ecx +; AVX-NEXT: xorl %edx, %edx +; AVX-NEXT: subl %eax, %ecx +; AVX-NEXT: cmovbl %edx, %ecx +; AVX-NEXT: 
vmovd %xmm1, %eax +; AVX-NEXT: vmovd %xmm0, %esi +; AVX-NEXT: subl %eax, %esi +; AVX-NEXT: cmovbl %edx, %esi +; AVX-NEXT: vmovd %esi, %xmm2 +; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $2, %xmm1, %eax +; AVX-NEXT: vpextrd $2, %xmm0, %ecx +; AVX-NEXT: subl %eax, %ecx +; AVX-NEXT: cmovbl %edx, %ecx +; AVX-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $3, %xmm1, %eax +; AVX-NEXT: vpextrd $3, %xmm0, %ecx +; AVX-NEXT: subl %eax, %ecx +; AVX-NEXT: cmovbl %edx, %ecx +; AVX-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX-NEXT: retq + %z = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y) + ret <4 x i32> %z +} + +define <2 x i32> @v2i32(<2 x i32> %x, <2 x i32> %y) { +; SSE2-LABEL: v2i32: +; SSE2: # %bb.0: +; SSE2-NEXT: psllq $32, %xmm1 +; SSE2-NEXT: movq %xmm1, %rax +; SSE2-NEXT: psllq $32, %xmm0 +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: xorl %edx, %edx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: movq %xmm1, %rax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movq %xmm0, %rcx +; SSE2-NEXT: subq %rax, %rcx +; SSE2-NEXT: cmovbq %rdx, %rcx +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSE2-NEXT: psrlq $32, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v2i32: +; SSSE3: # %bb.0: +; SSSE3-NEXT: psllq $32, %xmm1 +; SSSE3-NEXT: movq %xmm1, %rax +; SSSE3-NEXT: psllq $32, %xmm0 +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: xorl %edx, %edx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSSE3-NEXT: movq %xmm1, %rax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movq %xmm0, %rcx +; SSSE3-NEXT: subq %rax, %rcx +; SSSE3-NEXT: cmovbq %rdx, %rcx +; SSSE3-NEXT: movq %rcx, %xmm0 +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0] +; SSSE3-NEXT: psrlq $32, %xmm2 +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v2i32: +; SSE41: # %bb.0: +; SSE41-NEXT: psllq $32, %xmm1 +; SSE41-NEXT: pextrq $1, %xmm1, %rax +; SSE41-NEXT: psllq $32, %xmm0 +; SSE41-NEXT: pextrq $1, %xmm0, %rcx +; SSE41-NEXT: xorl %edx, %edx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm2 +; SSE41-NEXT: movq %xmm1, %rax +; SSE41-NEXT: movq %xmm0, %rcx +; SSE41-NEXT: subq %rax, %rcx +; SSE41-NEXT: cmovbq %rdx, %rcx +; SSE41-NEXT: movq %rcx, %xmm0 +; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE41-NEXT: psrlq $32, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: v2i32: +; AVX: # %bb.0: +; AVX-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX-NEXT: vpextrq $1, %xmm1, %rax +; AVX-NEXT: vpsllq $32, %xmm0, %xmm0 +; AVX-NEXT: vpextrq $1, %xmm0, %rcx +; AVX-NEXT: xorl %edx, %edx +; AVX-NEXT: subq %rax, %rcx +; AVX-NEXT: cmovbq %rdx, %rcx +; AVX-NEXT: vmovq %rcx, %xmm2 +; AVX-NEXT: vmovq %xmm1, %rax +; AVX-NEXT: vmovq %xmm0, %rcx +; AVX-NEXT: subq %rax, %rcx +; AVX-NEXT: cmovbq %rdx, %rcx +; AVX-NEXT: vmovq %rcx, %xmm0 +; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX-NEXT: vpsrlq $32, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %x, <2 x i32> %y) + ret <2 x i32> %z +} + +define <4 x i24> @v4i24(<4 x i24> %x, <4 x i24> %y) { +; SSE2-LABEL: v4i24: +; SSE2: # %bb.0: +; SSE2-NEXT: pslld $8, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSE2-NEXT: movd 
%xmm2, %eax +; SSE2-NEXT: pslld $8, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSE2-NEXT: movd %xmm2, %ecx +; SSE2-NEXT: xorl %edx, %edx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm3, %ecx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm3 +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSE2-NEXT: movd %xmm1, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: subl %eax, %ecx +; SSE2-NEXT: cmovbl %edx, %ecx +; SSE2-NEXT: movd %ecx, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE2-NEXT: psrld $8, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; SSSE3-LABEL: v4i24: +; SSSE3: # %bb.0: +; SSSE3-NEXT: pslld $8, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %eax +; SSSE3-NEXT: pslld $8, %xmm0 +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3] +; SSSE3-NEXT: movd %xmm2, %ecx +; SSSE3-NEXT: xorl %edx, %edx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm3, %ecx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm3 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; SSSE3-NEXT: movd %xmm1, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: subl %eax, %ecx +; SSSE3-NEXT: cmovbl %edx, %ecx +; SSSE3-NEXT: movd %ecx, %xmm0 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSSE3-NEXT: psrld $8, %xmm2 +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: retq +; +; SSE41-LABEL: v4i24: +; SSE41: # %bb.0: +; SSE41-NEXT: pslld $8, %xmm1 +; SSE41-NEXT: pextrd $1, %xmm1, %eax +; SSE41-NEXT: pslld $8, %xmm0 +; SSE41-NEXT: pextrd $1, %xmm0, %ecx +; SSE41-NEXT: xorl %edx, %edx +; SSE41-NEXT: subl %eax, %ecx +; SSE41-NEXT: cmovbl %edx, %ecx +; SSE41-NEXT: movd %xmm1, %eax +; SSE41-NEXT: movd %xmm0, %esi +; SSE41-NEXT: subl %eax, %esi +; SSE41-NEXT: cmovbl %edx, %esi +; SSE41-NEXT: movd %esi, %xmm2 +; SSE41-NEXT: pinsrd $1, %ecx, %xmm2 +; SSE41-NEXT: pextrd $2, %xmm1, %eax +; SSE41-NEXT: pextrd $2, %xmm0, %ecx +; SSE41-NEXT: subl %eax, %ecx +; SSE41-NEXT: cmovbl %edx, %ecx +; SSE41-NEXT: pinsrd $2, %ecx, %xmm2 +; SSE41-NEXT: pextrd $3, %xmm1, %eax +; SSE41-NEXT: pextrd $3, %xmm0, %ecx +; SSE41-NEXT: subl %eax, %ecx +; SSE41-NEXT: cmovbl %edx, %ecx +; SSE41-NEXT: pinsrd $3, %ecx, %xmm2 +; SSE41-NEXT: psrld $8, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: retq +; +; AVX-LABEL: v4i24: +; AVX: # %bb.0: +; AVX-NEXT: 
vpslld $8, %xmm1, %xmm1 +; AVX-NEXT: vpextrd $1, %xmm1, %eax +; AVX-NEXT: vpslld $8, %xmm0, %xmm0 +; AVX-NEXT: vpextrd $1, %xmm0, %ecx +; AVX-NEXT: xorl %edx, %edx +; AVX-NEXT: subl %eax, %ecx +; AVX-NEXT: cmovbl %edx, %ecx +; AVX-NEXT: vmovd %xmm1, %eax +; AVX-NEXT: vmovd %xmm0, %esi +; AVX-NEXT: subl %eax, %esi +; AVX-NEXT: cmovbl %edx, %esi +; AVX-NEXT: vmovd %esi, %xmm2 +; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $2, %xmm1, %eax +; AVX-NEXT: vpextrd $2, %xmm0, %ecx +; AVX-NEXT: subl %eax, %ecx +; AVX-NEXT: cmovbl %edx, %ecx +; AVX-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2 +; AVX-NEXT: vpextrd $3, %xmm1, %eax +; AVX-NEXT: vpextrd $3, %xmm0, %ecx +; AVX-NEXT: subl %eax, %ecx +; AVX-NEXT: cmovbl %edx, %ecx +; AVX-NEXT: vpinsrd $3, %ecx, %xmm2, %xmm0 +; AVX-NEXT: vpsrld $8, %xmm0, %xmm0 +; AVX-NEXT: retq + %z = call <4 x i24> @llvm.usub.sat.v4i24(<4 x i24> %x, <4 x i24> %y) + ret <4 x i24> %z +} + +define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) { +; SSE-LABEL: v2i128: +; SSE: # %bb.0: +; SSE-NEXT: movq %rdi, %rax +; SSE-NEXT: xorl %edi, %edi +; SSE-NEXT: subq %r9, %rsi +; SSE-NEXT: sbbq {{[0-9]+}}(%rsp), %rdx +; SSE-NEXT: cmovbq %rdi, %rsi +; SSE-NEXT: cmovbq %rdi, %rdx +; SSE-NEXT: subq {{[0-9]+}}(%rsp), %rcx +; SSE-NEXT: sbbq {{[0-9]+}}(%rsp), %r8 +; SSE-NEXT: cmovbq %rdi, %r8 +; SSE-NEXT: cmovbq %rdi, %rcx +; SSE-NEXT: movq %r8, 24(%rax) +; SSE-NEXT: movq %rcx, 16(%rax) +; SSE-NEXT: movq %rdx, 8(%rax) +; SSE-NEXT: movq %rsi, (%rax) +; SSE-NEXT: retq +; +; AVX-LABEL: v2i128: +; AVX: # %bb.0: +; AVX-NEXT: movq %rdi, %rax +; AVX-NEXT: xorl %edi, %edi +; AVX-NEXT: subq %r9, %rsi +; AVX-NEXT: sbbq {{[0-9]+}}(%rsp), %rdx +; AVX-NEXT: cmovbq %rdi, %rsi +; AVX-NEXT: cmovbq %rdi, %rdx +; AVX-NEXT: subq {{[0-9]+}}(%rsp), %rcx +; AVX-NEXT: sbbq {{[0-9]+}}(%rsp), %r8 +; AVX-NEXT: cmovbq %rdi, %r8 +; AVX-NEXT: cmovbq %rdi, %rcx +; AVX-NEXT: movq %r8, 24(%rax) +; AVX-NEXT: movq %rcx, 16(%rax) +; AVX-NEXT: movq %rdx, 8(%rax) +; AVX-NEXT: movq %rsi, (%rax) +; AVX-NEXT: retq + %z = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %x, <2 x i128> %y) + ret <2 x i128> %z +}
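
For readers skimming the "Expanded" cases above: the per-element pattern the checks look for (subl/subq followed by a cmovb of zero) is plain unsigned saturating subtraction clamped at zero. Below is a minimal standalone C++ sketch of that scalar semantics; it is purely illustrative, not part of the patch and not an LLVM API, and the function name is made up for this note.

    #include <cstdint>

    // Illustrative reference model only: the semantics of ISD::USUBSAT for a
    // single 32-bit lane, matching the "subl; cmovbl $0" sequence that the
    // expanded lowerings above are checked against.
    static uint32_t usub_sat_u32(uint32_t x, uint32_t y) {
      return x >= y ? x - y : 0; // clamp to 0 on unsigned underflow
    }

The UADDSAT counterpart tested earlier is the mirror image: add, then clamp to all-ones on carry (the "addl; movl $-1; cmovbl" sequence).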