Index: llvm/include/llvm/CodeGen/ISDOpcodes.h
===================================================================
--- llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -264,6 +264,14 @@
     /// resulting value is this minimum value.
     SADDSAT, UADDSAT,
 
+    /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
+    /// integers with the same bit width (W). If the true value of LHS - RHS
+    /// exceeds the largest value that can be represented by W bits, the
+    /// resulting value is this maximum value. Otherwise, if this value is less
+    /// than the smallest value that can be represented by W bits, the
+    /// resulting value is this minimum value.
+    SSUBSAT, USUBSAT,
+
     /// Simple binary floating point operators.
     FADD, FSUB, FMUL, FDIV, FREM,
 
Index: llvm/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetLowering.h
+++ llvm/include/llvm/CodeGen/TargetLowering.h
@@ -3730,9 +3730,10 @@
   SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                   SDValue Index) const;
 
-  /// Method for building the DAG expansion of ISD::[US]ADDSAT. This method
-  /// accepts integers or vectors of integers as its arguments.
-  SDValue getExpandedSaturationAddition(SDNode *Node, SelectionDAG &DAG) const;
+  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
+  /// method accepts integers or vectors of integers as its arguments.
+  SDValue getExpandedSaturationAdditionSubtraction(SDNode *Node,
+                                                   SelectionDAG &DAG) const;
 
   //===--------------------------------------------------------------------===//
   // Instruction Emitting Hooks
Index: llvm/include/llvm/IR/Intrinsics.td
===================================================================
--- llvm/include/llvm/IR/Intrinsics.td
+++ llvm/include/llvm/IR/Intrinsics.td
@@ -716,6 +716,12 @@
 def int_uadd_sat : Intrinsic<[llvm_anyint_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable, Commutative]>;
+def int_ssub_sat : Intrinsic<[llvm_anyint_ty],
+                             [LLVMMatchType<0>, LLVMMatchType<0>],
+                             [IntrNoMem, IntrSpeculatable]>;
+def int_usub_sat : Intrinsic<[llvm_anyint_ty],
+                             [LLVMMatchType<0>, LLVMMatchType<0>],
+                             [IntrNoMem, IntrSpeculatable]>;
 
 //===------------------------- Memory Use Markers -------------------------===//
 //
Index: llvm/include/llvm/Target/TargetSelectionDAG.td
===================================================================
--- llvm/include/llvm/Target/TargetSelectionDAG.td
+++ llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -375,6 +375,8 @@
 
 def saddsat    : SDNode<"ISD::SADDSAT"   , SDTIntBinOp, [SDNPCommutative]>;
 def uaddsat    : SDNode<"ISD::UADDSAT"   , SDTIntBinOp, [SDNPCommutative]>;
+def ssubsat    : SDNode<"ISD::SSUBSAT"   , SDTIntBinOp>;
+def usubsat    : SDNode<"ISD::USUBSAT"   , SDTIntBinOp>;
 
 def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
 def sext_invec : SDNode<"ISD::SIGN_EXTEND_VECTOR_INREG", SDTExtInvec>;
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1115,7 +1115,9 @@
                                     Node->getValueType(0));
     break;
   case ISD::SADDSAT:
-  case ISD::UADDSAT: {
+  case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT: {
     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
     break;
   }
@@ -3300,8 +3302,10 @@
     break;
   }
   case ISD::SADDSAT:
-  case ISD::UADDSAT: {
-    Results.push_back(TLI.getExpandedSaturationAddition(Node, DAG));
+  case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT: {
+    Results.push_back(TLI.getExpandedSaturationAdditionSubtraction(Node, DAG));
     break;
   }
   case ISD::SADDO:
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -142,7 +142,9 @@
   case ISD::SUBCARRY: Res = PromoteIntRes_ADDSUBCARRY(N, ResNo); break;
 
   case ISD::SADDSAT:
-  case ISD::UADDSAT: Res = PromoteIntRes_ADDSAT(N); break;
+  case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT: Res = PromoteIntRes_ADDSUBSAT(N); break;
 
   case ISD::ATOMIC_LOAD:
     Res = PromoteIntRes_Atomic0(cast<AtomicSDNode>(N)); break;
@@ -549,11 +551,11 @@
   return SDValue(Res.getNode(), 1);
 }
 
-SDValue DAGTypeLegalizer::PromoteIntRes_ADDSAT(SDNode *N) {
+SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSAT(SDNode *N) {
   // For promoting iN -> iM, this can be expanded by
   // 1. ANY_EXTEND iN to iM
   // 2. SHL by M-N
-  // 3. U/SADDSAT
+  // 3. [US][ADD|SUB]SAT
   // 4. L/ASHR by M-N
   SDLoc dl(N);
   SDValue Op1 = N->getOperand(0);
@@ -561,9 +563,20 @@
   unsigned OldBits = Op1.getValueSizeInBits();
 
   unsigned Opcode = N->getOpcode();
-  assert((Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT) &&
-         "Expected opcode to be SADDSAT or UADDSAT");
-  unsigned ShiftOp = Opcode == ISD::SADDSAT ? ISD::SRA : ISD::SRL;
+  unsigned ShiftOp;
+  switch (Opcode) {
+  case ISD::SADDSAT:
+  case ISD::SSUBSAT:
+    ShiftOp = ISD::SRA;
+    break;
+  case ISD::UADDSAT:
+  case ISD::USUBSAT:
+    ShiftOp = ISD::SRL;
+    break;
+  default:
+    llvm_unreachable("Expected opcode to be signed or unsigned saturation "
+                     "addition or subtraction");
+  }
 
   SDValue Op1Promoted = GetPromotedInteger(Op1);
   SDValue Op2Promoted = GetPromotedInteger(Op2);
@@ -1505,7 +1518,9 @@
   case ISD::SMULO: ExpandIntRes_XMULO(N, Lo, Hi); break;
 
   case ISD::SADDSAT:
-  case ISD::UADDSAT: ExpandIntRes_ADDSAT(N, Lo, Hi); break;
+  case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT: ExpandIntRes_ADDSUBSAT(N, Lo, Hi); break;
   }
 
   // If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -2468,9 +2483,9 @@
   ReplaceValueWith(SDValue(N, 1), R.getValue(2));
 }
 
-void DAGTypeLegalizer::ExpandIntRes_ADDSAT(SDNode *N, SDValue &Lo,
-                                           SDValue &Hi) {
-  SDValue Result = TLI.getExpandedSaturationAddition(N, DAG);
+void DAGTypeLegalizer::ExpandIntRes_ADDSUBSAT(SDNode *N, SDValue &Lo,
+                                              SDValue &Hi) {
+  SDValue Result = TLI.getExpandedSaturationAdditionSubtraction(N, DAG);
   SplitInteger(Result, Lo, Hi);
 }
 
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -330,7 +330,7 @@
   SDValue PromoteIntRes_UNDEF(SDNode *N);
   SDValue PromoteIntRes_VAARG(SDNode *N);
   SDValue PromoteIntRes_XMULO(SDNode *N, unsigned ResNo);
-  SDValue PromoteIntRes_ADDSAT(SDNode *N);
+  SDValue PromoteIntRes_ADDSUBSAT(SDNode *N);
 
   // Integer Operand Promotion.
   bool PromoteIntegerOperand(SDNode *N, unsigned OpNo);
@@ -415,7 +415,7 @@
   void ExpandIntRes_SADDSUBO          (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_UADDSUBO          (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_XMULO             (SDNode *N, SDValue &Lo, SDValue &Hi);
-  void ExpandIntRes_ADDSAT            (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_ADDSUBSAT         (SDNode *N, SDValue &Lo, SDValue &Hi);
 
   void ExpandIntRes_ATOMIC_LOAD       (SDNode *N, SDValue &Lo, SDValue &Hi);
 
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -391,6 +391,8 @@
   case ISD::FCANONICALIZE:
   case ISD::SADDSAT:
   case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
   case ISD::FP_ROUND_INREG:
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -124,6 +124,8 @@
 
   case ISD::SADDSAT:
   case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT:
 
   case ISD::FPOW:
   case ISD::FREM:
@@ -807,6 +809,8 @@
   case ISD::UMAX:
   case ISD::SADDSAT:
   case ISD::UADDSAT:
+  case ISD::SSUBSAT:
+  case ISD::USUBSAT:
     SplitVecRes_BinOp(N, Lo, Hi);
     break;
   case ISD::FMA:
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5783,6 +5783,18 @@
     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
     return nullptr;
   }
+  case Intrinsic::ssub_sat: {
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
+    return nullptr;
+  }
+  case Intrinsic::usub_sat: {
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
+    return nullptr;
+  }
   case Intrinsic::stacksave: {
     SDValue Op = getRoot();
     Res = DAG.getNode(
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -286,6 +286,8 @@
 
   case ISD::SADDSAT:                    return "saddsat";
   case ISD::UADDSAT:                    return "uaddsat";
+  case ISD::SSUBSAT:                    return "ssubsat";
+  case ISD::USUBSAT:                    return "usubsat";
 
   // Conversion operators.
   case ISD::SIGN_EXTEND:                return "sign_extend";
Index: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -4904,11 +4904,27 @@
   return SDValue();
 }
 
-SDValue TargetLowering::getExpandedSaturationAddition(SDNode *Node,
-                                                      SelectionDAG &DAG) const {
+SDValue TargetLowering::getExpandedSaturationAdditionSubtraction(
+    SDNode *Node, SelectionDAG &DAG) const {
   unsigned Opcode = Node->getOpcode();
-  assert((Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT) &&
-         "Expected method to receive SADDSAT or UADDSAT node.");
+  unsigned OverflowOp;
+  switch (Opcode) {
+  case ISD::SADDSAT:
+    OverflowOp = ISD::SADDO;
+    break;
+  case ISD::UADDSAT:
+    OverflowOp = ISD::UADDO;
+    break;
+  case ISD::SSUBSAT:
+    OverflowOp = ISD::SSUBO;
+    break;
+  case ISD::USUBSAT:
+    OverflowOp = ISD::USUBO;
+    break;
+  default:
+    llvm_unreachable("Expected method to receive signed or unsigned saturation "
+                     "addition or subtraction node.");
+  }
   assert(Node->getNumOperands() == 2 && "Expected node to have 2 operands.");
 
   SDLoc dl(Node);
@@ -4923,31 +4939,35 @@
   assert(LHS.getValueType() == RHS.getValueType() &&
          "Expected both operands to be the same type");
 
-  unsigned OverflowOp = Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::UADDO;
   unsigned BitWidth = LHS.getValueSizeInBits();
   EVT ResultType = LHS.getValueType();
   EVT BoolVT =
       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ResultType);
   SDValue Result =
       DAG.getNode(OverflowOp, dl, DAG.getVTList(ResultType, BoolVT), LHS, RHS);
-  SDValue Sum = Result.getValue(0);
+  SDValue SumDiff = Result.getValue(0);
   SDValue Overflow = Result.getValue(1);
   SDValue Zero = DAG.getConstant(0, dl, ResultType);
 
-  if (Opcode == ISD::SADDSAT) {
-    // SatMax -> Overflow && Sum < 0
-    // SatMin -> Overflow && Sum > 0
+  if (Opcode == ISD::UADDSAT) {
+    // Just need to check overflow for SatMax.
+    APInt MaxVal = APInt::getMaxValue(BitWidth);
+    SDValue SatMax = DAG.getConstant(MaxVal, dl, ResultType);
+    return DAG.getSelect(dl, ResultType, Overflow, SatMax, SumDiff);
+  } else if (Opcode == ISD::USUBSAT) {
+    // Just need to check overflow for SatMin.
+    APInt MinVal = APInt::getMinValue(BitWidth);
+    SDValue SatMin = DAG.getConstant(MinVal, dl, ResultType);
+    return DAG.getSelect(dl, ResultType, Overflow, SatMin, SumDiff);
+  } else {
+    // SatMax -> Overflow && SumDiff < 0
+    // SatMin -> Overflow && SumDiff > 0
     APInt MinVal = APInt::getSignedMinValue(BitWidth);
     APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
     SDValue SatMin = DAG.getConstant(MinVal, dl, ResultType);
     SDValue SatMax = DAG.getConstant(MaxVal, dl, ResultType);
-    SDValue SumNeg = DAG.getSetCC(dl, BoolVT, Sum, Zero, ISD::SETLT);
+    SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT);
     Result = DAG.getSelect(dl, ResultType, SumNeg, SatMax, SatMin);
-    return DAG.getSelect(dl, ResultType, Overflow, Result, Sum);
-  } else {
-    // Just need to check overflow for SatMax.
-    APInt MaxVal = APInt::getMaxValue(BitWidth);
-    SDValue SatMax = DAG.getConstant(MaxVal, dl, ResultType);
-    return DAG.getSelect(dl, ResultType, Overflow, SatMax, Sum);
+    return DAG.getSelect(dl, ResultType, Overflow, Result, SumDiff);
   }
 }
Index: llvm/lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -612,6 +612,8 @@
     setOperationAction(ISD::ABS, VT, Expand);
     setOperationAction(ISD::SADDSAT, VT, Expand);
     setOperationAction(ISD::UADDSAT, VT, Expand);
+    setOperationAction(ISD::SSUBSAT, VT, Expand);
+    setOperationAction(ISD::USUBSAT, VT, Expand);
 
     // Overflow operations default to expand
     setOperationAction(ISD::SADDO, VT, Expand);
Index: llvm/lib/IR/Verifier.cpp
===================================================================
--- llvm/lib/IR/Verifier.cpp
+++ llvm/lib/IR/Verifier.cpp
@@ -4475,15 +4475,17 @@
     break;
   }
   case Intrinsic::sadd_sat:
-  case Intrinsic::uadd_sat: {
+  case Intrinsic::uadd_sat:
+  case Intrinsic::ssub_sat:
+  case Intrinsic::usub_sat: {
     Value *Op1 = CS.getArgOperand(0);
     Value *Op2 = CS.getArgOperand(1);
-    Assert(
-        Op1->getType()->isIntOrIntVectorTy(),
-        "first operand of [us]add_sat must be an int type or vector of ints");
-    Assert(
-        Op2->getType()->isIntOrIntVectorTy(),
-        "second operand of [us]add_sat must be an int type or vector of ints");
+    Assert(Op1->getType()->isIntOrIntVectorTy(),
+           "first operand of [us][add|sub]_sat must be an int type or vector "
+           "of ints");
+    Assert(Op2->getType()->isIntOrIntVectorTy(),
+           "second operand of [us][add|sub]_sat must be an int type or vector "
+           "of ints");
     break;
   }
   };
Index: llvm/test/CodeGen/X86/ssub_sat.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/ssub_sat.ll
@@ -0,0 +1,267 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mcpu=generic -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=CHECK32
+
+declare i4 @llvm.ssub.sat.i4 (i4, i4)
+declare i32 @llvm.ssub.sat.i32 (i32, i32)
+declare i64 @llvm.ssub.sat.i64 (i64, i64)
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define i32 @func(i32 %x, i32 %y) {
+; CHECK-LABEL: func:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    subl %esi, %ecx
+; CHECK-NEXT:    setns %al
+; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %esi, %edi
+; CHECK-NEXT:    cmovnol %edi, %eax
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: func:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    pushl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    .cfi_offset %esi, -8
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    xorl %ecx, %ecx
+; CHECK32-NEXT:    movl %eax, %esi
+; CHECK32-NEXT:    subl %edx, %esi
+; CHECK32-NEXT:    setns %cl
+; CHECK32-NEXT:    addl $2147483647, %ecx # imm = 0x7FFFFFFF
+; CHECK32-NEXT:    subl %edx, %eax
+; CHECK32-NEXT:    cmovol %ecx, %eax
+; CHECK32-NEXT:    popl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 4
+; CHECK32-NEXT:    retl
+  %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
+  ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) {
+; CHECK-LABEL: func2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %ecx, %ecx
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    subq %rsi, %rax
+; CHECK-NEXT:    setns %cl
+; CHECK-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    subq %rsi, %rdi
+; CHECK-NEXT:    cmovnoq %rdi, %rax
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: func2:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    pushl %ebp
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    pushl %ebx
+; CHECK32-NEXT:    .cfi_def_cfa_offset 12
+; CHECK32-NEXT:    pushl %edi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK32-NEXT:    pushl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 20
+; CHECK32-NEXT:    .cfi_offset %esi, -20
+; CHECK32-NEXT:    .cfi_offset %edi, -16
+; CHECK32-NEXT:    .cfi_offset %ebx, -12
+; CHECK32-NEXT:    .cfi_offset %ebp, -8
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT:    movl %ebx, %ebp
+; CHECK32-NEXT:    sbbl %esi, %ebp
+; CHECK32-NEXT:    movl %ebp, %eax
+; CHECK32-NEXT:    sarl $31, %eax
+; CHECK32-NEXT:    xorl %ecx, %ecx
+; CHECK32-NEXT:    testl %ebp, %ebp
+; CHECK32-NEXT:    setns %cl
+; CHECK32-NEXT:    movl %ecx, %edx
+; CHECK32-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; CHECK32-NEXT:    testl %ebx, %ebx
+; CHECK32-NEXT:    setns %bl
+; CHECK32-NEXT:    cmpb %cl, %bl
+; CHECK32-NEXT:    setne %cl
+; CHECK32-NEXT:    testl %esi, %esi
+; CHECK32-NEXT:    setns %ch
+; CHECK32-NEXT:    cmpb %ch, %bl
+; CHECK32-NEXT:    setne %ch
+; CHECK32-NEXT:    testb %cl, %ch
+; CHECK32-NEXT:    cmovel %ebp, %edx
+; CHECK32-NEXT:    cmovel %edi, %eax
+; CHECK32-NEXT:    popl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK32-NEXT:    popl %edi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 12
+; CHECK32-NEXT:    popl %ebx
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    popl %ebp
+; CHECK32-NEXT:    .cfi_def_cfa_offset 4
+; CHECK32-NEXT:    retl
+  %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
+  ret i64 %tmp;
+}
+
+define i4 @func3(i4 %x, i4 %y) {
+; CHECK-LABEL: func3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shlb $4, %sil
+; CHECK-NEXT:    shlb $4, %al
+; CHECK-NEXT:    movl %eax, %ecx
+; CHECK-NEXT:    subb %sil, %cl
+; CHECK-NEXT:    setns %cl
+; CHECK-NEXT:    subb %sil, %al
+; CHECK-NEXT:    jno .LBB2_2
+; CHECK-NEXT:    # %bb.1:
+; CHECK-NEXT:    addb $127, %cl
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    .LBB2_2:
+; CHECK-NEXT:    sarb $4, %al
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: func3:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %dl
+; CHECK32-NEXT:    shlb $4, %dl
+; CHECK32-NEXT:    shlb $4, %al
+; CHECK32-NEXT:    movl %eax, %ecx
+; CHECK32-NEXT:    subb %dl, %cl
+; CHECK32-NEXT:    setns %cl
+; CHECK32-NEXT:    subb %dl, %al
+; CHECK32-NEXT:    jno .LBB2_2
+; CHECK32-NEXT:    # %bb.1:
+; CHECK32-NEXT:    addb $127, %cl
+; CHECK32-NEXT:    movl %ecx, %eax
+; CHECK32-NEXT:    .LBB2_2:
+; CHECK32-NEXT:    sarb $4, %al
+; CHECK32-NEXT:    retl
+  %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);
+  ret i4 %tmp;
+}
+
+define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: vec:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,2,3]
+; CHECK-NEXT:    movd %xmm2, %ecx
+; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
+; CHECK-NEXT:    movd %xmm2, %r8d
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    movl %r8d, %esi
+; CHECK-NEXT:    subl %ecx, %esi
+; CHECK-NEXT:    setns %dl
+; CHECK-NEXT:    addl $2147483647, %edx # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %ecx, %r8d
+; CHECK-NEXT:    cmovol %edx, %r8d
+; CHECK-NEXT:    movd %xmm1, %edx
+; CHECK-NEXT:    movd %xmm0, %ecx
+; CHECK-NEXT:    xorl %esi, %esi
+; CHECK-NEXT:    movl %ecx, %edi
+; CHECK-NEXT:    subl %edx, %edi
+; CHECK-NEXT:    setns %sil
+; CHECK-NEXT:    addl $2147483647, %esi # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %edx, %ecx
+; CHECK-NEXT:    cmovol %esi, %ecx
+; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; CHECK-NEXT:    movd %xmm2, %edx
+; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; CHECK-NEXT:    movd %xmm2, %eax
+; CHECK-NEXT:    xorl %edi, %edi
+; CHECK-NEXT:    movl %eax, %esi
+; CHECK-NEXT:    subl %edx, %esi
+; CHECK-NEXT:    setns %dil
+; CHECK-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %edx, %eax
+; CHECK-NEXT:    cmovol %edi, %eax
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; CHECK-NEXT:    movd %xmm1, %r9d
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-NEXT:    movd %xmm0, %edx
+; CHECK-NEXT:    xorl %edi, %edi
+; CHECK-NEXT:    movl %edx, %esi
+; CHECK-NEXT:    subl %r9d, %esi
+; CHECK-NEXT:    setns %dil
+; CHECK-NEXT:    addl $2147483647, %edi # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %r9d, %edx
+; CHECK-NEXT:    cmovol %edi, %edx
+; CHECK-NEXT:    movd %edx, %xmm0
+; CHECK-NEXT:    movd %eax, %xmm1
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT:    movd %ecx, %xmm0
+; CHECK-NEXT:    movd %r8d, %xmm2
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: vec:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    pushl %ebp
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    pushl %ebx
+; CHECK32-NEXT:    .cfi_def_cfa_offset 12
+; CHECK32-NEXT:    pushl %edi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK32-NEXT:    pushl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 20
+; CHECK32-NEXT:    .cfi_offset %esi, -20
+; CHECK32-NEXT:    .cfi_offset %edi, -16
+; CHECK32-NEXT:    .cfi_offset %ebx, -12
+; CHECK32-NEXT:    .cfi_offset %ebp, -8
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    xorl %eax, %eax
+; CHECK32-NEXT:    movl %ecx, %esi
+; CHECK32-NEXT:    subl %edx, %esi
+; CHECK32-NEXT:    setns %al
+; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK32-NEXT:    subl %edx, %ecx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    cmovol %eax, %ecx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT:    xorl %eax, %eax
+; CHECK32-NEXT:    movl %edx, %edi
+; CHECK32-NEXT:    subl %esi, %edi
+; CHECK32-NEXT:    setns %al
+; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK32-NEXT:    subl %esi, %edx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT:    cmovol %eax, %edx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT:    xorl %eax, %eax
+; CHECK32-NEXT:    movl %esi, %ebx
+; CHECK32-NEXT:    subl %edi, %ebx
+; CHECK32-NEXT:    setns %al
+; CHECK32-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK32-NEXT:    subl %edi, %esi
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT:    cmovol %eax, %esi
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    xorl %ebx, %ebx
+; CHECK32-NEXT:    movl %edi, %ebp
+; CHECK32-NEXT:    subl %eax, %ebp
+; CHECK32-NEXT:    setns %bl
+; CHECK32-NEXT:    addl $2147483647, %ebx # imm = 0x7FFFFFFF
+; CHECK32-NEXT:    subl %eax, %edi
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    cmovol %ebx, %edi
+; CHECK32-NEXT:    movl %ecx, 12(%eax)
+; CHECK32-NEXT:    movl %edx, 8(%eax)
+; CHECK32-NEXT:    movl %esi, 4(%eax)
+; CHECK32-NEXT:    movl %edi, (%eax)
+; CHECK32-NEXT:    popl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK32-NEXT:    popl %edi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 12
+; CHECK32-NEXT:    popl %ebx
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    popl %ebp
+; CHECK32-NEXT:    .cfi_def_cfa_offset 4
+; CHECK32-NEXT:    retl $4
+  %tmp = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
+  ret <4 x i32> %tmp;
+}
Index: llvm/test/CodeGen/X86/usub_sat.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/usub_sat.ll
@@ -0,0 +1,158 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
+; RUN: llc < %s -mcpu=generic -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=CHECK32
+
+declare i4 @llvm.usub.sat.i4 (i4, i4)
+declare i32 @llvm.usub.sat.i32 (i32, i32)
+declare i64 @llvm.usub.sat.i64 (i64, i64)
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
+
+define i32 @func(i32 %x, i32 %y) {
+; CHECK-LABEL: func:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subl %esi, %edi
+; CHECK-NEXT:    cmovael %edi, %eax
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: func:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    xorl %ecx, %ecx
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    cmovbl %ecx, %eax
+; CHECK32-NEXT:    retl
+  %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
+  ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) {
+; CHECK-LABEL: func2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subq %rsi, %rdi
+; CHECK-NEXT:    cmovaeq %rdi, %rax
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: func2:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    xorl %ecx, %ecx
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    cmovbl %ecx, %edx
+; CHECK32-NEXT:    cmovbl %ecx, %eax
+; CHECK32-NEXT:    retl
+  %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
+  ret i64 %tmp;
+}
+
+define i4 @func3(i4 %x, i4 %y) {
+; CHECK-LABEL: func3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    shlb $4, %sil
+; CHECK-NEXT:    shlb $4, %al
+; CHECK-NEXT:    subb %sil, %al
+; CHECK-NEXT:    jae .LBB2_2
+; CHECK-NEXT:    # %bb.1:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    .LBB2_2:
+; CHECK-NEXT:    shrb $4, %al
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: func3:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; CHECK32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; CHECK32-NEXT:    shlb $4, %cl
+; CHECK32-NEXT:    shlb $4, %al
+; CHECK32-NEXT:    subb %cl, %al
+; CHECK32-NEXT:    jae .LBB2_2
+; CHECK32-NEXT:    # %bb.1:
+; CHECK32-NEXT:    xorl %eax, %eax
+; CHECK32-NEXT:    .LBB2_2:
+; CHECK32-NEXT:    shrb $4, %al
+; CHECK32-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK32-NEXT:    retl
+  %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
+  ret i4 %tmp;
+}
+
+define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: vec:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; CHECK-NEXT:    movd %xmm2, %eax
+; CHECK-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
+; CHECK-NEXT:    movd %xmm2, %ecx
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    subl %eax, %ecx
+; CHECK-NEXT:    cmovbl %edx, %ecx
+; CHECK-NEXT:    movd %ecx, %xmm2
+; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; CHECK-NEXT:    movd %xmm3, %eax
+; CHECK-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; CHECK-NEXT:    movd %xmm3, %ecx
+; CHECK-NEXT:    subl %eax, %ecx
+; CHECK-NEXT:    cmovbl %edx, %ecx
+; CHECK-NEXT:    movd %ecx, %xmm3
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; CHECK-NEXT:    movd %xmm1, %eax
+; CHECK-NEXT:    movd %xmm0, %ecx
+; CHECK-NEXT:    subl %eax, %ecx
+; CHECK-NEXT:    cmovbl %edx, %ecx
+; CHECK-NEXT:    movd %ecx, %xmm2
+; CHECK-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; CHECK-NEXT:    movd %xmm1, %eax
+; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; CHECK-NEXT:    movd %xmm0, %ecx
+; CHECK-NEXT:    subl %eax, %ecx
+; CHECK-NEXT:    cmovbl %edx, %ecx
+; CHECK-NEXT:    movd %ecx, %xmm0
+; CHECK-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; CHECK-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; CHECK-NEXT:    movdqa %xmm2, %xmm0
+; CHECK-NEXT:    retq
+;
+; CHECK32-LABEL: vec:
+; CHECK32:       # %bb.0:
+; CHECK32-NEXT:    pushl %ebx
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    pushl %edi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 12
+; CHECK32-NEXT:    pushl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK32-NEXT:    .cfi_offset %esi, -16
+; CHECK32-NEXT:    .cfi_offset %edi, -12
+; CHECK32-NEXT:    .cfi_offset %ebx, -8
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT:    xorl %ebx, %ebx
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT:    cmovbl %ebx, %edi
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT:    cmovbl %ebx, %esi
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT:    cmovbl %ebx, %edx
+; CHECK32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT:    cmovbl %ebx, %ecx
+; CHECK32-NEXT:    movl %ecx, 12(%eax)
+; CHECK32-NEXT:    movl %edx, 8(%eax)
+; CHECK32-NEXT:    movl %esi, 4(%eax)
+; CHECK32-NEXT:    movl %edi, (%eax)
+; CHECK32-NEXT:    popl %esi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 12
+; CHECK32-NEXT:    popl %edi
+; CHECK32-NEXT:    .cfi_def_cfa_offset 8
+; CHECK32-NEXT:    popl %ebx
+; CHECK32-NEXT:    .cfi_def_cfa_offset 4
+; CHECK32-NEXT:    retl $4
+  %tmp = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
+  ret <4 x i32> %tmp;
+}
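Supplementary sketch (not part of the patch): the standalone C++ program below mirrors, on plain scalars, the selection logic built by getExpandedSaturationAdditionSubtraction and the promote-and-shift trick used by PromoteIntRes_ADDSUBSAT. The function names (usub_sat_u32, ssub_sat_i32, ssub_sat_i8_via_i32) are illustrative only, and the GCC/Clang __builtin_sub_overflow builtin stands in for the ISD::USUBO/ISD::SSUBO nodes; treat it as an assumption-laden illustration of the semantics, not the code a target actually executes.

#include <cstdint>
#include <cstdio>
#include <limits>

// Mirrors the USUBSAT expansion: compute the difference plus an overflow
// flag (ISD::USUBO), then select the unsigned minimum (0) on overflow.
static uint32_t usub_sat_u32(uint32_t x, uint32_t y) {
  uint32_t Diff;
  bool Overflow = __builtin_sub_overflow(x, y, &Diff);
  return Overflow ? 0u : Diff;
}

// Mirrors the SSUBSAT expansion: on signed overflow (ISD::SSUBO), a negative
// wrapped difference means the true result overflowed upward, so clamp to
// INT32_MAX; otherwise it overflowed downward, so clamp to INT32_MIN.
static int32_t ssub_sat_i32(int32_t x, int32_t y) {
  int32_t Diff;
  bool Overflow = __builtin_sub_overflow(x, y, &Diff);
  if (!Overflow)
    return Diff;
  return Diff < 0 ? std::numeric_limits<int32_t>::max()
                  : std::numeric_limits<int32_t>::min();
}

// Mirrors PromoteIntRes_ADDSUBSAT for a hypothetical i8-to-i32 promotion:
// any_extend and shift the operands into the top 8 bits, saturate at the
// wider width, then arithmetic-shift-right to recover the i8 result.
static int8_t ssub_sat_i8_via_i32(int8_t x, int8_t y) {
  int32_t X = (int32_t)((uint32_t)(uint8_t)x << 24);
  int32_t Y = (int32_t)((uint32_t)(uint8_t)y << 24);
  return (int8_t)(ssub_sat_i32(X, Y) >> 24);
}

int main() {
  printf("%u\n", usub_sat_u32(1u, 2u));                  // 0
  printf("%d\n", ssub_sat_i32(INT32_MIN, 1));            // -2147483648
  printf("%d\n", ssub_sat_i32(INT32_MAX, -1));           // 2147483647
  printf("%d\n", (int)ssub_sat_i8_via_i32(INT8_MIN, 1)); // -128
  printf("%d\n", (int)ssub_sat_i8_via_i32(INT8_MAX, -1)); // 127
  return 0;
}

The shlb $4 followed by sarb $4 (signed) or shrb $4 (unsigned) pairs in the func3 tests above are this same promote-then-shift-back pattern, with i4 promoted to i8 instead of i8 to i32.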