diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1966,6 +1966,23 @@
     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
     ISD::CondCode CCVal = CC->get();
 
+    // Special case for a select of 2 constants that have a difference of 1.
+    // Normally this is done by DAGCombine, but if the select is introduced by
+    // type legalization or op legalization, we miss it. Restricting to SETLT
+    // case for now because that is what signed saturating add/sub need.
+    // FIXME: We don't need the condition to be SETLT or even a SETCC,
+    // but we would probably want to swap the true/false values if the condition
+    // is SETGE/SETLE to avoid an XORI.
+    if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
+        CCVal == ISD::SETLT) {
+      const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
+      const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
+      if (TrueVal - 1 == FalseVal)
+        return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
+      if (TrueVal + 1 == FalseVal)
+        return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
+    }
+
     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
     SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
--- a/llvm/test/CodeGen/RISCV/sadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -14,18 +14,15 @@
 ; RV32I-LABEL: func:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: add a3, a0, a1
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: bgez a3, .LBB0_2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slt a2, a0, a2
+; RV32I-NEXT: slti a1, a1, 0
+; RV32I-NEXT: beq a1, a2, .LBB0_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: lui a1, 524288
+; RV32I-NEXT: sub a0, a1, a0
 ; RV32I-NEXT: .LBB0_2:
-; RV32I-NEXT: slt a2, a3, a2
-; RV32I-NEXT: slti a1, a1, 0
-; RV32I-NEXT: bne a1, a2, .LBB0_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: .LBB0_4:
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func:
@@ -48,18 +45,15 @@
 ; RV32IZbb-LABEL: func:
 ; RV32IZbb: # %bb.0:
 ; RV32IZbb-NEXT: mv a2, a0
-; RV32IZbb-NEXT: add a3, a0, a1
-; RV32IZbb-NEXT: lui a0, 524288
-; RV32IZbb-NEXT: bgez a3, .LBB0_2
+; RV32IZbb-NEXT: add a0, a0, a1
+; RV32IZbb-NEXT: slt a2, a0, a2
+; RV32IZbb-NEXT: slti a1, a1, 0
+; RV32IZbb-NEXT: beq a1, a2, .LBB0_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a0, a0, -1
+; RV32IZbb-NEXT: slti a0, a0, 0
+; RV32IZbb-NEXT: lui a1, 524288
+; RV32IZbb-NEXT: sub a0, a1, a0
 ; RV32IZbb-NEXT: .LBB0_2:
-; RV32IZbb-NEXT: slt a2, a3, a2
-; RV32IZbb-NEXT: slti a1, a1, 0
-; RV32IZbb-NEXT: bne a1, a2, .LBB0_4
-; RV32IZbb-NEXT: # %bb.3:
-; RV32IZbb-NEXT: mv a0, a3
-; RV32IZbb-NEXT: .LBB0_4:
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func:
@@ -82,40 +76,34 @@
 ; RV32I-NEXT: add a5, a4, a3
 ; RV32I-NEXT: add a0, a0, a2
 ; RV32I-NEXT: sltu a1, a0, a1
-; RV32I-NEXT: add a2, a5, a1
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: bgez a2, .LBB1_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: xor a5, a4, a2
+; RV32I-NEXT: add a1, a5, a1
+; RV32I-NEXT: xor a2, a4, a1
 ; RV32I-NEXT: xor a3, a4, a3
 ; RV32I-NEXT: not a3, a3
-; RV32I-NEXT: and a3, a3, a5
-; RV32I-NEXT: bltz a3, .LBB1_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: and a2, a3, a2
+; RV32I-NEXT: bgez a2, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slti a0, a1, 0
+; RV32I-NEXT: lui a2, 524288
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: srai a0, a1, 31
 ; RV32I-NEXT: mv a1, a2
-; RV32I-NEXT: ret
-; RV32I-NEXT: .LBB1_4:
-; RV32I-NEXT: srai a0, a2, 31
+; RV32I-NEXT: .LBB1_2:
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func2:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: mv a2, a0
-; RV64I-NEXT: add a3, a0, a1
-; RV64I-NEXT: addi a0, zero, -1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: bgez a3, .LBB1_2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: slt a2, a0, a2
+; RV64I-NEXT: slti a1, a1, 0
+; RV64I-NEXT: beq a1, a2, .LBB1_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: slli a1, a1, 63
+; RV64I-NEXT: sub a0, a1, a0
 ; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: slt a2, a3, a2
-; RV64I-NEXT: slti a1, a1, 0
-; RV64I-NEXT: bne a1, a2, .LBB1_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: mv a0, a3
-; RV64I-NEXT: .LBB1_4:
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func2:
@@ -125,39 +113,33 @@
 ; RV32IZbb-NEXT: add a5, a4, a3
 ; RV32IZbb-NEXT: add a0, a0, a2
 ; RV32IZbb-NEXT: sltu a1, a0, a1
-; RV32IZbb-NEXT: add a2, a5, a1
-; RV32IZbb-NEXT: lui a1, 524288
+; RV32IZbb-NEXT: add a1, a5, a1
+; RV32IZbb-NEXT: xor a2, a4, a1
+; RV32IZbb-NEXT: xor a3, a4, a3
+; RV32IZbb-NEXT: andn a2, a2, a3
 ; RV32IZbb-NEXT: bgez a2, .LBB1_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a1, a1, -1
-; RV32IZbb-NEXT: .LBB1_2:
-; RV32IZbb-NEXT: xor a5, a4, a2
-; RV32IZbb-NEXT: xor a3, a4, a3
-; RV32IZbb-NEXT: andn a3, a5, a3
-; RV32IZbb-NEXT: bltz a3, .LBB1_4
-; RV32IZbb-NEXT: # %bb.3:
+; RV32IZbb-NEXT: slti a0, a1, 0
+; RV32IZbb-NEXT: lui a2, 524288
+; RV32IZbb-NEXT: sub a2, a2, a0
+; RV32IZbb-NEXT: srai a0, a1, 31
 ; RV32IZbb-NEXT: mv a1, a2
-; RV32IZbb-NEXT: ret
-; RV32IZbb-NEXT: .LBB1_4:
-; RV32IZbb-NEXT: srai a0, a2, 31
+; RV32IZbb-NEXT: .LBB1_2:
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func2:
 ; RV64IZbb: # %bb.0:
 ; RV64IZbb-NEXT: mv a2, a0
-; RV64IZbb-NEXT: add a3, a0, a1
-; RV64IZbb-NEXT: addi a0, zero, -1
-; RV64IZbb-NEXT: slli a0, a0, 63
-; RV64IZbb-NEXT: bgez a3, .LBB1_2
+; RV64IZbb-NEXT: add a0, a0, a1
+; RV64IZbb-NEXT: slt a2, a0, a2
+; RV64IZbb-NEXT: slti a1, a1, 0
+; RV64IZbb-NEXT: beq a1, a2, .LBB1_2
 ; RV64IZbb-NEXT: # %bb.1:
-; RV64IZbb-NEXT: addi a0, a0, -1
+; RV64IZbb-NEXT: slti a0, a0, 0
+; RV64IZbb-NEXT: addi a1, zero, -1
+; RV64IZbb-NEXT: slli a1, a1, 63
+; RV64IZbb-NEXT: sub a0, a1, a0
 ; RV64IZbb-NEXT: .LBB1_2:
-; RV64IZbb-NEXT: slt a2, a3, a2
-; RV64IZbb-NEXT: slti a1, a1, 0
-; RV64IZbb-NEXT: bne a1, a2, .LBB1_4
-; RV64IZbb-NEXT: # %bb.3:
-; RV64IZbb-NEXT: mv a0, a3
-; RV64IZbb-NEXT: .LBB1_4:
 ; RV64IZbb-NEXT: ret
   %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -14,19 +14,16 @@
 ; RV32I-LABEL: func32:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: mv a3, a0
-; RV32I-NEXT: mul a2, a1, a2
-; RV32I-NEXT: add a1, a0, a2
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: bgez a1, .LBB0_2
+; RV32I-NEXT: mul a1, a1, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slt a2, a0, a3
+; RV32I-NEXT: slti a1, a1, 0
+; RV32I-NEXT: beq a1, a2, .LBB0_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: lui a1, 524288
+; RV32I-NEXT: sub a0, a1, a0
 ; RV32I-NEXT: .LBB0_2:
-; RV32I-NEXT: slt a3, a1, a3
-; RV32I-NEXT: slti a2, a2, 0
-; RV32I-NEXT: bne a2, a3, .LBB0_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB0_4:
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func32:
@@ -51,19 +48,16 @@
 ; RV32IZbb-LABEL: func32:
 ; RV32IZbb: # %bb.0:
 ; RV32IZbb-NEXT: mv a3, a0
-; RV32IZbb-NEXT: mul a2, a1, a2
-; RV32IZbb-NEXT: add a1, a0, a2
-; RV32IZbb-NEXT: lui a0, 524288
-; RV32IZbb-NEXT: bgez a1, .LBB0_2
+; RV32IZbb-NEXT: mul a1, a1, a2
+; RV32IZbb-NEXT: add a0, a0, a1
+; RV32IZbb-NEXT: slt a2, a0, a3
+; RV32IZbb-NEXT: slti a1, a1, 0
+; RV32IZbb-NEXT: beq a1, a2, .LBB0_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a0, a0, -1
+; RV32IZbb-NEXT: slti a0, a0, 0
+; RV32IZbb-NEXT: lui a1, 524288
+; RV32IZbb-NEXT: sub a0, a1, a0
 ; RV32IZbb-NEXT: .LBB0_2:
-; RV32IZbb-NEXT: slt a3, a1, a3
-; RV32IZbb-NEXT: slti a2, a2, 0
-; RV32IZbb-NEXT: bne a2, a3, .LBB0_4
-; RV32IZbb-NEXT: # %bb.3:
-; RV32IZbb-NEXT: mv a0, a1
-; RV32IZbb-NEXT: .LBB0_4:
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func32:
@@ -89,40 +83,34 @@
 ; RV32I-NEXT: add a3, a2, a5
 ; RV32I-NEXT: add a0, a0, a4
 ; RV32I-NEXT: sltu a1, a0, a1
-; RV32I-NEXT: add a3, a3, a1
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: bgez a3, .LBB1_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: xor a4, a2, a3
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: xor a3, a2, a1
 ; RV32I-NEXT: xor a2, a2, a5
 ; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: and a2, a2, a4
-; RV32I-NEXT: bltz a2, .LBB1_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: mv a1, a3
-; RV32I-NEXT: ret
-; RV32I-NEXT: .LBB1_4:
-; RV32I-NEXT: srai a0, a3, 31
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: bgez a2, .LBB1_2
+; RV32I-NEXT: # %bb.1:
+; RV32I-NEXT: slti a0, a1, 0
+; RV32I-NEXT: lui a2, 524288
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: srai a0, a1, 31
+; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: .LBB1_2:
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func64:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: add a3, a0, a2
-; RV64I-NEXT: addi a0, zero, -1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: bgez a3, .LBB1_2
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: slt a1, a0, a1
+; RV64I-NEXT: slti a2, a2, 0
+; RV64I-NEXT: beq a2, a1, .LBB1_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: slli a1, a1, 63
+; RV64I-NEXT: sub a0, a1, a0
 ; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: slt a1, a3, a1
-; RV64I-NEXT: slti a2, a2, 0
-; RV64I-NEXT: bne a2, a1, .LBB1_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: mv a0, a3
-; RV64I-NEXT: .LBB1_4:
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func64:
@@ -132,39 +120,33 @@
 ; RV32IZbb-NEXT: add a3, a2, a5
 ; RV32IZbb-NEXT: add a0, a0, a4
 ; RV32IZbb-NEXT: sltu a1, a0, a1
-; RV32IZbb-NEXT: add a3, a3, a1
-; RV32IZbb-NEXT: lui a1, 524288
-; RV32IZbb-NEXT: bgez a3, .LBB1_2
+; RV32IZbb-NEXT: add a1, a3, a1
+; RV32IZbb-NEXT: xor a3, a2, a1
+; RV32IZbb-NEXT: xor a2, a2, a5
+; RV32IZbb-NEXT: andn a2, a3, a2
+; RV32IZbb-NEXT: bgez a2, .LBB1_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a1, a1, -1
+; RV32IZbb-NEXT: slti a0, a1, 0
+; RV32IZbb-NEXT: lui a2, 524288
+; RV32IZbb-NEXT: sub a2, a2, a0
+; RV32IZbb-NEXT: srai a0, a1, 31
+; RV32IZbb-NEXT: mv a1, a2
 ; RV32IZbb-NEXT: .LBB1_2:
-; RV32IZbb-NEXT: xor a4, a2, a3
-; RV32IZbb-NEXT: xor a2, a2, a5
-; RV32IZbb-NEXT: andn a2, a4, a2
-; RV32IZbb-NEXT: bltz a2, .LBB1_4
-; RV32IZbb-NEXT: # %bb.3:
-; RV32IZbb-NEXT: mv a1, a3
-; RV32IZbb-NEXT: ret
-; RV32IZbb-NEXT: .LBB1_4:
-; RV32IZbb-NEXT: srai a0, a3, 31
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func64:
 ; RV64IZbb: # %bb.0:
 ; RV64IZbb-NEXT: mv a1, a0
-; RV64IZbb-NEXT: add a3, a0, a2
-; RV64IZbb-NEXT: addi a0, zero, -1
-; RV64IZbb-NEXT: slli a0, a0, 63
-; RV64IZbb-NEXT: bgez a3, .LBB1_2
+; RV64IZbb-NEXT: add a0, a0, a2
+; RV64IZbb-NEXT: slt a1, a0, a1
+; RV64IZbb-NEXT: slti a2, a2, 0
+; RV64IZbb-NEXT: beq a2, a1, .LBB1_2
 ; RV64IZbb-NEXT: # %bb.1:
-; RV64IZbb-NEXT: addi a0, a0, -1
+; RV64IZbb-NEXT: slti a0, a0, 0
+; RV64IZbb-NEXT: addi a1, zero, -1
+; RV64IZbb-NEXT: slli a1, a1, 63
+; RV64IZbb-NEXT: sub a0, a1, a0
 ; RV64IZbb-NEXT: .LBB1_2:
-; RV64IZbb-NEXT: slt a1, a3, a1
-; RV64IZbb-NEXT: slti a2, a2, 0
-; RV64IZbb-NEXT: bne a2, a1, .LBB1_4
-; RV64IZbb-NEXT: # %bb.3:
-; RV64IZbb-NEXT: mv a0, a3
-; RV64IZbb-NEXT: .LBB1_4:
 ; RV64IZbb-NEXT: ret
   %a = mul i64 %y, %z
   %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %z)
diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll
--- a/llvm/test/CodeGen/RISCV/ssub_sat.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll
@@ -14,18 +14,15 @@
 ; RV32I-LABEL: func:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: sub a3, a0, a1
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: bgez a3, .LBB0_2
+; RV32I-NEXT: sgtz a3, a1
+; RV32I-NEXT: sub a0, a0, a1
+; RV32I-NEXT: slt a1, a0, a2
+; RV32I-NEXT: beq a3, a1, .LBB0_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: lui a1, 524288
+; RV32I-NEXT: sub a0, a1, a0
 ; RV32I-NEXT: .LBB0_2:
-; RV32I-NEXT: sgtz a1, a1
-; RV32I-NEXT: slt a2, a3, a2
-; RV32I-NEXT: bne a1, a2, .LBB0_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: .LBB0_4:
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func:
@@ -48,18 +45,15 @@
 ; RV32IZbb-LABEL: func:
 ; RV32IZbb: # %bb.0:
 ; RV32IZbb-NEXT: mv a2, a0
-; RV32IZbb-NEXT: sub a3, a0, a1
-; RV32IZbb-NEXT: lui a0, 524288
-; RV32IZbb-NEXT: bgez a3, .LBB0_2
+; RV32IZbb-NEXT: sgtz a3, a1
+; RV32IZbb-NEXT: sub a0, a0, a1
+; RV32IZbb-NEXT: slt a1, a0, a2
+; RV32IZbb-NEXT: beq a3, a1, .LBB0_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a0, a0, -1
+; RV32IZbb-NEXT: slti a0, a0, 0
+; RV32IZbb-NEXT: lui a1, 524288
+; RV32IZbb-NEXT: sub a0, a1, a0
 ; RV32IZbb-NEXT: .LBB0_2:
-; RV32IZbb-NEXT: sgtz a1, a1
-; RV32IZbb-NEXT: slt a2, a3, a2
-; RV32IZbb-NEXT: bne a1, a2, .LBB0_4
-; RV32IZbb-NEXT: # %bb.3:
-; RV32IZbb-NEXT: mv a0, a3
-; RV32IZbb-NEXT: .LBB0_4:
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func:
@@ -80,40 +74,35 @@
 ; RV32I-NEXT: mv a4, a1
 ; RV32I-NEXT: sltu a1, a0, a2
 ; RV32I-NEXT: sub a5, a4, a3
-; RV32I-NEXT: sub a5, a5, a1
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: bgez a5, .LBB1_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: xor a6, a4, a5
+; RV32I-NEXT: sub a1, a5, a1
+; RV32I-NEXT: xor a5, a4, a1
 ; RV32I-NEXT: xor a3, a4, a3
-; RV32I-NEXT: and a3, a3, a6
-; RV32I-NEXT: bltz a3, .LBB1_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: and a3, a3, a5
+; RV32I-NEXT: bltz a3, .LBB1_2
+; RV32I-NEXT: # %bb.1:
 ; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: mv a1, a5
 ; RV32I-NEXT: ret
-; RV32I-NEXT: .LBB1_4:
-; RV32I-NEXT: srai a0, a5, 31
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: slti a0, a1, 0
+; RV32I-NEXT: lui a2, 524288
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: srai a0, a1, 31
+; RV32I-NEXT: mv a1, a2
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func2:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: mv a2, a0
-; RV64I-NEXT: sub a3, a0, a1
-; RV64I-NEXT: addi a0, zero, -1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: bgez a3, .LBB1_2
+; RV64I-NEXT: sgtz a3, a1
+; RV64I-NEXT: sub a0, a0, a1
+; RV64I-NEXT: slt a1, a0, a2
+; RV64I-NEXT: beq a3, a1, .LBB1_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: slli a1, a1, 63
+; RV64I-NEXT: sub a0, a1, a0
 ; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: sgtz a1, a1
-; RV64I-NEXT: slt a2, a3, a2
-; RV64I-NEXT: bne a1, a2, .LBB1_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: mv a0, a3
-; RV64I-NEXT: .LBB1_4:
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func2:
@@ -121,40 +110,35 @@
 ; RV32IZbb-NEXT: mv a4, a1
 ; RV32IZbb-NEXT: sltu a1, a0, a2
 ; RV32IZbb-NEXT: sub a5, a4, a3
-; RV32IZbb-NEXT: sub a5, a5, a1
-; RV32IZbb-NEXT: lui a1, 524288
-; RV32IZbb-NEXT: bgez a5, .LBB1_2
-; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a1, a1, -1
-; RV32IZbb-NEXT: .LBB1_2:
-; RV32IZbb-NEXT: xor a6, a4, a5
+; RV32IZbb-NEXT: sub a1, a5, a1
+; RV32IZbb-NEXT: xor a5, a4, a1
 ; RV32IZbb-NEXT: xor a3, a4, a3
-; RV32IZbb-NEXT: and a3, a3, a6
-; RV32IZbb-NEXT: bltz a3, .LBB1_4
-; RV32IZbb-NEXT: # %bb.3:
+; RV32IZbb-NEXT: and a3, a3, a5
+; RV32IZbb-NEXT: bltz a3, .LBB1_2
+; RV32IZbb-NEXT: # %bb.1:
 ; RV32IZbb-NEXT: sub a0, a0, a2
-; RV32IZbb-NEXT: mv a1, a5
 ; RV32IZbb-NEXT: ret
-; RV32IZbb-NEXT: .LBB1_4:
-; RV32IZbb-NEXT: srai a0, a5, 31
+; RV32IZbb-NEXT: .LBB1_2:
+; RV32IZbb-NEXT: slti a0, a1, 0
+; RV32IZbb-NEXT: lui a2, 524288
+; RV32IZbb-NEXT: sub a2, a2, a0
+; RV32IZbb-NEXT: srai a0, a1, 31
+; RV32IZbb-NEXT: mv a1, a2
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func2:
 ; RV64IZbb: # %bb.0:
 ; RV64IZbb-NEXT: mv a2, a0
-; RV64IZbb-NEXT: sub a3, a0, a1
-; RV64IZbb-NEXT: addi a0, zero, -1
-; RV64IZbb-NEXT: slli a0, a0, 63
-; RV64IZbb-NEXT: bgez a3, .LBB1_2
+; RV64IZbb-NEXT: sgtz a3, a1
+; RV64IZbb-NEXT: sub a0, a0, a1
+; RV64IZbb-NEXT: slt a1, a0, a2
+; RV64IZbb-NEXT: beq a3, a1, .LBB1_2
 ; RV64IZbb-NEXT: # %bb.1:
-; RV64IZbb-NEXT: addi a0, a0, -1
+; RV64IZbb-NEXT: slti a0, a0, 0
+; RV64IZbb-NEXT: addi a1, zero, -1
+; RV64IZbb-NEXT: slli a1, a1, 63
+; RV64IZbb-NEXT: sub a0, a1, a0
 ; RV64IZbb-NEXT: .LBB1_2:
-; RV64IZbb-NEXT: sgtz a1, a1
-; RV64IZbb-NEXT: slt a2, a3, a2
-; RV64IZbb-NEXT: bne a1, a2, .LBB1_4
-; RV64IZbb-NEXT: # %bb.3:
-; RV64IZbb-NEXT: mv a0, a3
-; RV64IZbb-NEXT: .LBB1_4:
 ; RV64IZbb-NEXT: ret
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
--- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -14,19 +14,16 @@
 ; RV32I-LABEL: func32:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: mv a3, a0
-; RV32I-NEXT: mul a2, a1, a2
-; RV32I-NEXT: sub a1, a0, a2
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: bgez a1, .LBB0_2
+; RV32I-NEXT: mul a0, a1, a2
+; RV32I-NEXT: sgtz a1, a0
+; RV32I-NEXT: sub a0, a3, a0
+; RV32I-NEXT: slt a2, a0, a3
+; RV32I-NEXT: beq a1, a2, .LBB0_2
 ; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: lui a1, 524288
+; RV32I-NEXT: sub a0, a1, a0
 ; RV32I-NEXT: .LBB0_2:
-; RV32I-NEXT: sgtz a2, a2
-; RV32I-NEXT: slt a3, a1, a3
-; RV32I-NEXT: bne a2, a3, .LBB0_4
-; RV32I-NEXT: # %bb.3:
-; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: .LBB0_4:
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func32:
@@ -51,19 +48,16 @@
 ; RV32IZbb-LABEL: func32:
 ; RV32IZbb: # %bb.0:
 ; RV32IZbb-NEXT: mv a3, a0
-; RV32IZbb-NEXT: mul a2, a1, a2
-; RV32IZbb-NEXT: sub a1, a0, a2
-; RV32IZbb-NEXT: lui a0, 524288
-; RV32IZbb-NEXT: bgez a1, .LBB0_2
+; RV32IZbb-NEXT: mul a0, a1, a2
+; RV32IZbb-NEXT: sgtz a1, a0
+; RV32IZbb-NEXT: sub a0, a3, a0
+; RV32IZbb-NEXT: slt a2, a0, a3
+; RV32IZbb-NEXT: beq a1, a2, .LBB0_2
 ; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a0, a0, -1
+; RV32IZbb-NEXT: slti a0, a0, 0
+; RV32IZbb-NEXT: lui a1, 524288
+; RV32IZbb-NEXT: sub a0, a1, a0
 ; RV32IZbb-NEXT: .LBB0_2:
-; RV32IZbb-NEXT: sgtz a2, a2
-; RV32IZbb-NEXT: slt a3, a1, a3
-; RV32IZbb-NEXT: bne a2, a3, .LBB0_4
-; RV32IZbb-NEXT: # %bb.3:
-; RV32IZbb-NEXT: mv a0, a1
-; RV32IZbb-NEXT: .LBB0_4:
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func32:
@@ -87,40 +81,35 @@
 ; RV32I-NEXT: mv a2, a1
 ; RV32I-NEXT: sltu a1, a0, a4
 ; RV32I-NEXT: sub a3, a2, a5
-; RV32I-NEXT: sub a3, a3, a1
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: bgez a3, .LBB1_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: xor a6, a2, a3
+; RV32I-NEXT: sub a1, a3, a1
+; RV32I-NEXT: xor a3, a2, a1
 ; RV32I-NEXT: xor a2, a2, a5
-; RV32I-NEXT: and a2, a2, a6
-; RV32I-NEXT: bltz a2, .LBB1_4
-; RV32I-NEXT: # %bb.3:
+; RV32I-NEXT: and a2, a2, a3
+; RV32I-NEXT: bltz a2, .LBB1_2
+; RV32I-NEXT: # %bb.1:
 ; RV32I-NEXT: sub a0, a0, a4
-; RV32I-NEXT: mv a1, a3
 ; RV32I-NEXT: ret
-; RV32I-NEXT: .LBB1_4:
-; RV32I-NEXT: srai a0, a3, 31
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: slti a0, a1, 0
+; RV32I-NEXT: lui a2, 524288
+; RV32I-NEXT: sub a2, a2, a0
+; RV32I-NEXT: srai a0, a1, 31
+; RV32I-NEXT: mv a1, a2
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: func64:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: sub a3, a0, a2
-; RV64I-NEXT: addi a0, zero, -1
-; RV64I-NEXT: slli a0, a0, 63
-; RV64I-NEXT: bgez a3, .LBB1_2
+; RV64I-NEXT: sgtz a3, a2
+; RV64I-NEXT: sub a0, a0, a2
+; RV64I-NEXT: slt a1, a0, a1
+; RV64I-NEXT: beq a3, a1, .LBB1_2
 ; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: slli a1, a1, 63
+; RV64I-NEXT: sub a0, a1, a0
 ; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: sgtz a2, a2
-; RV64I-NEXT: slt a1, a3, a1
-; RV64I-NEXT: bne a2, a1, .LBB1_4
-; RV64I-NEXT: # %bb.3:
-; RV64I-NEXT: mv a0, a3
-; RV64I-NEXT: .LBB1_4:
 ; RV64I-NEXT: ret
 ;
 ; RV32IZbb-LABEL: func64:
@@ -128,40 +117,35 @@
 ; RV32IZbb-NEXT: mv a2, a1
 ; RV32IZbb-NEXT: sltu a1, a0, a4
 ; RV32IZbb-NEXT: sub a3, a2, a5
-; RV32IZbb-NEXT: sub a3, a3, a1
-; RV32IZbb-NEXT: lui a1, 524288
-; RV32IZbb-NEXT: bgez a3, .LBB1_2
-; RV32IZbb-NEXT: # %bb.1:
-; RV32IZbb-NEXT: addi a1, a1, -1
-; RV32IZbb-NEXT: .LBB1_2:
-; RV32IZbb-NEXT: xor a6, a2, a3
+; RV32IZbb-NEXT: sub a1, a3, a1
+; RV32IZbb-NEXT: xor a3, a2, a1
 ; RV32IZbb-NEXT: xor a2, a2, a5
-; RV32IZbb-NEXT: and a2, a2, a6
-; RV32IZbb-NEXT: bltz a2, .LBB1_4
-; RV32IZbb-NEXT: # %bb.3:
+; RV32IZbb-NEXT: and a2, a2, a3
+; RV32IZbb-NEXT: bltz a2, .LBB1_2
+; RV32IZbb-NEXT: # %bb.1:
 ; RV32IZbb-NEXT: sub a0, a0, a4
-; RV32IZbb-NEXT: mv a1, a3
 ; RV32IZbb-NEXT: ret
-; RV32IZbb-NEXT: .LBB1_4:
-; RV32IZbb-NEXT: srai a0, a3, 31
+; RV32IZbb-NEXT: .LBB1_2:
+; RV32IZbb-NEXT: slti a0, a1, 0
+; RV32IZbb-NEXT: lui a2, 524288
+; RV32IZbb-NEXT: sub a2, a2, a0
+; RV32IZbb-NEXT: srai a0, a1, 31
+; RV32IZbb-NEXT: mv a1, a2
 ; RV32IZbb-NEXT: ret
 ;
 ; RV64IZbb-LABEL: func64:
 ; RV64IZbb: # %bb.0:
 ; RV64IZbb-NEXT: mv a1, a0
-; RV64IZbb-NEXT: sub a3, a0, a2
-; RV64IZbb-NEXT: addi a0, zero, -1
-; RV64IZbb-NEXT: slli a0, a0, 63
-; RV64IZbb-NEXT: bgez a3, .LBB1_2
+; RV64IZbb-NEXT: sgtz a3, a2
+; RV64IZbb-NEXT: sub a0, a0, a2
+; RV64IZbb-NEXT: slt a1, a0, a1
+; RV64IZbb-NEXT: beq a3, a1, .LBB1_2
 ; RV64IZbb-NEXT: # %bb.1:
-; RV64IZbb-NEXT: addi a0, a0, -1
+; RV64IZbb-NEXT: slti a0, a0, 0
+; RV64IZbb-NEXT: addi a1, zero, -1
+; RV64IZbb-NEXT: slli a1, a1, 63
+; RV64IZbb-NEXT: sub a0, a1, a0
 ; RV64IZbb-NEXT: .LBB1_2:
-; RV64IZbb-NEXT: sgtz a2, a2
-; RV64IZbb-NEXT: slt a1, a3, a1
-; RV64IZbb-NEXT: bne a2, a1, .LBB1_4
-; RV64IZbb-NEXT: # %bb.3:
-; RV64IZbb-NEXT: mv a0, a3
-; RV64IZbb-NEXT: .LBB1_4:
 ; RV64IZbb-NEXT: ret
   %a = mul i64 %y, %z
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)
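
Note for reviewers, not part of the patch: the new lowering relies on the identity that for a 0/1 condition c, (c ? t : f) == f + c when t == f + 1 (the ISD::ADD case) and (c ? t : f) == f - c when t + 1 == f (the ISD::SUB case). The saturation select produced for signed saturating add/sub overflow, (sum < 0) ? INT_MAX : INT_MIN, is the SUB case, which is the slti/lui/sub sequence in the updated checks. A minimal standalone C++ sanity check of that identity follows; the helper names are illustrative only.

// Sanity check for the select -> add/sub identity used by the lowering.
// uint32_t is used so the INT_MAX/INT_MIN wraparound is well defined,
// matching APInt arithmetic.
#include <cassert>
#include <cstdint>

// (c ? t : f) with t == f + 1 lowers to f + c (the ISD::ADD case).
uint32_t selectAsAdd(bool c, uint32_t f) { return f + c; }

// (c ? t : f) with t + 1 == f lowers to f - c (the ISD::SUB case).
uint32_t selectAsSub(bool c, uint32_t f) { return f - c; }

int main() {
  const uint32_t IntMax = 0x7fffffffu;
  const uint32_t IntMin = 0x80000000u; // IntMax + 1 with 32-bit wraparound.
  for (bool c : {false, true}) {
    // ADD case: TrueVal = IntMin, FalseVal = IntMax, TrueVal - 1 == FalseVal.
    assert(selectAsAdd(c, IntMax) == (c ? IntMin : IntMax));
    // SUB case: TrueVal = IntMax, FalseVal = IntMin, TrueVal + 1 == FalseVal.
    // This is the (sum < 0) ? INT_MAX : INT_MIN saturation select.
    assert(selectAsSub(c, IntMin) == (c ? IntMax : IntMin));
  }
  return 0;
}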