Index: llvm/lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -347,10 +347,15 @@
 // Standalone (codegen-only) immleaf patterns.
-// A 12-bit signed immediate plus one where the imm range will be -2047~2048.
+// A 12-bit signed immediate plus one where the imm range will be [-2047, 2048].
 def simm12_plus1 : ImmLeaf<XLenVT,
   [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
 
+// A 12-bit signed immediate minus one, excluding zero,
+// where the imm range will be [-2049, -1] and [1, 2046].
+def simm12_sub1_exc0 : ImmLeaf<XLenVT,
+  [{return (Imm >= -2049 && Imm < 0) || (Imm > 0 && Imm <= 2046);}]>;
+
 // A 6-bit constant greater than 32.
 def uimm6gt32 : ImmLeaf<XLenVT, [{
   return isUInt<6>(Imm) && Imm > 32;
 }]>;
 
@@ -373,9 +378,9 @@
                                    N->getValueType(0));
 }]>;
 
-// Return an immediate value plus 32.
-def ImmPlus32 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getSExtValue() + 32, SDLoc(N),
+// Return an immediate value plus 1.
+def ImmPlus1 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() + 1, SDLoc(N),
                                    N->getValueType(0));
 }]>;
 
@@ -1208,6 +1213,10 @@
 def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
 def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
 def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(setgt GPR:$rs1, simm12_sub1_exc0:$imm),
+          (XORI (SLTI GPR:$rs1, (ImmPlus1 simm12_sub1_exc0:$imm)), 1)>;
+def : Pat<(setugt GPR:$rs1, simm12_sub1_exc0:$imm),
+          (XORI (SLTIU GPR:$rs1, (ImmPlus1 simm12_sub1_exc0:$imm)), 1)>;
 
 def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
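NOTE (illustration only, not part of the patch): the new patterns rely on the
identity (x > C) == !(x < C + 1), which holds for signed and unsigned
comparison whenever C + 1 does not overflow. Restricting C to nonzero values
in [-2049, 2046] keeps C + 1 inside the simm12 range accepted by SLTI/SLTIU;
C == 0 is presumably skipped so that x > 0 keeps its single-instruction
sgtz/snez lowering. A minimal IR sketch (hypothetical function name, C = 100)
that these patterns should lower to slti+xori instead of li+slt on RV32I:

define i32 @sgt_example(i32 %x) nounwind {
  %c = icmp sgt i32 %x, 100   ; C = 100 is nonzero and C + 1 = 101 fits simm12
  %z = zext i1 %c to i32      ; expected: slti a0, a0, 101
  ret i32 %z                  ;           xori a0, a0, 1
}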
Index: llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
===================================================================
--- llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -109,8 +109,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -120,8 +120,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -494,8 +494,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -505,8 +505,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -770,8 +770,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -781,8 +781,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1103,8 +1103,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1114,8 +1114,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/double-fcmp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -119,8 +119,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -130,8 +130,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -452,8 +452,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -463,8 +463,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -109,8 +109,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -120,8 +120,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -478,8 +478,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -489,8 +489,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -754,8 +754,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -765,8 +765,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1071,8 +1071,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1082,8 +1082,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/float-fcmp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -119,8 +119,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -130,8 +130,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -436,8 +436,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2@plt
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -447,8 +447,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2@plt
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/fpclamptosat.ll
===================================================================
--- llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -35,7 +35,8 @@
 ; RV32IF-NEXT:    li a3, -1
 ; RV32IF-NEXT:    beq a1, a3, .LBB0_6
 ; RV32IF-NEXT:  # %bb.5: # %entry
-; RV32IF-NEXT:    slt a1, a3, a1
+; RV32IF-NEXT:    slti a1, a1, 0
+; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    beqz a1, .LBB0_7
 ; RV32IF-NEXT:    j .LBB0_8
 ; RV32IF-NEXT:  .LBB0_6:
@@ -391,7 +392,8 @@
 ; RV32-NEXT:    li a3, -1
 ; RV32-NEXT:    beq a1, a3, .LBB6_6
 ; RV32-NEXT:  # %bb.5: # %entry
-; RV32-NEXT:    slt a1, a3, a1
+; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    xori a1, a1, 1
 ; RV32-NEXT:    beqz a1, .LBB6_7
 ; RV32-NEXT:    j .LBB6_8
 ; RV32-NEXT:  .LBB6_6:
@@ -1137,7 +1139,8 @@
 ; RV32IF-NEXT:    and a3, a3, a2
 ; RV32IF-NEXT:    beq a3, a6, .LBB18_10
 ; RV32IF-NEXT:  .LBB18_9: # %entry
-; RV32IF-NEXT:    slt a4, a6, a2
+; RV32IF-NEXT:    slti a2, a2, 0
+; RV32IF-NEXT:    xori a4, a2, 1
 ; RV32IF-NEXT:  .LBB18_10: # %entry
 ; RV32IF-NEXT:    bnez a4, .LBB18_12
 ; RV32IF-NEXT:  # %bb.11: # %entry
@@ -1172,7 +1175,8 @@
 ; RV64IF-NEXT:    slli a3, a2, 63
 ; RV64IF-NEXT:    beq a1, a2, .LBB18_6
 ; RV64IF-NEXT:  # %bb.5: # %entry
-; RV64IF-NEXT:    slt a1, a2, a1
+; RV64IF-NEXT:    slti a1, a1, 0
+; RV64IF-NEXT:    xori a1, a1, 1
 ; RV64IF-NEXT:    beqz a1, .LBB18_7
 ; RV64IF-NEXT:    j .LBB18_8
 ; RV64IF-NEXT:  .LBB18_6:
@@ -1233,7 +1237,8 @@
 ; RV32IFD-NEXT:    and a3, a3, a2
 ; RV32IFD-NEXT:    beq a3, a6, .LBB18_10
 ; RV32IFD-NEXT:  .LBB18_9: # %entry
-; RV32IFD-NEXT:    slt a4, a6, a2
+; RV32IFD-NEXT:    slti a2, a2, 0
+; RV32IFD-NEXT:    xori a4, a2, 1
 ; RV32IFD-NEXT:  .LBB18_10: # %entry
 ; RV32IFD-NEXT:    bnez a4, .LBB18_12
 ; RV32IFD-NEXT:  # %bb.11: # %entry
@@ -1569,7 +1574,8 @@
 ; RV32-NEXT:    and a3, a3, a2
 ; RV32-NEXT:    beq a3, a6, .LBB21_10
 ; RV32-NEXT:  .LBB21_9: # %entry
-; RV32-NEXT:    slt a4, a6, a2
+; RV32-NEXT:    slti a2, a2, 0
+; RV32-NEXT:    xori a4, a2, 1
 ; RV32-NEXT:  .LBB21_10: # %entry
 ; RV32-NEXT:    bnez a4, .LBB21_12
 ; RV32-NEXT:  # %bb.11: # %entry
@@ -1806,7 +1812,8 @@
 ; RV32-NEXT:    and a3, a3, a2
 ; RV32-NEXT:    beq a3, a6, .LBB24_10
 ; RV32-NEXT:  .LBB24_9: # %entry
-; RV32-NEXT:    slt a4, a6, a2
+; RV32-NEXT:    slti a2, a2, 0
+; RV32-NEXT:    xori a4, a2, 1
 ; RV32-NEXT:  .LBB24_10: # %entry
 ; RV32-NEXT:    bnez a4, .LBB24_12
 ; RV32-NEXT:  # %bb.11: # %entry
@@ -1843,7 +1850,8 @@
 ; RV64-NEXT:    slli a3, a2, 63
 ; RV64-NEXT:    beq a1, a2, .LBB24_6
 ; RV64-NEXT:  # %bb.5: # %entry
-; RV64-NEXT:    slt a1, a2, a1
+; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    xori a1, a1, 1
 ; RV64-NEXT:    beqz a1, .LBB24_7
 ; RV64-NEXT:    j .LBB24_8
 ; RV64-NEXT:  .LBB24_6:
Index: llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
===================================================================
--- llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -1481,7 +1481,8 @@
 ; CHECK-NEXT:    slli a3, a0, 63
 ; CHECK-NEXT:    beq a1, a0, .LBB18_11
 ; CHECK-NEXT:  .LBB18_8: # %entry
-; CHECK-NEXT:    slt a1, a0, a1
+; CHECK-NEXT:    slti a1, a1, 0
+; CHECK-NEXT:    xori a1, a1, 1
 ; CHECK-NEXT:    bne s1, a0, .LBB18_12
 ; CHECK-NEXT:  .LBB18_9:
 ; CHECK-NEXT:    sltu a0, a3, s0
@@ -1496,7 +1497,8 @@
 ; CHECK-NEXT:    sltu a1, a3, a2
 ; CHECK-NEXT:    beq s1, a0, .LBB18_9
 ; CHECK-NEXT:  .LBB18_12: # %entry
-; CHECK-NEXT:    slt a0, a0, s1
+; CHECK-NEXT:    slti a0, s1, 0
+; CHECK-NEXT:    xori a0, a0, 1
 ; CHECK-NEXT:    bnez a0, .LBB18_14
 ; CHECK-NEXT:  .LBB18_13: # %entry
 ; CHECK-NEXT:    mv s0, a3
@@ -1692,7 +1694,8 @@
 ; CHECK-NEXT:    slli a3, a0, 63
 ; CHECK-NEXT:    beq a1, a0, .LBB21_11
 ; CHECK-NEXT:  .LBB21_8: # %entry
-; CHECK-NEXT:    slt a1, a0, a1
+; CHECK-NEXT:    slti a1, a1, 0
+; CHECK-NEXT:    xori a1, a1, 1
 ; CHECK-NEXT:    bne s1, a0, .LBB21_12
 ; CHECK-NEXT:  .LBB21_9:
 ; CHECK-NEXT:    sltu a0, a3, s0
@@ -1707,7 +1710,8 @@
 ; CHECK-NEXT:    sltu a1, a3, a2
 ; CHECK-NEXT:    beq s1, a0, .LBB21_9
 ; CHECK-NEXT:  .LBB21_12: # %entry
-; CHECK-NEXT:    slt a0, a0, s1
+; CHECK-NEXT:    slti a0, s1, 0
+; CHECK-NEXT:    xori a0, a0, 1
 ; CHECK-NEXT:    bnez a0, .LBB21_14
 ; CHECK-NEXT:  .LBB21_13: # %entry
 ; CHECK-NEXT:    mv s0, a3
@@ -1905,7 +1909,8 @@
 ; CHECK-NEXT:    slli a3, a0, 63
 ; CHECK-NEXT:    beq a1, a0, .LBB24_11
 ; CHECK-NEXT:  .LBB24_8: # %entry
-; CHECK-NEXT:    slt a1, a0, a1
+; CHECK-NEXT:    slti a1, a1, 0
+; CHECK-NEXT:    xori a1, a1, 1
 ; CHECK-NEXT:    bne s1, a0, .LBB24_12
 ; CHECK-NEXT:  .LBB24_9:
 ; CHECK-NEXT:    sltu a0, a3, s0
@@ -1920,7 +1925,8 @@
 ; CHECK-NEXT:    sltu a1, a3, a2
 ; CHECK-NEXT:    beq s1, a0, .LBB24_9
 ; CHECK-NEXT:  .LBB24_12: # %entry
-; CHECK-NEXT:    slt a0, a0, s1
+; CHECK-NEXT:    slti a0, s1, 0
+; CHECK-NEXT:    xori a0, a0, 1
 ; CHECK-NEXT:    bnez a0, .LBB24_14
 ; CHECK-NEXT:  .LBB24_13: # %entry
 ; CHECK-NEXT:    mv s0, a3
Index: llvm/test/CodeGen/RISCV/i32-icmp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/i32-icmp.ll
+++ llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -136,6 +136,39 @@
   ret i32 %2
 }
 
+define i32 @icmp_ugt_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_2047:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 2047
+; RV32I-NEXT:    sltu a0, a1, a0
+; RV32I-NEXT:    ret
+  %1 = icmp ugt i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_2046(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_2046:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, 2047
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp ugt i32 %a, 2046
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_neg_2049(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_neg_2049:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, -2048
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp ugt i32 %a, -2049
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_uge:
 ; RV32I:       # %bb.0:
@@ -157,6 +190,36 @@
   ret i32 %2
 }
 
+define i32 @icmp_ult_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ult_constant_2047:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, 2047
+; RV32I-NEXT:    ret
+  %1 = icmp ult i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+define i32 @icmp_ult_constant_2048(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ult_constant_2048:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 11
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+  %1 = icmp ult i32 %a, 2048
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ult_constant_neg_2048(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ult_constant_neg_2048:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a0, a0, -2048
+; RV32I-NEXT:    ret
+  %1 = icmp ult i32 %a, -2048
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_ule(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_ule:
 ; RV32I:       # %bb.0:
@@ -178,6 +241,62 @@
   ret i32 %2
 }
 
+define i32 @icmp_sgt_constant(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slti a0, a0, 6
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 5
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_2046(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_2046:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slti a0, a0, 2047
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 2046
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_2047:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    li a1, 2047
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_neg_2049(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_neg_2049:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slti a0, a0, -2048
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, -2049
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_neg_2050(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_neg_2050:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 1048575
+; RV32I-NEXT:    addi a1, a1, 2046
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+  %1 = icmp sgt i32 %a, -2050
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sge:
 ; RV32I:       # %bb.0:
@@ -209,5 +328,3 @@
   %2 = zext i1 %1 to i32
   ret i32 %2
 }
-
-; TODO: check variants with an immediate?
Index: llvm/test/CodeGen/RISCV/select-constant-xor.ll
===================================================================
--- llvm/test/CodeGen/RISCV/select-constant-xor.ll
+++ llvm/test/CodeGen/RISCV/select-constant-xor.ll
@@ -48,8 +48,8 @@
 define i32 @selecti64i32(i64 %a) {
 ; RV32-LABEL: selecti64i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a0, -1
-; RV32-NEXT:    slt a0, a0, a1
+; RV32-NEXT:    slti a0, a1, 0
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:    sub a0, a1, a0
 ; RV32-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
===================================================================
--- llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -106,8 +106,8 @@
 ;
 ; RV64-LABEL: pos_sel_special_constant:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    li a1, -1
-; RV64-NEXT:    slt a0, a1, a0
+; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    slli a0, a0, 9
 ; RV64-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
Index: llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
===================================================================
--- llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -195,8 +195,8 @@
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    andi a0, a0, 15
-; RV32-NEXT:    li a1, 3
-; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 4
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_urem_odd_setne:
@@ -205,8 +205,8 @@
 ; RV64-NEXT:    addw a0, a1, a0
 ; RV64-NEXT:    negw a0, a0
 ; RV64-NEXT:    andi a0, a0, 15
-; RV64-NEXT:    li a1, 3
-; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 4
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    ret
 ;
 ; RV32M-LABEL: test_urem_odd_setne:
@@ -215,8 +215,8 @@
 ; RV32M-NEXT:    add a0, a1, a0
 ; RV32M-NEXT:    neg a0, a0
 ; RV32M-NEXT:    andi a0, a0, 15
-; RV32M-NEXT:    li a1, 3
-; RV32M-NEXT:    sltu a0, a1, a0
+; RV32M-NEXT:    sltiu a0, a0, 4
+; RV32M-NEXT:    xori a0, a0, 1
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_urem_odd_setne:
@@ -225,8 +225,8 @@
 ; RV64M-NEXT:    addw a0, a1, a0
 ; RV64M-NEXT:    negw a0, a0
 ; RV64M-NEXT:    andi a0, a0, 15
-; RV64M-NEXT:    li a1, 3
-; RV64M-NEXT:    sltu a0, a1, a0
+; RV64M-NEXT:    sltiu a0, a0, 4
+; RV64M-NEXT:    xori a0, a0, 1
 ; RV64M-NEXT:    ret
 ;
 ; RV32MV-LABEL: test_urem_odd_setne:
@@ -235,8 +235,8 @@
 ; RV32MV-NEXT:    add a0, a1, a0
 ; RV32MV-NEXT:    neg a0, a0
 ; RV32MV-NEXT:    andi a0, a0, 15
-; RV32MV-NEXT:    li a1, 3
-; RV32MV-NEXT:    sltu a0, a1, a0
+; RV32MV-NEXT:    sltiu a0, a0, 4
+; RV32MV-NEXT:    xori a0, a0, 1
 ; RV32MV-NEXT:    ret
 ;
 ; RV64MV-LABEL: test_urem_odd_setne:
@@ -245,8 +245,8 @@
 ; RV64MV-NEXT:    addw a0, a1, a0
 ; RV64MV-NEXT:    negw a0, a0
 ; RV64MV-NEXT:    andi a0, a0, 15
-; RV64MV-NEXT:    li a1, 3
-; RV64MV-NEXT:    sltu a0, a1, a0
+; RV64MV-NEXT:    sltiu a0, a0, 4
+; RV64MV-NEXT:    xori a0, a0, 1
 ; RV64MV-NEXT:    ret
   %urem = urem i4 %X, 5
   %cmp = icmp ne i4 %urem, 0
@@ -261,8 +261,8 @@
 ; RV32-NEXT:    li a1, 307
 ; RV32-NEXT:    call __mulsi3@plt
 ; RV32-NEXT:    andi a0, a0, 511
-; RV32-NEXT:    li a1, 1
-; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 2
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -274,8 +274,8 @@
 ; RV64-NEXT:    li a1, 307
 ; RV64-NEXT:    call __muldi3@plt
 ; RV64-NEXT:    andi a0, a0, 511
-; RV64-NEXT:    li a1, 1
-; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 2
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -285,8 +285,8 @@
 ; RV32M-NEXT:    li a1, 307
 ; RV32M-NEXT:    mul a0, a0, a1
 ; RV32M-NEXT:    andi a0, a0, 511
-; RV32M-NEXT:    li a1, 1
-; RV32M-NEXT:    sltu a0, a1, a0
+; RV32M-NEXT:    sltiu a0, a0, 2
+; RV32M-NEXT:    xori a0, a0, 1
 ; RV32M-NEXT:    ret
 ;
 ; RV64M-LABEL: test_urem_negative_odd:
@@ -294,8 +294,8 @@
 ; RV64M-NEXT:    li a1, 307
 ; RV64M-NEXT:    mulw a0, a0, a1
 ; RV64M-NEXT:    andi a0, a0, 511
-; RV64M-NEXT:    li a1, 1
-; RV64M-NEXT:    sltu a0, a1, a0
+; RV64M-NEXT:    sltiu a0, a0, 2
+; RV64M-NEXT:    xori a0, a0, 1
 ; RV64M-NEXT:    ret
 ;
 ; RV32MV-LABEL: test_urem_negative_odd:
@@ -303,8 +303,8 @@
 ; RV32MV-NEXT:    li a1, 307
 ; RV32MV-NEXT:    mul a0, a0, a1
 ; RV32MV-NEXT:    andi a0, a0, 511
-; RV32MV-NEXT:    li a1, 1
-; RV32MV-NEXT:    sltu a0, a1, a0
+; RV32MV-NEXT:    sltiu a0, a0, 2
+; RV32MV-NEXT:    xori a0, a0, 1
 ; RV32MV-NEXT:    ret
 ;
 ; RV64MV-LABEL: test_urem_negative_odd:
@@ -312,8 +312,8 @@
 ; RV64MV-NEXT:    li a1, 307
 ; RV64MV-NEXT:    mulw a0, a0, a1
 ; RV64MV-NEXT:    andi a0, a0, 511
-; RV64MV-NEXT:    li a1, 1
-; RV64MV-NEXT:    sltu a0, a1, a0
+; RV64MV-NEXT:    sltiu a0, a0, 2
+; RV64MV-NEXT:    xori a0, a0, 1
 ; RV64MV-NEXT:    ret
   %urem = urem i9 %X, -5
   %cmp = icmp ne i9 %urem, 0
@@ -344,22 +344,22 @@
 ; RV32-NEXT:    srli a0, a0, 22
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    li a1, 341
-; RV32-NEXT:    sltu s3, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 342
+; RV32-NEXT:    xori s3, a0, 1
 ; RV32-NEXT:    li a1, 819
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    call __mulsi3@plt
 ; RV32-NEXT:    addi a0, a0, -1638
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    li a1, 1
-; RV32-NEXT:    sltu s1, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 2
+; RV32-NEXT:    xori s1, a0, 1
 ; RV32-NEXT:    li a1, 1463
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    call __mulsi3@plt
 ; RV32-NEXT:    addi a0, a0, -1463
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    li a1, 292
-; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sltiu a0, a0, 293
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    neg a1, s3
 ; RV32-NEXT:    neg a0, a0
 ; RV32-NEXT:    neg a2, s1
@@ -404,22 +404,22 @@
 ; RV64-NEXT:    srli a0, a0, 54
 ; RV64-NEXT:    or a0, a0, a1
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 341
-; RV64-NEXT:    sltu s3, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 342
+; RV64-NEXT:    xori s3, a0, 1
 ; RV64-NEXT:    li a1, 819
 ; RV64-NEXT:    mv a0, s2
 ; RV64-NEXT:    call __muldi3@plt
 ; RV64-NEXT:    addiw a0, a0, -1638
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 1
-; RV64-NEXT:    sltu s2, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 2
+; RV64-NEXT:    xori s2, a0, 1
 ; RV64-NEXT:    li a1, 1463
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    call __muldi3@plt
 ; RV64-NEXT:    addiw a0, a0, -1463
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 292
-; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sltiu a0, a0, 293
+; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    negw a1, s3
 ; RV64-NEXT:    negw a0, a0
 ; RV64-NEXT:    andi a1, a1, 2047
@@ -456,20 +456,20 @@
 ; RV32M-NEXT:    srli a2, a2, 22
 ; RV32M-NEXT:    or a2, a2, a4
 ; RV32M-NEXT:    andi a2, a2, 2047
-; RV32M-NEXT:    li a4, 341
-; RV32M-NEXT:    sltu a2, a4, a2
+; RV32M-NEXT:    sltiu a2, a2, 342
+; RV32M-NEXT:    xori a2, a2, 1
 ; RV32M-NEXT:    li a4, 819
 ; RV32M-NEXT:    mul a1, a1, a4
 ; RV32M-NEXT:    addi a1, a1, -1638
 ; RV32M-NEXT:    andi a1, a1, 2047
-; RV32M-NEXT:    li a4, 1
-; RV32M-NEXT:    sltu a1, a4, a1
+; RV32M-NEXT:    sltiu a1, a1, 2
+; RV32M-NEXT:    xori a1, a1, 1
 ; RV32M-NEXT:    li a4, 1463
 ; RV32M-NEXT:    mul a3, a3, a4
 ; RV32M-NEXT:    addi a3, a3, -1463
 ; RV32M-NEXT:    andi a3, a3, 2047
-; RV32M-NEXT:    li a4, 292
-; RV32M-NEXT:    sltu a3, a4, a3
+; RV32M-NEXT:    sltiu a3, a3, 293
+; RV32M-NEXT:    xori a3, a3, 1
 ; RV32M-NEXT:    neg a2, a2
 ; RV32M-NEXT:    neg a3, a3
 ; RV32M-NEXT:    neg a4, a1
@@ -501,20 +501,20 @@
 ; RV64M-NEXT:    srli a1, a1, 54
 ; RV64M-NEXT:    or a1, a1, a4
 ; RV64M-NEXT:    andi a1, a1, 2047
-; RV64M-NEXT:    li a4, 341
-; RV64M-NEXT:    sltu a1, a4, a1
+; RV64M-NEXT:    sltiu a1, a1, 342
+; RV64M-NEXT:    xori a1, a1, 1
 ; RV64M-NEXT:    li a4, 819
 ; RV64M-NEXT:    mulw a3, a3, a4
 ; RV64M-NEXT:    addiw a3, a3, -1638
 ; RV64M-NEXT:    andi a3, a3, 2047
-; RV64M-NEXT:    li a4, 1
-; RV64M-NEXT:    sltu a3, a4, a3
+; RV64M-NEXT:    sltiu a3, a3, 2
+; RV64M-NEXT:    xori a3, a3, 1
 ; RV64M-NEXT:    li a4, 1463
 ; RV64M-NEXT:    mulw a2, a2, a4
 ; RV64M-NEXT:    addiw a2, a2, -1463
 ; RV64M-NEXT:    andi a2, a2, 2047
-; RV64M-NEXT:    li a4, 292
-; RV64M-NEXT:    sltu a2, a4, a2
+; RV64M-NEXT:    sltiu a2, a2, 293
+; RV64M-NEXT:    xori a2, a2, 1
 ; RV64M-NEXT:    negw a1, a1
 ; RV64M-NEXT:    negw a2, a2
 ; RV64M-NEXT:    andi a1, a1, 2047
Index: llvm/test/CodeGen/RISCV/xaluo.ll
===================================================================
--- llvm/test/CodeGen/RISCV/xaluo.ll
+++ llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1584,8 +1584,8 @@
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    not a1, a1
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    li a1, -1
-; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: saddo.not.i64:
@@ -1607,8 +1607,8 @@
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    not a1, a1
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    li a1, -1
-; RV32ZBA-NEXT:    slt a0, a1, a0
+; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: saddo.not.i64:
@@ -1984,8 +1984,8 @@
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    li a1, -1
-; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub.not.i64:
@@ -2005,8 +2005,8 @@
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    li a1, -1
-; RV32ZBA-NEXT:    slt a0, a1, a0
+; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssub.not.i64: