diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -347,10 +347,19 @@
 // Standalone (codegen-only) immleaf patterns.
-// A 12-bit signed immediate plus one where the imm range will be -2047~2048.
+// A 12-bit signed immediate plus one where the imm range will be [-2047, 2048].
 def simm12_plus1 : ImmLeaf<XLenVT,
   [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
 
+// A 12-bit signed immediate minus one, excluding zero.
+def simm12_sub1_nonzero : PatLeaf<(imm), [{
+  if (!N->hasOneUse())
+    return false;
+  // The immediate operand must be in range [-2049, 0) or (0, 2046].
+  int64_t Imm = N->getSExtValue();
+  return (Imm >= -2049 && Imm < 0) || (Imm > 0 && Imm <= 2046);
+}]>;
+
 // A 6-bit constant greater than 32.
 def uimm6gt32 : ImmLeaf<XLenVT, [{
   return isUInt<6>(Imm) && Imm > 32;
@@ -373,9 +382,9 @@
                                    N->getValueType(0));
 }]>;
 
-// Return an immediate value plus 32.
-def ImmPlus32 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getSExtValue() + 32, SDLoc(N),
+// Return an immediate value plus 1.
+def ImmPlus1 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() + 1, SDLoc(N),
                                    N->getValueType(0));
 }]>;
@@ -1208,6 +1217,10 @@
 def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
 def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
 def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(setgt GPR:$rs1, simm12_sub1_nonzero:$imm),
+          (XORI (SLTI GPR:$rs1, (ImmPlus1 simm12_sub1_nonzero:$imm)), 1)>;
+def : Pat<(setugt GPR:$rs1, simm12_sub1_nonzero:$imm),
+          (XORI (SLTIU GPR:$rs1, (ImmPlus1 simm12_sub1_nonzero:$imm)), 1)>;
 
 def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -109,8 +109,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -120,8 +120,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -494,8 +494,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __ltdf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -505,8 +505,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __ltdf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -770,8 +770,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -781,8 +781,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -1103,8 +1103,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __ltdf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -1114,8 +1114,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __ltdf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -119,8 +119,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __gedf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -130,8 +130,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __gedf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -452,8 +452,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __ltdf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -463,8 +463,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __ltdf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -109,8 +109,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -120,8 +120,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -478,8 +478,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __ltsf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -489,8 +489,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __ltsf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -754,8 +754,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -765,8 +765,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -1071,8 +1071,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __ltsf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -1082,8 +1082,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __ltsf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -119,8 +119,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __gesf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -130,8 +130,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __gesf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
@@ -436,8 +436,8 @@
 ; RV32I-NEXT: addi sp, sp, -16
 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: call __ltsf2@plt
-; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: slti a0, a0, 0
+; RV32I-NEXT: xori a0, a0, 1
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
@@ -447,8 +447,8 @@
 ; RV64I-NEXT: addi sp, sp, -16
 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: call __ltsf2@plt
-; RV64I-NEXT: li a1, -1
-; RV64I-NEXT: slt a0, a1, a0
+; RV64I-NEXT: slti a0, a0, 0
+; RV64I-NEXT: xori a0, a0, 1
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/i32-icmp.ll b/llvm/test/CodeGen/RISCV/i32-icmp.ll
--- a/llvm/test/CodeGen/RISCV/i32-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/i32-icmp.ll
@@ -136,6 +136,63 @@
   ret i32 %2
 }
 
+define i32 @icmp_ugt_constant_zero(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: snez a0, a0
+; RV32I-NEXT: ret
+  %1 = icmp ugt i32 %a, 0
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_2047:
+; RV32I: # %bb.0:
+; RV32I-NEXT: li a1, 2047
+; RV32I-NEXT: sltu a0, a1, a0
+; RV32I-NEXT: ret
+  %1 = icmp ugt i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_2046(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_2046:
+; RV32I: # %bb.0:
+; RV32I-NEXT: sltiu a0, a0, 2047
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
+  %1 = icmp ugt i32 %a, 2046
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_neg_2049(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_neg_2049:
+; RV32I: # %bb.0:
+; RV32I-NEXT: sltiu a0, a0, -2048
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
+; 4294965247 sign-extends to -2049
+  %1 = icmp ugt i32 %a, 4294965247
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_ugt_constant_neg_2050(i32 %a) nounwind {
+; RV32I-LABEL: icmp_ugt_constant_neg_2050:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, 1048575
+; RV32I-NEXT: addi a1, a1, 2046
+; RV32I-NEXT: sltu a0, a1, a0
+; RV32I-NEXT: ret
+; 4294965246 sign-extends to -2050
+  %1 = icmp ugt i32 %a, 4294965246
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_uge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_uge:
 ; RV32I: # %bb.0:
@@ -178,6 +235,72 @@
   ret i32 %2
 }
 
+define i32 @icmp_sgt_constant(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slti a0, a0, 6
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
+  %1 = icmp sgt i32 %a, 5
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_zero(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_zero:
+; RV32I: # %bb.0:
+; RV32I-NEXT: sgtz a0, a0
+; RV32I-NEXT: ret
+  %1 = icmp sgt i32 %a, 0
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_2046(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_2046:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slti a0, a0, 2047
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
+  %1 = icmp sgt i32 %a, 2046
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_2047(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_2047:
+; RV32I: # %bb.0:
+; RV32I-NEXT: li a1, 2047
+; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: ret
+  %1 = icmp sgt i32 %a, 2047
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_neg_2049(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_neg_2049:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slti a0, a0, -2048
+; RV32I-NEXT: xori a0, a0, 1
+; RV32I-NEXT: ret
+  %1 = icmp sgt i32 %a, -2049
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @icmp_sgt_constant_neg_2050(i32 %a) nounwind {
+; RV32I-LABEL: icmp_sgt_constant_neg_2050:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a1, 1048575
+; RV32I-NEXT: addi a1, a1, 2046
+; RV32I-NEXT: slt a0, a1, a0
+; RV32I-NEXT: ret
+  %1 = icmp sgt i32 %a, -2050
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
 define i32 @icmp_sge(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: icmp_sge:
 ; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
--- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll
+++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
@@ -48,8 +48,8 @@
 define i32 @selecti64i32(i64 %a) {
 ; RV32-LABEL: selecti64i32:
 ; RV32: # %bb.0:
-; RV32-NEXT: li a0, -1
-; RV32-NEXT: slt a0, a0, a1
+; RV32-NEXT: slti a0, a1, 0
+; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: lui a1, 524288
 ; RV32-NEXT: sub a0, a1, a0
 ; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
--- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -106,8 +106,8 @@
 ;
 ; RV64-LABEL: pos_sel_special_constant:
 ; RV64: # %bb.0:
-; RV64-NEXT: li a1, -1
-; RV64-NEXT: slt a0, a1, a0
+; RV64-NEXT: slti a0, a0, 0
+; RV64-NEXT: xori a0, a0, 1
 ; RV64-NEXT: slli a0, a0, 9
 ; RV64-NEXT: ret
   %tmp.1 = icmp sgt i32 %a, -1
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -195,8 +195,8 @@
 ; RV32-NEXT: add a0, a1, a0
 ; RV32-NEXT: neg a0, a0
 ; RV32-NEXT: andi a0, a0, 15
-; RV32-NEXT: li a1, 3
-; RV32-NEXT: sltu a0, a1, a0
+; RV32-NEXT: sltiu a0, a0, 4
+; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: test_urem_odd_setne:
@@ -205,8 +205,8 @@
 ; RV64-NEXT: addw a0, a1, a0
 ; RV64-NEXT: negw a0, a0
 ; RV64-NEXT: andi a0, a0, 15
-; RV64-NEXT: li a1, 3
-; RV64-NEXT: sltu a0, a1, a0
+; RV64-NEXT: sltiu a0, a0, 4
+; RV64-NEXT: xori a0, a0, 1
 ; RV64-NEXT: ret
 ;
 ; RV32M-LABEL: test_urem_odd_setne:
@@ -215,8 +215,8 @@
 ; RV32M-NEXT: add a0, a1, a0
 ; RV32M-NEXT: neg a0, a0
 ; RV32M-NEXT: andi a0, a0, 15
-; RV32M-NEXT: li a1, 3
-; RV32M-NEXT: sltu a0, a1, a0
+; RV32M-NEXT: sltiu a0, a0, 4
+; RV32M-NEXT: xori a0, a0, 1
 ; RV32M-NEXT: ret
 ;
 ; RV64M-LABEL: test_urem_odd_setne:
@@ -225,8 +225,8 @@
 ; RV64M-NEXT: addw a0, a1, a0
 ; RV64M-NEXT: negw a0, a0
 ; RV64M-NEXT: andi a0, a0, 15
-; RV64M-NEXT: li a1, 3
-; RV64M-NEXT: sltu a0, a1, a0
+; RV64M-NEXT: sltiu a0, a0, 4
+; RV64M-NEXT: xori a0, a0, 1
 ; RV64M-NEXT: ret
 ;
 ; RV32MV-LABEL: test_urem_odd_setne:
@@ -235,8 +235,8 @@
 ; RV32MV-NEXT: add a0, a1, a0
 ; RV32MV-NEXT: neg a0, a0
 ; RV32MV-NEXT: andi a0, a0, 15
-; RV32MV-NEXT: li a1, 3
-; RV32MV-NEXT: sltu a0, a1, a0
+; RV32MV-NEXT: sltiu a0, a0, 4
+; RV32MV-NEXT: xori a0, a0, 1
 ; RV32MV-NEXT: ret
 ;
 ; RV64MV-LABEL: test_urem_odd_setne:
@@ -245,8 +245,8 @@
 ; RV64MV-NEXT: addw a0, a1, a0
 ; RV64MV-NEXT: negw a0, a0
 ; RV64MV-NEXT: andi a0, a0, 15
-; RV64MV-NEXT: li a1, 3
-; RV64MV-NEXT: sltu a0, a1, a0
+; RV64MV-NEXT: sltiu a0, a0, 4
+; RV64MV-NEXT: xori a0, a0, 1
 ; RV64MV-NEXT: ret
   %urem = urem i4 %X, 5
   %cmp = icmp ne i4 %urem, 0
@@ -261,8 +261,8 @@
 ; RV32-NEXT: li a1, 307
 ; RV32-NEXT: call __mulsi3@plt
 ; RV32-NEXT: andi a0, a0, 511
-; RV32-NEXT: li a1, 1
-; RV32-NEXT: sltu a0, a1, a0
+; RV32-NEXT: sltiu a0, a0, 2
+; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT: addi sp, sp, 16
 ; RV32-NEXT: ret
@@ -274,8 +274,8 @@
 ; RV64-NEXT: li a1, 307
 ; RV64-NEXT: call __muldi3@plt
 ; RV64-NEXT: andi a0, a0, 511
-; RV64-NEXT: li a1, 1
-; RV64-NEXT: sltu a0, a1, a0
+; RV64-NEXT: sltiu a0, a0, 2
+; RV64-NEXT: xori a0, a0, 1
 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT: addi sp, sp, 16
 ; RV64-NEXT: ret
@@ -285,8 +285,8 @@
 ; RV32M-NEXT: li a1, 307
 ; RV32M-NEXT: mul a0, a0, a1
 ; RV32M-NEXT: andi a0, a0, 511
-; RV32M-NEXT: li a1, 1
-; RV32M-NEXT: sltu a0, a1, a0
+; RV32M-NEXT: sltiu a0, a0, 2
+; RV32M-NEXT: xori a0, a0, 1
 ; RV32M-NEXT: ret
 ;
 ; RV64M-LABEL: test_urem_negative_odd:
@@ -294,8 +294,8 @@
 ; RV64M-NEXT: li a1, 307
 ; RV64M-NEXT: mulw a0, a0, a1
 ; RV64M-NEXT: andi a0, a0, 511
-; RV64M-NEXT: li a1, 1
-; RV64M-NEXT: sltu a0, a1, a0
+; RV64M-NEXT: sltiu a0, a0, 2
+; RV64M-NEXT: xori a0, a0, 1
 ; RV64M-NEXT: ret
 ;
 ; RV32MV-LABEL: test_urem_negative_odd:
@@ -303,8 +303,8 @@
 ; RV32MV-NEXT: li a1, 307
 ; RV32MV-NEXT: mul a0, a0, a1
 ; RV32MV-NEXT: andi a0, a0, 511
-; RV32MV-NEXT: li a1, 1
-; RV32MV-NEXT: sltu a0, a1, a0
+; RV32MV-NEXT: sltiu a0, a0, 2
+; RV32MV-NEXT: xori a0, a0, 1
 ; RV32MV-NEXT: ret
 ;
 ; RV64MV-LABEL: test_urem_negative_odd:
@@ -312,8 +312,8 @@
 ; RV64MV-NEXT: li a1, 307
 ; RV64MV-NEXT: mulw a0, a0, a1
 ; RV64MV-NEXT: andi a0, a0, 511
-; RV64MV-NEXT: li a1, 1
-; RV64MV-NEXT: sltu a0, a1, a0
+; RV64MV-NEXT: sltiu a0, a0, 2
+; RV64MV-NEXT: xori a0, a0, 1
 ; RV64MV-NEXT: ret
   %urem = urem i9 %X, -5
   %cmp = icmp ne i9 %urem, 0
@@ -344,8 +344,8 @@
 ; RV32-NEXT: srli a0, a0, 22
 ; RV32-NEXT: or a0, a0, a1
 ; RV32-NEXT: andi a0, a0, 2047
-; RV32-NEXT: li a1, 341
-; RV32-NEXT: sltu s3, a1, a0
+; RV32-NEXT: sltiu a0, a0, 342
+; RV32-NEXT: xori s3, a0, 1
 ; RV32-NEXT: li a1, 819
 ; RV32-NEXT: mv a0, s1
 ; RV32-NEXT: call __mulsi3@plt
@@ -358,8 +358,8 @@
 ; RV32-NEXT: call __mulsi3@plt
 ; RV32-NEXT: addi a0, a0, -1463
 ; RV32-NEXT: andi a0, a0, 2047
-; RV32-NEXT: li a1, 292
-; RV32-NEXT: sltu a0, a1, a0
+; RV32-NEXT: sltiu a0, a0, 293
+; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: neg a1, s3
 ; RV32-NEXT: neg a0, a0
 ; RV32-NEXT: neg a2, s1
@@ -404,8 +404,8 @@
 ; RV64-NEXT: srli a0, a0, 54
 ; RV64-NEXT: or a0, a0, a1
 ; RV64-NEXT: andi a0, a0, 2047
-; RV64-NEXT: li a1, 341
-; RV64-NEXT: sltu s3, a1, a0
+; RV64-NEXT: sltiu a0, a0, 342
+; RV64-NEXT: xori s3, a0, 1
 ; RV64-NEXT: li a1, 819
 ; RV64-NEXT: mv a0, s2
 ; RV64-NEXT: call __muldi3@plt
@@ -418,8 +418,8 @@
 ; RV64-NEXT: call __muldi3@plt
 ; RV64-NEXT: addiw a0, a0, -1463
 ; RV64-NEXT: andi a0, a0, 2047
-; RV64-NEXT: li a1, 292
-; RV64-NEXT: sltu a0, a1, a0
+; RV64-NEXT: sltiu a0, a0, 293
+; RV64-NEXT: xori a0, a0, 1
 ; RV64-NEXT: negw a1, s3
 ; RV64-NEXT: negw a0, a0
 ; RV64-NEXT: andi a1, a1, 2047
@@ -456,8 +456,8 @@
 ; RV32M-NEXT: srli a2, a2, 22
 ; RV32M-NEXT: or a2, a2, a4
 ; RV32M-NEXT: andi a2, a2, 2047
-; RV32M-NEXT: li a4, 341
-; RV32M-NEXT: sltu a2, a4, a2
+; RV32M-NEXT: sltiu a2, a2, 342
+; RV32M-NEXT: xori a2, a2, 1
 ; RV32M-NEXT: li a4, 819
 ; RV32M-NEXT: mul a1, a1, a4
 ; RV32M-NEXT: addi a1, a1, -1638
@@ -468,8 +468,8 @@
 ; RV32M-NEXT: mul a3, a3, a4
 ; RV32M-NEXT: addi a3, a3, -1463
 ; RV32M-NEXT: andi a3, a3, 2047
-; RV32M-NEXT: li a4, 292
-; RV32M-NEXT: sltu a3, a4, a3
+; RV32M-NEXT: sltiu a3, a3, 293
+; RV32M-NEXT: xori a3, a3, 1
 ; RV32M-NEXT: neg a2, a2
 ; RV32M-NEXT: neg a3, a3
 ; RV32M-NEXT: neg a4, a1
@@ -501,8 +501,8 @@
 ; RV64M-NEXT: srli a1, a1, 54
 ; RV64M-NEXT: or a1, a1, a4
 ; RV64M-NEXT: andi a1, a1, 2047
-; RV64M-NEXT: li a4, 341
-; RV64M-NEXT: sltu a1, a4, a1
+; RV64M-NEXT: sltiu a1, a1, 342
+; RV64M-NEXT: xori a1, a1, 1
 ; RV64M-NEXT: li a4, 819
 ; RV64M-NEXT: mulw a3, a3, a4
 ; RV64M-NEXT: addiw a3, a3, -1638
@@ -513,8 +513,8 @@
 ; RV64M-NEXT: mulw a2, a2, a4
 ; RV64M-NEXT: addiw a2, a2, -1463
 ; RV64M-NEXT: andi a2, a2, 2047
-; RV64M-NEXT: li a4, 292
-; RV64M-NEXT: sltu a2, a4, a2
+; RV64M-NEXT: sltiu a2, a2, 293
+; RV64M-NEXT: xori a2, a2, 1
 ; RV64M-NEXT: negw a1, a1
 ; RV64M-NEXT: negw a2, a2
 ; RV64M-NEXT: andi a1, a1, 2047
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1984,8 +1984,8 @@
 ; RV32-NEXT: xor a0, a1, a0
 ; RV32-NEXT: xor a1, a1, a3
 ; RV32-NEXT: and a0, a1, a0
-; RV32-NEXT: li a1, -1
-; RV32-NEXT: slt a0, a1, a0
+; RV32-NEXT: slti a0, a0, 0
+; RV32-NEXT: xori a0, a0, 1
 ; RV32-NEXT: ret
 ;
 ; RV64-LABEL: ssub.not.i64:
@@ -2005,8 +2005,8 @@
 ; RV32ZBA-NEXT: xor a0, a1, a0
 ; RV32ZBA-NEXT: xor a1, a1, a3
 ; RV32ZBA-NEXT: and a0, a1, a0
-; RV32ZBA-NEXT: li a1, -1
-; RV32ZBA-NEXT: slt a0, a1, a0
+; RV32ZBA-NEXT: slti a0, a0, 0
+; RV32ZBA-NEXT: xori a0, a0, 1
 ; RV32ZBA-NEXT: ret
 ;
 ; RV64ZBA-LABEL: ssub.not.i64:
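
Editor's note, not part of the patch: the two new patterns implement the identity x > C == !(x < C + 1) via SLTI/SLTIU followed by XORI. C must be nonzero because comparisons against zero already lower to sgtz/snez, and C + 1 must still be a legal simm12, which is where the [-2049, 0) or (0, 2046] range in simm12_sub1_nonzero comes from; the hasOneUse() guard presumably avoids rewriting a constant that other users would force to be materialized anyway. Below is a minimal standalone C sketch checking the identity at the range boundaries; the helper values sampled are illustrative, not taken from the patch.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* (x > C) == !(x < C + 1): the identity behind the new
     (XORI (SLTI/SLTIU x, C+1), 1) patterns, sampled at the
     boundaries of simm12_sub1_nonzero. */
  int main(void) {
    const int32_t cs[] = {-2049, -1, 1, 2046}; /* C + 1 stays a simm12 */
    const int32_t xs[] = {INT32_MIN, -2050, -2049, -2048, -1, 0,
                          1, 2046, 2047, 2048, INT32_MAX};
    for (unsigned i = 0; i < sizeof cs / sizeof cs[0]; ++i) {
      for (unsigned j = 0; j < sizeof xs / sizeof xs[0]; ++j) {
        int32_t c = cs[i], x = xs[j];
        uint32_t uc = (uint32_t)c, ux = (uint32_t)x;
        /* setgt -> slti + xori */
        assert((x > c) == !(x < c + 1));
        /* setugt -> sltiu + xori; C == -1 is skipped because the
           unsigned C + 1 wraps -- x >u UINT32_MAX is assumed to be
           folded to false before instruction selection. */
        if (uc != UINT32_MAX)
          assert((ux > uc) == !(ux < uc + 1u));
      }
    }
    puts("identity holds at all sampled points");
    return 0;
  }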