diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12135,10 +12135,6 @@
   if (Subtarget.hasShortForwardBranchOpt())
     return SDValue();
 
-  // Only support XLenVT.
-  if (N->getValueType(0) != Subtarget.getXLenVT())
-    return SDValue();
-
   SDValue TrueVal = N->getOperand(1);
   SDValue FalseVal = N->getOperand(2);
   if (SDValue V = tryFoldSelectIntoOp(N, DAG, TrueVal, FalseVal, /*Swapped*/false))
diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -69,17 +69,12 @@
 ;
 ; RV32ZICOND-LABEL: add1:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a4, a2, a4
-; RV32ZICOND-NEXT:    add a3, a1, a3
-; RV32ZICOND-NEXT:    sltu a5, a3, a1
-; RV32ZICOND-NEXT:    add a4, a4, a5
-; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
-; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
-; RV32ZICOND-NEXT:    or a3, a3, a1
-; RV32ZICOND-NEXT:    czero.eqz a1, a4, a0
-; RV32ZICOND-NEXT:    czero.nez a0, a2, a0
-; RV32ZICOND-NEXT:    or a1, a1, a0
-; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    czero.eqz a4, a4, a0
+; RV32ZICOND-NEXT:    add a2, a2, a4
+; RV32ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV32ZICOND-NEXT:    add a0, a1, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a1
+; RV32ZICOND-NEXT:    add a1, a2, a1
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add1:
@@ -107,17 +102,12 @@
 ;
 ; RV32ZICOND-LABEL: add2:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a2, a2, a4
-; RV32ZICOND-NEXT:    add a5, a1, a3
-; RV32ZICOND-NEXT:    sltu a1, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV32ZICOND-NEXT:    add a2, a4, a2
+; RV32ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32ZICOND-NEXT:    add a0, a3, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a3
 ; RV32ZICOND-NEXT:    add a1, a2, a1
-; RV32ZICOND-NEXT:    czero.nez a2, a3, a0
-; RV32ZICOND-NEXT:    czero.eqz a3, a5, a0
-; RV32ZICOND-NEXT:    or a2, a3, a2
-; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32ZICOND-NEXT:    czero.nez a0, a4, a0
-; RV32ZICOND-NEXT:    or a1, a1, a0
-; RV32ZICOND-NEXT:    mv a0, a2
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add2:
@@ -145,17 +135,12 @@
 ;
 ; RV32ZICOND-LABEL: add3:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a4, a2, a4
-; RV32ZICOND-NEXT:    add a3, a1, a3
-; RV32ZICOND-NEXT:    sltu a5, a3, a1
-; RV32ZICOND-NEXT:    add a4, a4, a5
-; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
-; RV32ZICOND-NEXT:    or a3, a1, a3
-; RV32ZICOND-NEXT:    czero.nez a1, a4, a0
-; RV32ZICOND-NEXT:    czero.eqz a0, a2, a0
-; RV32ZICOND-NEXT:    or a1, a0, a1
-; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    czero.nez a4, a4, a0
+; RV32ZICOND-NEXT:    add a2, a2, a4
+; RV32ZICOND-NEXT:    czero.nez a0, a3, a0
+; RV32ZICOND-NEXT:    add a0, a1, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a1
+; RV32ZICOND-NEXT:    add a1, a2, a1
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add3:
@@ -183,17 +168,12 @@
 ;
 ; RV32ZICOND-LABEL: add4:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a2, a2, a4
-; RV32ZICOND-NEXT:    add a5, a1, a3
-; RV32ZICOND-NEXT:    sltu a1, a5, a1
+; RV32ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32ZICOND-NEXT:    add a2, a4, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32ZICOND-NEXT:    add a0, a3, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a3
 ; RV32ZICOND-NEXT:    add a1, a2, a1
-; RV32ZICOND-NEXT:    czero.eqz a2, a3, a0
-; RV32ZICOND-NEXT:    czero.nez a3, a5, a0
-; RV32ZICOND-NEXT:    or a2, a2, a3
-; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
-; RV32ZICOND-NEXT:    czero.eqz a0, a4, a0
-; RV32ZICOND-NEXT:    or a1, a0, a1
-; RV32ZICOND-NEXT:    mv a0, a2
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add4:
@@ -221,14 +201,12 @@
 ;
 ; RV32ZICOND-LABEL: sub1:
 ; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
 ; RV32ZICOND-NEXT:    sltu a5, a1, a3
-; RV32ZICOND-NEXT:    sub a4, a2, a4
-; RV32ZICOND-NEXT:    sub a4, a4, a5
-; RV32ZICOND-NEXT:    czero.eqz a4, a4, a0
-; RV32ZICOND-NEXT:    czero.nez a2, a2, a0
-; RV32ZICOND-NEXT:    or a2, a4, a2
-; RV32ZICOND-NEXT:    czero.eqz a0, a3, a0
-; RV32ZICOND-NEXT:    sub a0, a1, a0
+; RV32ZICOND-NEXT:    czero.eqz a0, a4, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a5
+; RV32ZICOND-NEXT:    sub a0, a1, a3
 ; RV32ZICOND-NEXT:    mv a1, a2
 ; RV32ZICOND-NEXT:    ret
 ;
@@ -257,14 +235,12 @@
 ;
 ; RV32ZICOND-LABEL: sub2:
 ; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
 ; RV32ZICOND-NEXT:    sltu a5, a1, a3
-; RV32ZICOND-NEXT:    sub a4, a2, a4
-; RV32ZICOND-NEXT:    sub a4, a4, a5
-; RV32ZICOND-NEXT:    czero.nez a4, a4, a0
-; RV32ZICOND-NEXT:    czero.eqz a2, a2, a0
-; RV32ZICOND-NEXT:    or a2, a2, a4
-; RV32ZICOND-NEXT:    czero.nez a0, a3, a0
-; RV32ZICOND-NEXT:    sub a0, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a5
+; RV32ZICOND-NEXT:    sub a0, a1, a3
 ; RV32ZICOND-NEXT:    mv a1, a2
 ; RV32ZICOND-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -589,19 +589,15 @@
 ;
 ; RV64IM-LABEL: select_add_1:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB16_2
-; RV64IM-NEXT:  # %bb.1:
-; RV64IM-NEXT:    addw a2, a1, a2
-; RV64IM-NEXT:  .LBB16_2: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    negw a0, a0
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    addw a0, a2, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_add_1:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addw a1, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    addw a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_add_1:
@@ -612,10 +608,8 @@
 ;
 ; RV64IMZICOND-LABEL: select_add_1:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addw a1, a1, a2
-; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
 ; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
-; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    addw a0, a2, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, %b
@@ -633,19 +627,15 @@
 ;
 ; RV64IM-LABEL: select_add_2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB17_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    addw a1, a1, a2
-; RV64IM-NEXT:  .LBB17_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    and a0, a0, a2
+; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_add_2:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addw a2, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_add_2:
@@ -656,10 +646,8 @@
 ;
 ; RV64IMZICOND-LABEL: select_add_2:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addw a2, a1, a2
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    addw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, %b
@@ -677,19 +665,16 @@
 ;
 ; RV64IM-LABEL: select_add_3:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB18_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    addiw a1, a1, 42
-; RV64IM-NEXT:  .LBB18_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    andi a0, a0, 42
+; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_add_3:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addiw a2, a1, 42
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    li a2, 42
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_add_3:
@@ -701,10 +686,9 @@
 ;
 ; RV64IMZICOND-LABEL: select_add_3:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addiw a2, a1, 42
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    li a2, 42
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    addw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, 42
@@ -770,19 +754,15 @@
 ;
 ; RV64IM-LABEL: select_sub_2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB20_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    subw a1, a1, a2
-; RV64IM-NEXT:  .LBB20_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    and a0, a0, a2
+; RV64IM-NEXT:    subw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_sub_2:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    subw a2, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    subw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_sub_2:
@@ -793,10 +773,8 @@
 ;
 ; RV64IMZICOND-LABEL: select_sub_2:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    subw a2, a1, a2
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    subw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, %b
@@ -814,19 +792,16 @@
 ;
 ; RV64IM-LABEL: select_sub_3:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB21_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    addiw a1, a1, -42
-; RV64IM-NEXT:  .LBB21_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    andi a0, a0, 42
+; RV64IM-NEXT:    subw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_sub_3:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addiw a2, a1, -42
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    li a2, 42
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    subw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_sub_3:
@@ -838,10 +813,9 @@
 ;
 ; RV64IMZICOND-LABEL: select_sub_3:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addiw a2, a1, -42
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    li a2, 42
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    subw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, 42
diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -1040,37 +1040,37 @@
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    j .LBB18_4
 ; CHECK-NEXT:  .LBB18_3:
-; CHECK-NEXT:    slliw a0, a0, 16
+; CHECK-NEXT:    slli a0, a0, 16
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB18_4: # %if.end
 ; CHECK-NEXT:    srliw a3, a0, 24
 ; CHECK-NEXT:    snez a2, a3
 ; CHECK-NEXT:    bnez a3, .LBB18_6
 ; CHECK-NEXT:  # %bb.5:
-; CHECK-NEXT:    slliw a0, a0, 8
+; CHECK-NEXT:    slli a0, a0, 8
 ; CHECK-NEXT:  .LBB18_6: # %if.end
-; CHECK-NEXT:    addiw a2, a2, -1
+; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    andi a2, a2, -8
 ; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    srliw a3, a0, 28
 ; CHECK-NEXT:    snez a2, a3
 ; CHECK-NEXT:    bnez a3, .LBB18_8
 ; CHECK-NEXT:  # %bb.7:
-; CHECK-NEXT:    slliw a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:  .LBB18_8: # %if.end
-; CHECK-NEXT:    addiw a2, a2, -1
+; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    andi a2, a2, -4
 ; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    srliw a3, a0, 30
 ; CHECK-NEXT:    snez a2, a3
 ; CHECK-NEXT:    bnez a3, .LBB18_10
 ; CHECK-NEXT:  # %bb.9:
-; CHECK-NEXT:    slliw a0, a0, 2
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:  .LBB18_10: # %if.end
-; CHECK-NEXT:    addiw a2, a2, -1
+; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    andi a2, a2, -2
+; CHECK-NEXT:    sraiw a0, a0, 31
 ; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    srli a0, a0, 31
 ; CHECK-NEXT:    add a0, a2, a0
 ; CHECK-NEXT:    addw a0, a1, a0
 ; CHECK-NEXT:  .LBB18_11: # %cleanup
@@ -1095,7 +1095,7 @@
 ; NOREMOVAL-NEXT:  # %bb.5:
 ; NOREMOVAL-NEXT:    slli a0, a0, 8
 ; NOREMOVAL-NEXT:  .LBB18_6: # %if.end
-; NOREMOVAL-NEXT:    addiw a2, a2, -1
+; NOREMOVAL-NEXT:    addi a2, a2, -1
 ; NOREMOVAL-NEXT:    andi a2, a2, -8
 ; NOREMOVAL-NEXT:    add a1, a1, a2
 ; NOREMOVAL-NEXT:    srliw a3, a0, 28
@@ -1104,7 +1104,7 @@
 ; NOREMOVAL-NEXT:  # %bb.7:
 ; NOREMOVAL-NEXT:    slli a0, a0, 4
 ; NOREMOVAL-NEXT:  .LBB18_8: # %if.end
-; NOREMOVAL-NEXT:    addiw a2, a2, -1
+; NOREMOVAL-NEXT:    addi a2, a2, -1
 ; NOREMOVAL-NEXT:    andi a2, a2, -4
 ; NOREMOVAL-NEXT:    add a1, a1, a2
 ; NOREMOVAL-NEXT:    srliw a3, a0, 30
@@ -1113,14 +1113,14 @@
 ; NOREMOVAL-NEXT:  # %bb.9:
 ; NOREMOVAL-NEXT:    slli a0, a0, 2
 ; NOREMOVAL-NEXT:  .LBB18_10: # %if.end
-; NOREMOVAL-NEXT:    sext.w a0, a0
-; NOREMOVAL-NEXT:    addiw a2, a2, -1
+; NOREMOVAL-NEXT:    addi a2, a2, -1
 ; NOREMOVAL-NEXT:    andi a2, a2, -2
+; NOREMOVAL-NEXT:    sraiw a0, a0, 31
 ; NOREMOVAL-NEXT:    not a0, a0
-; NOREMOVAL-NEXT:    srli a0, a0, 31
 ; NOREMOVAL-NEXT:    add a0, a2, a0
-; NOREMOVAL-NEXT:    addw a0, a1, a0
+; NOREMOVAL-NEXT:    add a0, a1, a0
 ; NOREMOVAL-NEXT:  .LBB18_11: # %cleanup
+; NOREMOVAL-NEXT:    sext.w a0, a0
 ; NOREMOVAL-NEXT:    ret
 entry:
   %tobool.not = icmp eq i32 %x, 0