diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13821,6 +13821,11 @@
         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
     return std::min(Tmp, Tmp2);
   }
+  case RISCVISD::CZERO_EQZ:
+  case RISCVISD::CZERO_NEZ:
+    // Output is either all zero or operand 0. We can propagate sign bit count
+    // from operand 0.
+    return DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
   case RISCVISD::ABSW: {
     // We expand this at isel to negw+max. The result will have 33 sign bits
     // if the input has at least 33 sign bits.
diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -3467,15 +3467,14 @@
 ; RV64XVENTANACONDOPS-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64XVENTANACONDOPS-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a3, a3, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a3
-; RV64XVENTANACONDOPS-NEXT:    slli s0, a0, 48
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc s0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or s0, s0, a3
 ; RV64XVENTANACONDOPS-NEXT:    beqz a1, .LBB58_2
 ; RV64XVENTANACONDOPS-NEXT:  # %bb.1:
-; RV64XVENTANACONDOPS-NEXT:    srai a0, s0, 48
+; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
 ; RV64XVENTANACONDOPS-NEXT:    call bat@plt
 ; RV64XVENTANACONDOPS-NEXT:  .LBB58_2:
-; RV64XVENTANACONDOPS-NEXT:    srai a0, s0, 48
+; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
 ; RV64XVENTANACONDOPS-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64XVENTANACONDOPS-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, 16
@@ -3505,15 +3504,14 @@
 ; RV32ZICOND-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32ZICOND-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
-; RV32ZICOND-NEXT:    czero.eqz a0, a2, a0
-; RV32ZICOND-NEXT:    or a0, a0, a3
-; RV32ZICOND-NEXT:    slli s0, a0, 16
+; RV32ZICOND-NEXT:    czero.eqz s0, a2, a0
+; RV32ZICOND-NEXT:    or s0, s0, a3
 ; RV32ZICOND-NEXT:    beqz a1, .LBB58_2
 ; RV32ZICOND-NEXT:  # %bb.1:
-; RV32ZICOND-NEXT:    srai a0, s0, 16
+; RV32ZICOND-NEXT:    mv a0, s0
 ; RV32ZICOND-NEXT:    call bat@plt
 ; RV32ZICOND-NEXT:  .LBB58_2:
-; RV32ZICOND-NEXT:    srai a0, s0, 16
+; RV32ZICOND-NEXT:    mv a0, s0
 ; RV32ZICOND-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32ZICOND-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32ZICOND-NEXT:    addi sp, sp, 16
@@ -3525,15 +3523,14 @@
 ; RV64ZICOND-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64ZICOND-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64ZICOND-NEXT:    czero.nez a3, a3, a0
-; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
-; RV64ZICOND-NEXT:    or a0, a0, a3
-; RV64ZICOND-NEXT:    slli s0, a0, 48
+; RV64ZICOND-NEXT:    czero.eqz s0, a2, a0
+; RV64ZICOND-NEXT:    or s0, s0, a3
 ; RV64ZICOND-NEXT:    beqz a1, .LBB58_2
 ; RV64ZICOND-NEXT:  # %bb.1:
-; RV64ZICOND-NEXT:    srai a0, s0, 48
+; RV64ZICOND-NEXT:    mv a0, s0
 ; RV64ZICOND-NEXT:    call bat@plt
 ; RV64ZICOND-NEXT:  .LBB58_2:
-; RV64ZICOND-NEXT:    srai a0, s0, 48
+; RV64ZICOND-NEXT:    mv a0, s0
 ; RV64ZICOND-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64ZICOND-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64ZICOND-NEXT:    addi sp, sp, 16
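
Background on the new ComputeNumSignBits case: czero.eqz/czero.nez (and the equivalent XVentanaCondOps vt.maskc/vt.maskcn) produce either all-zeros or operand 0 unchanged. Since the value zero has XLen sign bits, the result has at least min(XLen, NumSignBits(op0)) = NumSignBits(op0) sign bits, so forwarding operand 0's count is a safe lower bound. The IR below is a minimal sketch of the kind of pattern the updated test exercises; the function and value names (@czero_sext, %c, %d, %a, %b) are hypothetical and not necessarily the exact function from condops.ll, though @bat is the callee visible in the CHECK lines:

declare void @bat(i16 signext)

; A select between two already-sign-extended i16 values lowers to a
; czero.eqz/czero.nez + or sequence on Zicond targets. With the new
; ComputeNumSignBits case, the backend can see through that sequence and
; prove the or result is already sign-extended, so no slli/srai pair is
; needed to satisfy the signext ABI attribute on the calls to @bat.
define void @czero_sext(i1 zeroext %c, i1 zeroext %d, i16 signext %a, i16 signext %b) {
entry:
  %sel = select i1 %c, i16 %a, i16 %b
  br i1 %d, label %do_call, label %exit

do_call:
  call void @bat(i16 signext %sel)
  br label %exit

exit:
  tail call void @bat(i16 signext %sel)
  ret void
}

This is what the updated CHECK lines reflect: the slli/srai (by 48 on RV64, 16 on RV32) around the conditional-zero result is gone, and the value is kept live across the call in s0 and moved with a plain mv.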