diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -1338,14 +1338,8 @@ Val = N.getOperand(0); return true; } - // FIXME: Should we just call computeNumSignBits here? - if (N.getOpcode() == ISD::AssertSext && - cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) { - Val = N; - return true; - } - if (N.getOpcode() == ISD::AssertZext && - cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32)) { + MVT VT = N.getSimpleValueType(); + if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) { Val = N; return true; } @@ -1361,9 +1355,9 @@ return true; } } - // FIXME: Should we just call computeKnownBits here? - if (N.getOpcode() == ISD::AssertZext && - cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) { + MVT VT = N.getSimpleValueType(); + APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32); + if (CurDAG->MaskedValueIsZero(N, Mask)) { Val = N; return true; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td @@ -1029,7 +1029,7 @@ let Predicates = [HasStdExtZbb, IsRV64] in { def : PatGpr<riscv_clzw, CLZW>; def : PatGpr<riscv_ctzw, CTZW>; -def : Pat<(i64 (ctpop (and GPR:$rs1, 0xFFFFFFFF))), (CPOPW GPR:$rs1)>; +def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>; } // Predicates = [HasStdExtZbb, IsRV64] let Predicates = [HasStdExtZbp, IsRV64] in { diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -206,7 +206,7 @@ ; RV64IFD-LABEL: fcvt_d_w_load: ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: lw a0, 0(a0) -; RV64IFD-NEXT: fcvt.d.l ft0, a0 +; RV64IFD-NEXT: fcvt.d.w ft0, a0 ; RV64IFD-NEXT: fmv.x.d a0, ft0 ; RV64IFD-NEXT: ret %a = load i32, i32* %p @@ -249,7 +249,7
@@ ; RV64IFD-LABEL: fcvt_d_wu_load: ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: lwu a0, 0(a0) -; RV64IFD-NEXT: fcvt.d.lu ft0, a0 +; RV64IFD-NEXT: fcvt.d.wu ft0, a0 ; RV64IFD-NEXT: fmv.x.d a0, ft0 ; RV64IFD-NEXT: ret %a = load i32, i32* %p diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -207,7 +207,7 @@ ; RV64IF-LABEL: fcvt_s_w_load: ; RV64IF: # %bb.0: ; RV64IF-NEXT: lw a0, 0(a0) -; RV64IF-NEXT: fcvt.s.l ft0, a0 +; RV64IF-NEXT: fcvt.s.w ft0, a0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %a = load i32, i32* %p @@ -242,7 +242,7 @@ ; RV64IF-LABEL: fcvt_s_wu_load: ; RV64IF: # %bb.0: ; RV64IF-NEXT: lwu a0, 0(a0) -; RV64IF-NEXT: fcvt.s.lu ft0, a0 +; RV64IF-NEXT: fcvt.s.wu ft0, a0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %a = load i32, i32* %p diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -855,14 +855,14 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: slli a0, a0, 48 ; RV64IZFH-NEXT: srai a0, a0, 48 -; RV64IZFH-NEXT: fcvt.h.l fa0, a0 +; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_si: ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: slli a0, a0, 48 ; RV64IDZFH-NEXT: srai a0, a0, 48 -; RV64IDZFH-NEXT: fcvt.h.l fa0, a0 +; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret %1 = sitofp i16 %a to half ret half %1 @@ -914,7 +914,7 @@ ; RV64IZFH-NEXT: lui a1, 16 ; RV64IZFH-NEXT: addiw a1, a1, -1 ; RV64IZFH-NEXT: and a0, a0, a1 -; RV64IZFH-NEXT: fcvt.h.lu fa0, a0 +; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_ui: @@ -922,7 +922,7 @@ ; RV64IDZFH-NEXT: lui a1, 16 ; RV64IDZFH-NEXT: addiw a1, a1, -1 ; RV64IDZFH-NEXT: and a0, a0, a1 -; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0 +; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret %1 = 
uitofp i16 %a to half ret half %1 @@ -992,13 +992,13 @@ ; RV64IZFH-LABEL: fcvt_h_w_load: ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: lw a0, 0(a0) -; RV64IZFH-NEXT: fcvt.h.l fa0, a0 +; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_w_load: ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: lw a0, 0(a0) -; RV64IDZFH-NEXT: fcvt.h.l fa0, a0 +; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret %a = load i32, i32* %p %1 = sitofp i32 %a to half @@ -1045,13 +1045,13 @@ ; RV64IZFH-LABEL: fcvt_h_wu_load: ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: lwu a0, 0(a0) -; RV64IZFH-NEXT: fcvt.h.lu fa0, a0 +; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_h_wu_load: ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: lwu a0, 0(a0) -; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0 +; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret %a = load i32, i32* %p %1 = uitofp i32 %a to half diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll --- a/llvm/test/CodeGen/RISCV/rem.ll +++ b/llvm/test/CodeGen/RISCV/rem.ll @@ -232,7 +232,7 @@ ; RV64IM-NEXT: srai a1, a1, 56 ; RV64IM-NEXT: slli a0, a0, 56 ; RV64IM-NEXT: srai a0, a0, 56 -; RV64IM-NEXT: rem a0, a0, a1 +; RV64IM-NEXT: remw a0, a0, a1 ; RV64IM-NEXT: ret %1 = srem i8 %a, %b ret i8 %1 @@ -328,7 +328,7 @@ ; RV64IM-NEXT: srai a1, a1, 48 ; RV64IM-NEXT: slli a0, a0, 48 ; RV64IM-NEXT: srai a0, a0, 48 -; RV64IM-NEXT: rem a0, a0, a1 +; RV64IM-NEXT: remw a0, a0, a1 ; RV64IM-NEXT: ret %1 = srem i16 %a, %b ret i16 %1 diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -1060,13 +1060,13 @@ ; RV64IB-LABEL: ctpop_i32_load: ; RV64IB: # %bb.0: ; RV64IB-NEXT: lwu a0, 0(a0) -; RV64IB-NEXT: cpop a0, a0 +; RV64IB-NEXT: cpopw a0, a0 ; RV64IB-NEXT: ret ; ; RV64IBB-LABEL: ctpop_i32_load: ; RV64IBB: # %bb.0: ; RV64IBB-NEXT: lwu a0, 0(a0) -; RV64IBB-NEXT: cpop a0, a0 +; RV64IBB-NEXT: cpopw a0, a0 ; 
RV64IBB-NEXT: ret %a = load i32, i32* %p %1 = call i32 @llvm.ctpop.i32(i32 %a)