diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -454,11 +454,14 @@
         isa<ConstantSDNode>(Srl.getOperand(1)) &&
         isa<ConstantSDNode>(Shl.getOperand(1)) &&
         isa<ConstantSDNode>(And.getOperand(1))) {
-      uint32_t VC1 = Srl.getConstantOperandVal(1);
-      uint32_t VC2 = Shl.getConstantOperandVal(1);
-      uint32_t VC3 = And.getConstantOperandVal(1);
+      uint64_t VC1 = Srl.getConstantOperandVal(1);
+      uint64_t VC2 = Shl.getConstantOperandVal(1);
+      uint64_t VC3 = And.getConstantOperandVal(1);
+      // The mask needs to be 0xffffffff, but SimplifyDemandedBits may
+      // have removed lower bits that aren't necessary due to the right
+      // shift.
       if (VC2 == (32 - VC1) &&
-          VC3 == maskLeadingOnes<uint32_t>(VC2)) {
+          (VC3 | maskTrailingOnes<uint64_t>(VC1)) == 0xffffffff) {
         RS1 = Shl.getOperand(0);
         Shamt = CurDAG->getTargetConstant(VC1, SDLoc(N),
                                           Srl.getOperand(1).getValueType());
diff --git a/llvm/test/CodeGen/RISCV/rv64Zbbp.ll b/llvm/test/CodeGen/RISCV/rv64Zbbp.ll
--- a/llvm/test/CodeGen/RISCV/rv64Zbbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbbp.ll
@@ -387,7 +387,6 @@
 ; This is similar to the type legalized roriw pattern, but the and mask is more
 ; than 32 bits so the lshr doesn't shift zeroes into the lower 32 bits. Make
 ; sure we don't match it to roriw.
-; FIXME: We are currently truncating the mask to 32-bits before checking.
 define i64 @roriw_bug(i64 %x) nounwind {
 ; RV64I-LABEL: roriw_bug:
 ; RV64I:       # %bb.0:
@@ -401,23 +400,32 @@
 ;
 ; RV64IB-LABEL: roriw_bug:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    andi a1, a0, -2
-; RV64IB-NEXT:    roriw a0, a0, 1
-; RV64IB-NEXT:    xor a0, a1, a0
+; RV64IB-NEXT:    slli a1, a0, 31
+; RV64IB-NEXT:    andi a0, a0, -2
+; RV64IB-NEXT:    srli a2, a0, 1
+; RV64IB-NEXT:    or a1, a1, a2
+; RV64IB-NEXT:    sext.w a1, a1
+; RV64IB-NEXT:    xor a0, a0, a1
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBB-LABEL: roriw_bug:
 ; RV64IBB:       # %bb.0:
-; RV64IBB-NEXT:    andi a1, a0, -2
-; RV64IBB-NEXT:    roriw a0, a0, 1
-; RV64IBB-NEXT:    xor a0, a1, a0
+; RV64IBB-NEXT:    slli a1, a0, 31
+; RV64IBB-NEXT:    andi a0, a0, -2
+; RV64IBB-NEXT:    srli a2, a0, 1
+; RV64IBB-NEXT:    or a1, a1, a2
+; RV64IBB-NEXT:    sext.w a1, a1
+; RV64IBB-NEXT:    xor a0, a0, a1
 ; RV64IBB-NEXT:    ret
 ;
 ; RV64IBP-LABEL: roriw_bug:
 ; RV64IBP:       # %bb.0:
-; RV64IBP-NEXT:    andi a1, a0, -2
-; RV64IBP-NEXT:    roriw a0, a0, 1
-; RV64IBP-NEXT:    xor a0, a1, a0
+; RV64IBP-NEXT:    slli a1, a0, 31
+; RV64IBP-NEXT:    andi a0, a0, -2
+; RV64IBP-NEXT:    srli a2, a0, 1
+; RV64IBP-NEXT:    or a1, a1, a2
+; RV64IBP-NEXT:    sext.w a1, a1
+; RV64IBP-NEXT:    xor a0, a0, a1
 ; RV64IBP-NEXT:    ret
   %a = shl i64 %x, 31
   %b = and i64 %x, 18446744073709551614
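
A note on the new predicate (commentary, not part of the patch): the sketch below is a minimal, self-contained model of the check introduced above. `maskTrailingOnes64` is a hand-rolled stand-in for `llvm::maskTrailingOnes<uint64_t>`, and `isValidRoriwMask` is a hypothetical name for the condition; both exist only for illustration. The idea is that the `srl` already zeroes the low `VC1` bits, so SimplifyDemandedBits is free to clear them in the `and` mask; OR-ing them back in must reconstruct exactly `0xffffffff`, and any set bit above bit 31 disqualifies the pattern.

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for llvm::maskTrailingOnes<uint64_t>: a mask with the low N bits set.
static uint64_t maskTrailingOnes64(unsigned N) {
  assert(N <= 64 && "invalid bit count");
  return N == 0 ? 0 : (UINT64_MAX >> (64 - N));
}

// Models the patched check: after filling back the low VC1 bits that the
// right shift discards anyway, the and-mask must be exactly 0xffffffff.
static bool isValidRoriwMask(uint64_t VC3, uint64_t VC1) {
  return (VC3 | maskTrailingOnes64(VC1)) == 0xffffffff;
}

int main() {
  // Mask with the low bit cleared by SimplifyDemandedBits for a 1-bit
  // shift: still a legal RORIW.
  assert(isValidRoriwMask(0xfffffffe, 1));
  // The roriw_bug mask (-2 as i64): bits 32..63 are set, so the source
  // is not a pure i32 rotate and RORIW must not be selected.
  assert(!isValidRoriwMask(0xfffffffffffffffe, 1));
  return 0;
}
```

Under the old check, `0xfffffffffffffffe` was truncated to `uint32_t` (`0xfffffffe`), which equals `maskLeadingOnes<uint32_t>(31)`, so the rotate was matched and the upper 32 bits of the input were silently dropped. The updated RV64IB/RV64IBB/RV64IBP check lines show the correct shift/or/sext.w sequence being emitted instead.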