diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6119,11 +6119,14 @@
   if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL))
     return V;
 
+  if (N1.getOpcode() == ISD::SRL)
+    std::swap(N0, N1);
+
   // TODO: Rewrite this to return a new 'AND' instead of using CombineTo.
-  if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL &&
-      VT.getSizeInBits() <= 64 && N0->hasOneUse()) {
-    if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
-      if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
+  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::ADD &&
+      VT.getSizeInBits() <= 64 && N1->hasOneUse()) {
+    if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) {
+      if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
         // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal
         // immediate for an add, but it is legal if its top c2 bits are set,
         // transform the ADD so the immediate doesn't need to be materialized
@@ -6134,14 +6137,14 @@
             !TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
           APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                              SRLC.getZExtValue());
-          if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) {
+          if (DAG.MaskedValueIsZero(N1.getOperand(1), Mask)) {
             ADDC |= Mask;
             if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) {
-              SDLoc DL0(N0);
+              SDLoc DL0(N1);
               SDValue NewAdd =
                 DAG.getNode(ISD::ADD, DL0, VT,
-                            N0.getOperand(0), DAG.getConstant(ADDC, DL, VT));
-              CombineTo(N0.getNode(), NewAdd);
+                            N1.getOperand(0), DAG.getConstant(ADDC, DL, VT));
+              CombineTo(N1.getNode(), NewAdd);
               // Return N so it doesn't get rechecked!
               return SDValue(N, 0);
             }
diff --git a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
--- a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
+++ b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
 
 @a = global [4096 x i32] zeroinitializer, align 4
 @b = global [4096 x i32] zeroinitializer, align 4
@@ -46,3 +48,16 @@
 for.end:
   ret i32 0
 }
+
+define i32 @lsr_addimm(i32 %x, i32 %y) {
+; RV64I-LABEL: lsr_addimm:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    srliw a1, a1, 20
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = add i32 %x, 4095
+  %2 = lshr i32 %y, 20
+  %r = and i32 %2, %1
+  ret i32 %r
+}