diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2245,6 +2245,7 @@
   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
           Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
+          Node->getOpcode() == ISD::OR ||
           Node->getOpcode() == ISD::XOR || Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
           isa<ConstantSDNode>(Node)) &&
          "Unexpected opcode");
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1688,10 +1688,14 @@
 
 // Use binop_allwusers to recover immediates that may have been broken by
 // SimplifyDemandedBits.
-// TODO: This is valid for ADDI/ORI/XORI.
 def : Pat<(binop_allwusers<and> GPR:$rs1, u32simm12:$imm),
           (ANDI GPR:$rs1, u32simm12:$imm)>;
+
+def : Pat<(binop_allwusers<or> GPR:$rs1, u32simm12:$imm),
+          (ORI GPR:$rs1, u32simm12:$imm)>;
+
+def : Pat<(binop_allwusers<xor> GPR:$rs1, u32simm12:$imm),
+          (XORI GPR:$rs1, u32simm12:$imm)>;
 
 /// Loads
 
 defm : LdPat<sextloadi8, LB>;
diff --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
--- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll
@@ -118,6 +118,44 @@
   ret i32 %5
 }
 
+define signext i32 @addi_sub_cse(i32 signext %0, i32 signext %1, ptr %2) {
+; CHECK-LABEL: addi_sub_cse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subw a0, a0, a1
+; CHECK-NEXT:    addiw a0, a0, -8
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    ret
+  %4 = add i32 %0, -8
+  %5 = sub i32 %4, %1
+  store i32 %5, ptr %2, align 4
+  ret i32 %5
+}
+
+define signext i32 @xori_sub_cse(i32 signext %0, i32 signext %1, ptr %2) {
+; CHECK-LABEL: xori_sub_cse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xori a0, a0, -8
+; CHECK-NEXT:    subw a0, a0, a1
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    ret
+  %4 = xor i32 %0, -8
+  %5 = sub i32 %4, %1
+  store i32 %5, ptr %2, align 4
+  ret i32 %5
+}
+
+define signext i32 @ori_sub_cse(i32 signext %0, i32 signext %1, ptr %2) {
+; CHECK-LABEL: ori_sub_cse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ori a0, a0, -8
+; CHECK-NEXT:    subw a0, a0, a1
+; CHECK-NEXT:    sw a0, 0(a2)
+; CHECK-NEXT:    ret
+  %4 = or i32 %0, -8
+  %5 = sub i32 %4, %1
+  store i32 %5, ptr %2, align 4
+  ret i32 %5
+}
 ; SimplifyDemandedBits breaks the ANDI by turning -8 into 0xfffffff8. This
 ; gets CSEd with the AND needed for type legalizing the lshr. This increases
 ; the use count of the AND with 0xfffffff8 making TargetShrinkDemandedConstant