diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -5925,6 +5925,9 @@ if (ShiftAmt.uge(VTBitWidth)) return SDValue(); + if (!TLI.hasBitTest(Srl.getOperand(0), Srl.getOperand(1))) + return SDValue(); + // Turn this into a bit-test pattern using mask op + setcc: // and (not (srl X, C)), 1 --> (and X, 1 << C) == 0 // and (srl (not X), C)), 1 --> (and X, 1 << C) == 0 SDLoc DL(And); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -330,5 +330,6 @@ bool hasAndNotCompare(SDValue Y) const override; + bool hasBitTest(SDValue X, SDValue Y) const override; bool shouldSinkOperands(Instruction *I, SmallVectorImpl<Use *> &Ops) const override; bool isFPImmLegal(const APFloat &Imm, EVT VT, diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1242,6 +1242,11 @@ !isa<ConstantSDNode>(Y); } +bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const { + auto *C = dyn_cast<ConstantSDNode>(Y); + return C && C->getAPIntValue().ule(10); +} + /// Check if sinking \p I's operands to I's basic block is profitable, because /// the operands can be folded into a target instruction, e.g. /// splats of scalars can fold into vector instructions. 
diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll --- a/llvm/test/CodeGen/RISCV/rv32zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll @@ -358,15 +358,14 @@ define i32 @bexti_xor_i32(i32 %a) nounwind { ; RV32I-LABEL: bexti_xor_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: srli a0, a0, 7 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: andi a0, a0, 128 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bexti_xor_i32: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: bexti a0, a0, 7 -; RV32ZBS-NEXT: xori a0, a0, 1 +; RV32ZBS-NEXT: andi a0, a0, 128 +; RV32ZBS-NEXT: seqz a0, a0 ; RV32ZBS-NEXT: ret %shr = lshr i32 %a, 7 %not = xor i32 %shr, -1 @@ -377,16 +376,15 @@ define i64 @bexti_xor_i64(i64 %a) nounwind { ; RV32I-LABEL: bexti_xor_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: srli a0, a0, 7 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: andi a0, a0, 128 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bexti_xor_i64: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: bexti a0, a0, 7 -; RV32ZBS-NEXT: xori a0, a0, 1 +; RV32ZBS-NEXT: andi a0, a0, 128 +; RV32ZBS-NEXT: seqz a0, a0 ; RV32ZBS-NEXT: li a1, 0 ; RV32ZBS-NEXT: ret %shr = lshr i64 %a, 7 @@ -398,15 +396,14 @@ define i32 @bexti_xor_i32_1(i32 %a) nounwind { ; RV32I-LABEL: bexti_xor_i32_1: ; RV32I: # %bb.0: -; RV32I-NEXT: srli a0, a0, 7 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: andi a0, a0, 128 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bexti_xor_i32_1: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: bexti a0, a0, 7 -; RV32ZBS-NEXT: xori a0, a0, 1 +; RV32ZBS-NEXT: andi a0, a0, 128 +; RV32ZBS-NEXT: seqz a0, a0 ; RV32ZBS-NEXT: ret %shr = lshr i32 %a, 7 %and = and i32 %shr, 1 @@ -417,16 +414,15 @@ define i64 @bexti_xor_i64_1(i64 %a) nounwind { ; RV32I-LABEL: bexti_xor_i64_1: ; RV32I: # %bb.0: -; RV32I-NEXT: srli a0, a0, 7 -; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: andi 
a0, a0, 128 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bexti_xor_i64_1: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: bexti a0, a0, 7 -; RV32ZBS-NEXT: xori a0, a0, 1 +; RV32ZBS-NEXT: andi a0, a0, 128 +; RV32ZBS-NEXT: seqz a0, a0 ; RV32ZBS-NEXT: li a1, 0 ; RV32ZBS-NEXT: ret %shr = lshr i64 %a, 7 diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll --- a/llvm/test/CodeGen/RISCV/rv64zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll @@ -441,15 +441,14 @@ define signext i32 @bexti_xor_i32(i32 signext %a) nounwind { ; RV64I-LABEL: bexti_xor_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 7 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: andi a0, a0, 128 +; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: bexti_xor_i32: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: bexti a0, a0, 7 -; RV64ZBS-NEXT: xori a0, a0, 1 +; RV64ZBS-NEXT: andi a0, a0, 128 +; RV64ZBS-NEXT: seqz a0, a0 ; RV64ZBS-NEXT: ret %shr = lshr i32 %a, 7 %not = xor i32 %shr, -1 @@ -460,15 +459,14 @@ define i64 @bexti_xor_i64(i64 %a) nounwind { ; RV64I-LABEL: bexti_xor_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 7 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: andi a0, a0, 128 +; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: bexti_xor_i64: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: bexti a0, a0, 7 -; RV64ZBS-NEXT: xori a0, a0, 1 +; RV64ZBS-NEXT: andi a0, a0, 128 +; RV64ZBS-NEXT: seqz a0, a0 ; RV64ZBS-NEXT: ret %shr = lshr i64 %a, 7 %not = xor i64 %shr, -1 @@ -479,15 +477,14 @@ define signext i32 @bexti_xor_i32_1(i32 signext %a) nounwind { ; RV64I-LABEL: bexti_xor_i32_1: ; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 7 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: andi a0, a0, 128 +; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: bexti_xor_i32_1: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: bexti a0, a0, 7 -; RV64ZBS-NEXT: xori a0, a0, 1 +; 
RV64ZBS-NEXT: andi a0, a0, 128 +; RV64ZBS-NEXT: seqz a0, a0 ; RV64ZBS-NEXT: ret %shr = lshr i32 %a, 7 %and = and i32 %shr, 1 @@ -498,15 +495,14 @@ define i64 @bexti_xor_i64_1(i64 %a) nounwind { ; RV64I-LABEL: bexti_xor_i64_1: ; RV64I: # %bb.0: -; RV64I-NEXT: srli a0, a0, 7 -; RV64I-NEXT: not a0, a0 -; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: andi a0, a0, 128 +; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret ; ; RV64ZBS-LABEL: bexti_xor_i64_1: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: bexti a0, a0, 7 -; RV64ZBS-NEXT: xori a0, a0, 1 +; RV64ZBS-NEXT: andi a0, a0, 128 +; RV64ZBS-NEXT: seqz a0, a0 ; RV64ZBS-NEXT: ret %shr = lshr i64 %a, 7 %and = and i64 %shr, 1