diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6822,6 +6822,10 @@
     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
            "Unexpected custom legalisation");
     if (N->getOperand(1).getOpcode() != ISD::Constant) {
+      // If we can use a BSET instruction, allow default promotion to apply.
+      if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
+          isOneConstant(N->getOperand(0)))
+        break;
       Results.push_back(customLegalizeToWOp(N, DAG));
       break;
     }
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -15,10 +15,9 @@
 ;
 ; RV64ZBS-LABEL: bclr_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -38,10 +37,8 @@
 ;
 ; RV64ZBS-LABEL: bclr_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %neg = xor i32 %shl, -1
@@ -62,10 +59,8 @@
 ; RV64ZBS-LABEL: bclr_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    not a1, a1
-; RV64ZBS-NEXT:    and a0, a1, a0
+; RV64ZBS-NEXT:    bclr a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
@@ -123,9 +118,9 @@
 ;
 ; RV64ZBS-LABEL: bset_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -143,9 +138,8 @@
 ;
 ; RV64ZBS-LABEL: bset_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %or = or i32 %shl, %a
@@ -164,9 +158,8 @@
 ; RV64ZBS-LABEL: bset_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    or a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
@@ -184,8 +177,8 @@
 ;
 ; RV64ZBS-LABEL: bset_i32_zero:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a1, 1
-; RV64ZBS-NEXT:    sllw a0, a1, a0
+; RV64ZBS-NEXT:    bset a0, zero, a0
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %a
   ret i32 %shl
@@ -252,9 +245,9 @@
 ;
 ; RV64ZBS-LABEL: binv_i32:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    andi a1, a1, 31
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %b, 31
   %shl = shl nuw i32 1, %and
@@ -272,9 +265,8 @@
 ;
 ; RV64ZBS-LABEL: binv_i32_no_mask:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %shl = shl i32 1, %b
   %xor = xor i32 %shl, %a
@@ -293,9 +285,8 @@
 ; RV64ZBS-LABEL: binv_i32_load:
 ; RV64ZBS:       # %bb.0:
 ; RV64ZBS-NEXT:    lw a0, 0(a0)
-; RV64ZBS-NEXT:    li a2, 1
-; RV64ZBS-NEXT:    sllw a1, a2, a1
-; RV64ZBS-NEXT:    xor a0, a1, a0
+; RV64ZBS-NEXT:    binv a0, a0, a1
+; RV64ZBS-NEXT:    sext.w a0, a0
 ; RV64ZBS-NEXT:    ret
   %a = load i32, i32* %p
   %shl = shl i32 1, %b
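A minimal way to exercise the new lowering outside the full test file is a standalone .ll reduced from the bset_i32_no_mask case above (a sketch: the RUN line and check lines mirror rv64zbs.ll, but the Zbs attribute may be spelled +experimental-zbs in trees where the extension is not yet ratified):

; RUN: llc -mtriple=riscv64 -mattr=+zbs -verify-machineinstrs < %s | FileCheck %s

define i32 @bset_i32_no_mask(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: bset_i32_no_mask:
; CHECK:       bset a0, a0, a1
; CHECK-NEXT:  sext.w a0, a0
  %shl = shl i32 1, %b
  %or = or i32 %shl, %a
  ret i32 %or
}

With the RISCVISelLowering.cpp change, the i32 shift of constant 1 is left to default type promotion instead of customLegalizeToWOp, so the (or X, (shl 1, Y)) pattern survives to instruction selection and folds into a single bset plus the sign extension required for the i32 return value.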