Index: llvm/lib/Target/RISCV/RISCVInstrInfoB.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -657,6 +657,28 @@
 def : Pat<(fshr GPR:$rs1, GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]
 
+let Predicates = [HasStdExtZbs, IsRV32] in {
+def : Pat<(and (xor (shl 1, GPR:$rs2), -1), GPR:$rs1),
+          (SBCLR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (SBCLR GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbs, IsRV32]
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(and (xor (riscv_sllw 1, GPR:$rs2), -1), GPR:$rs1),
+          (SBCLR GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(or (shl 1, GPR:$rs2), GPR:$rs1), (SBSET GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(or (riscv_sllw 1, GPR:$rs2), GPR:$rs1), (SBSET GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs, IsRV32] in
+def : Pat<(xor (shl 1, GPR:$rs2), GPR:$rs1), (SBINV GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbs, IsRV64] in
+def : Pat<(xor (riscv_sllw 1, GPR:$rs2), GPR:$rs1), (SBINV GPR:$rs1, GPR:$rs2)>;
+
+let Predicates = [HasStdExtZbs] in
+def : Pat<(and (srl GPR:$rs1, GPR:$rs2), 1), (SBEXT GPR:$rs1, GPR:$rs2)>;
+
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : Pat<(or (or (and (srl GPR:$rs1, (i32 1)), (i32 0x55555555)), GPR:$rs1),
               (and (shl GPR:$rs1, (i32 1)), (i32 0xAAAAAAAA))),
Index: llvm/test/CodeGen/RISCV/rv32Zbs.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zbs.ll
@@ -0,0 +1,55 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbs -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+
+define i32 @sbclr(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: sbclr a0, a0, a1
+;
+; RV32IB-LABEL: sbclr:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sbclr a0, a0, a1
+; RV32IB-NEXT: ret
+  %shl = shl i32 1, %b
+  %neg = xor i32 %shl, -1
+  %and = and i32 %neg, %a
+  ret i32 %and
+}
+
+define i32 @sbset(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: sbset a0, a0, a1
+;
+; RV32IB-LABEL: sbset:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sbset a0, a0, a1
+; RV32IB-NEXT: ret
+  %shl = shl i32 1, %b
+  %or = or i32 %shl, %a
+  ret i32 %or
+}
+
+define dso_local i32 @sbinv(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: sbinv a0, a0, a1
+;
+; RV32IB-LABEL: sbinv:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sbinv a0, a0, a1
+; RV32IB-NEXT: ret
+  %shl = shl i32 1, %b
+  %xor = xor i32 %shl, %a
+  ret i32 %xor
+}
+
+define i32 @sbext(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: sbext a0, a0, a1
+;
+; RV32IB-LABEL: sbext:
+; RV32IB: # %bb.0:
+; RV32IB-NEXT: sbext a0, a0, a1
+; RV32IB-NEXT: ret
+  %shr = lshr i32 %a, %b
+  %and = and i32 %shr, 1
+  ret i32 %and
+}
Index: llvm/test/CodeGen/RISCV/rv64Zbs.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zbs.ll
@@ -0,0 +1,61 @@
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbs -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+
+define i64 @sbclr(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: sbclr a0, a0, a1
+;
+; RV64IB-LABEL: sbclr:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sbclr a0, a0, a1
+; RV64IB-NEXT: ret
+  %sh_prom = trunc i64 %b to i32
+  %shl = shl i32 1, %sh_prom
+  %neg = xor i32 %shl, -1
+  %conv = sext i32 %neg to i64
+  %and = and i64 %conv, %a
+  ret i64 %and
+}
+
+define i64 @sbset(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: sbset a0, a0, a1
+;
+; RV64IB-LABEL: sbset:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sbset a0, a0, a1
+; RV64IB-NEXT: ret
+  %sh_prom = trunc i64 %b to i32
+  %shl = shl i32 1, %sh_prom
+  %conv = sext i32 %shl to i64
+  %or = or i64 %conv, %a
+  ret i64 %or
+}
+
+define i64 @sbinv(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: sbinv a0, a0, a1
+;
+; RV64IB-LABEL: sbinv:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sbinv a0, a0, a1
+; RV64IB-NEXT: ret
+  %sh_prom = trunc i64 %b to i32
+  %shl = shl i32 1, %sh_prom
+  %conv = sext i32 %shl to i64
+  %xor = xor i64 %conv, %a
+  ret i64 %xor
+}
+
+define i64 @sbext(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: sbext a0, a0, a1
+;
+; RV64IB-LABEL: sbext:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: sbext a0, a0, a1
+; RV64IB-NEXT: ret
+  %shr = lshr i64 %a, %b
+  %and = and i64 %shr, 1
+  ret i64 %and
+}