Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -165,6 +165,11 @@
   if (Subtarget.hasStdExtZbp())
     setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
 
+  if (Subtarget.hasStdExtZbt()) {
+    setOperationAction(ISD::FSHL, XLenVT, Legal);
+    setOperationAction(ISD::FSHR, XLenVT, Legal);
+  }
+
   ISD::CondCode FPCCToExtend[] = {
       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
Index: llvm/lib/Target/RISCV/RISCVInstrInfoB.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -769,6 +769,18 @@
 def : Pat<(bitreverse GPR:$rs1), (GREVI GPR:$rs1, (i64 63))>;
 } // Predicates = [HasStdExtZbp, IsRV64]
 
+let Predicates = [HasStdExtZbt] in {
+def : Pat<(or (and (xor GPR:$rs2, -1), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
+          (CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+// (XLenVT 17) is the raw integer value of the condition code ISD::SETEQ.
+def : Pat<(riscv_selectcc GPR:$rs2, (XLenVT 0), (XLenVT 17), GPR:$rs3, GPR:$rs1),
+          (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(fshl GPR:$rs1, GPR:$rs2, GPR:$rs3),
+          (FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+def : Pat<(fshr GPR:$rs1, GPR:$rs2, GPR:$rs3),
+          (FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
+} // Predicates = [HasStdExtZbt]
+
 let Predicates = [HasStdExtZbb] in {
 def : Pat<(ctlz GPR:$rs1), (CLZ GPR:$rs1)>;
 def : Pat<(cttz GPR:$rs1), (CTZ GPR:$rs1)>;
Index: llvm/test/CodeGen/RISCV/rv32Zbt.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zbt.ll
@@ -0,0 +1,58 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+
+define i32 @cmix(i32 %a, i32 %b, i32 %c) nounwind {
+; RV32I-NOT: cmix a0, a1, a0, a2
+;
+; RV32IB-LABEL: cmix:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    cmix a0, a1, a0, a2
+; RV32IB-NEXT:    ret
+  %and = and i32 %b, %a
+  %neg = xor i32 %b, -1
+  %and1 = and i32 %neg, %c
+  %or = or i32 %and1, %and
+  ret i32 %or
+}
+
+define i32 @cmov(i32 %a, i32 %b, i32 %c) nounwind {
+; RV32I-NOT: cmov a0, a1, a0, a2
+;
+; RV32IB-LABEL: cmov:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    cmov a0, a1, a0, a2
+; RV32IB-NEXT:    ret
+  %tobool = icmp eq i32 %b, 0
+  %cond = select i1 %tobool, i32 %c, i32 %a
+  ret i32 %cond
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i32 @fshl(i32 %a, i32 %b, i32 %c) nounwind {
+; RV32I-NOT: fsl a0, a0, a2, a1
+;
+; RV32IB-LABEL: fshl:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    fsl a0, a0, a2, a1
+; RV32IB-NEXT:    ret
+  %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
+  ret i32 %1
+}
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i32 @fshr(i32 %a, i32 %b, i32 %c) nounwind {
+; RV32I-NOT: fsr a0, a0, a2, a1
+;
+; RV32IB-LABEL: fshr:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    fsr a0, a0, a2, a1
+; RV32IB-NEXT:    ret
+  %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
+  ret i32 %1
+}
Index: llvm/test/CodeGen/RISCV/rv64Zbt.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zbt.ll
@@ -0,0 +1,58 @@
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+
+define i64 @cmix(i64 %a, i64 %b, i64 %c) nounwind {
+; RV64I-NOT: cmix a0, a1, a0, a2
+;
+; RV64IB-LABEL: cmix:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    cmix a0, a1, a0, a2
+; RV64IB-NEXT:    ret
+  %and = and i64 %b, %a
+  %neg = xor i64 %b, -1
+  %and1 = and i64 %neg, %c
+  %or = or i64 %and1, %and
+  ret i64 %or
+}
+
+define i64 @cmov(i64 %a, i64 %b, i64 %c) nounwind {
+; RV64I-NOT: cmov a0, a1, a0, a2
+;
+; RV64IB-LABEL: cmov:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    cmov a0, a1, a0, a2
+; RV64IB-NEXT:    ret
+  %tobool = icmp eq i64 %b, 0
+  %cond = select i1 %tobool, i64 %c, i64 %a
+  ret i64 %cond
+}
+
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+
+define i64 @fshl(i64 %a, i64 %b, i64 %c) nounwind {
+; RV64I-NOT: fsl a0, a0, a2, a1
+;
+; RV64IB-LABEL: fshl:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    ret
+  %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
+  ret i64 %1
+}
+
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
+define i64 @fshr(i64 %a, i64 %b, i64 %c) nounwind {
+; RV64I-NOT: fsr a0, a0, a2, a1
+;
+; RV64IB-LABEL: fshr:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    fsr a0, a0, a2, a1
+; RV64IB-NEXT:    ret
+  %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
+  ret i64 %1
+}