Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -148,8 +148,10 @@
   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
 
-  setOperationAction(ISD::ROTL, XLenVT, Expand);
-  setOperationAction(ISD::ROTR, XLenVT, Expand);
+  if (!(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp())) {
+    setOperationAction(ISD::ROTL, XLenVT, Expand);
+    setOperationAction(ISD::ROTR, XLenVT, Expand);
+  }
 
   if (!Subtarget.hasStdExtZbp())
     setOperationAction(ISD::BSWAP, XLenVT, Expand);
Index: llvm/lib/Target/RISCV/RISCVInstrInfoB.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -637,6 +637,12 @@
 // Codegen patterns
 //===----------------------------------------------------------------------===//
 
+let Predicates = [HasStdExtZbbOrZbp] in {
+def : Pat<(and GPR:$rs1, (not GPR:$rs2)), (ANDN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(or GPR:$rs1, (not GPR:$rs2)), (ORN GPR:$rs1, GPR:$rs2)>;
+def : Pat<(xor GPR:$rs1, (not GPR:$rs2)), (XNOR GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbbOrZbp]
+
 let Predicates = [HasStdExtZbb] in {
 def : Pat<(xor (shl (xor GPR:$rs1, -1), GPR:$rs2), -1),
           (SLO GPR:$rs1, GPR:$rs2)>;
@@ -644,6 +650,13 @@
           (SRO GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbb]
 
+let Predicates = [HasStdExtZbbOrZbp] in {
+def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
+def : Pat<(fshl GPR:$rs1, GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
+def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
+def : Pat<(fshr GPR:$rs1, GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
+} // Predicates = [HasStdExtZbbOrZbp]
+
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : Pat<(or (or (and (srl GPR:$rs1, (i32 1)), (i32 0x55555555)), GPR:$rs1),
               (and (shl GPR:$rs1, (i32 1)), (i32 0xAAAAAAAA))),
@@ -765,6 +778,23 @@
           (MAXU GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbb]
 
+let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
+def : Pat<(or (and GPR:$rs1, 0x0000FFFF), (shl GPR:$rs2, (i32 16))),
+          (PACK GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+def : Pat<(or (and GPR:$rs1, 0x00000000FFFFFFFF), (shl GPR:$rs2, (i64 32))),
+          (PACK GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp, IsRV32] in
+def : Pat<(or (and GPR:$rs2, 0xFFFF0000), (srl GPR:$rs1, (i32 16))),
+          (PACKU GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp, IsRV64] in
+def : Pat<(or (and GPR:$rs2, 0xFFFFFFFF00000000), (srl GPR:$rs1, (i64 32))),
+          (PACKU GPR:$rs1, GPR:$rs2)>;
+let Predicates = [HasStdExtZbbOrZbp] in
+def : Pat<(or (and (shl GPR:$rs2, (XLenVT 8)), 0xFF00),
+              (and GPR:$rs1, 0x00FF)),
+          (PACKH GPR:$rs1, GPR:$rs2)>;
+
 let Predicates = [HasStdExtZbp, IsRV32] in {
 def : Pat<(or (or (and (shl GPR:$rs1, (i32 8)), (i32 0x00FF0000)),
               (and GPR:$rs1, (i32 0xFF0000FF))),
Index: llvm/test/CodeGen/RISCV/rv32Zbbp.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv32Zbbp.ll
@@ -0,0 +1,110 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32IB
+
+define i32 @andn(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: andn a0, a0, a1
+;
+; RV32IB-LABEL: andn:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    andn a0, a0, a1
+; RV32IB-NEXT:    ret
+  %neg = xor i32 %b, -1
+  %and = and i32 %neg, %a
+  ret i32 %and
+}
+
+define i32 @orn(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: orn a0, a0, a1
+;
+; RV32IB-LABEL: orn:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    orn a0, a0, a1
+; RV32IB-NEXT:    ret
+  %neg = xor i32 %b, -1
+  %or = or i32 %neg, %a
+  ret i32 %or
+}
+
+define i32 @xnor(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: xnor a0, a0, a1
+;
+; RV32IB-LABEL: xnor:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    xnor a0, a0, a1
+; RV32IB-NEXT:    ret
+  %neg = xor i32 %a, -1
+  %xor = xor i32 %neg, %b
+  ret i32 %xor
+}
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+
+define i32 @rol(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: rol a0, a0, a1
+;
+; RV32IB-LABEL: rol:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    rol a0, a0, a1
+; RV32IB-NEXT:    ret
+  %or = tail call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+declare i32 @llvm.fshr.i32(i32, i32, i32)
+
+define i32 @ror(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: ror a0, a0, a1
+;
+; RV32IB-LABEL: ror:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    ror a0, a0, a1
+; RV32IB-NEXT:    ret
+  %or = tail call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 %b)
+  ret i32 %or
+}
+
+define i32 @pack(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: pack a0, a0, a1
+;
+; RV32IB-LABEL: pack:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    pack a0, a0, a1
+; RV32IB-NEXT:    ret
+  %shl = and i32 %a, 65535
+  %shl1 = shl i32 %b, 16
+  %or = or i32 %shl1, %shl
+  ret i32 %or
+}
+
+define i32 @packu(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: packu a0, a0, a1
+;
+; RV32IB-LABEL: packu:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    packu a0, a0, a1
+; RV32IB-NEXT:    ret
+  %shr = lshr i32 %a, 16
+  %shr1 = and i32 %b, -65536
+  %or = or i32 %shr1, %shr
+  ret i32 %or
+}
+
+define i32 @packh(i32 %a, i32 %b) nounwind {
+; RV32I-NOT: packh a0, a0, a1
+;
+; RV32IB-LABEL: packh:
+; RV32IB:       # %bb.0:
+; RV32IB-NEXT:    packh a0, a0, a1
+; RV32IB-NEXT:    ret
+  %and = and i32 %a, 255
+  %and1 = shl i32 %b, 8
+  %shl = and i32 %and1, 65280
+  %or = or i32 %shl, %and
+  ret i32 %or
+}
Index: llvm/test/CodeGen/RISCV/rv64Zbbp.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rv64Zbbp.ll
@@ -0,0 +1,110 @@
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-b -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64IB
+
+define i64 @andn(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: andn a0, a0, a1
+;
+; RV64IB-LABEL: andn:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    andn a0, a0, a1
+; RV64IB-NEXT:    ret
+  %neg = xor i64 %b, -1
+  %and = and i64 %neg, %a
+  ret i64 %and
+}
+
+define i64 @orn(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: orn a0, a0, a1
+;
+; RV64IB-LABEL: orn:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    orn a0, a0, a1
+; RV64IB-NEXT:    ret
+  %neg = xor i64 %b, -1
+  %or = or i64 %neg, %a
+  ret i64 %or
+}
+
+define i64 @xnor(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: xnor a0, a0, a1
+;
+; RV64IB-LABEL: xnor:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    xnor a0, a0, a1
+; RV64IB-NEXT:    ret
+  %neg = xor i64 %a, -1
+  %xor = xor i64 %neg, %b
+  ret i64 %xor
+}
+
+declare i64 @llvm.fshl.i64(i64, i64, i64)
+
+define i64 @rol(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: rol a0, a0, a1
+;
+; RV64IB-LABEL: rol:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    rol a0, a0, a1
+; RV64IB-NEXT:    ret
+  %or = tail call i64 @llvm.fshl.i64(i64 %a, i64 %a, i64 %b)
+  ret i64 %or
+}
+
+declare i64 @llvm.fshr.i64(i64, i64, i64)
+
+define i64 @ror(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: ror a0, a0, a1
+;
+; RV64IB-LABEL: ror:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    ror a0, a0, a1
+; RV64IB-NEXT:    ret
+  %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b)
+  ret i64 %or
+}
+
+define i64 @pack(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: pack a0, a0, a1
+;
+; RV64IB-LABEL: pack:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    pack a0, a0, a1
+; RV64IB-NEXT:    ret
+  %shl = and i64 %a, 4294967295
+  %shl1 = shl i64 %b, 32
+  %or = or i64 %shl1, %shl
+  ret i64 %or
+}
+
+define i64 @packu(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: packu a0, a0, a1
+;
+; RV64IB-LABEL: packu:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    packu a0, a0, a1
+; RV64IB-NEXT:    ret
+  %shr = lshr i64 %a, 32
+  %shr1 = and i64 %b, -4294967296
+  %or = or i64 %shr1, %shr
+  ret i64 %or
+}
+
+define i64 @packh(i64 %a, i64 %b) nounwind {
+; RV64I-NOT: packh a0, a0, a1
+;
+; RV64IB-LABEL: packh:
+; RV64IB:       # %bb.0:
+; RV64IB-NEXT:    packh a0, a0, a1
+; RV64IB-NEXT:    ret
+  %and = and i64 %a, 255
+  %and1 = shl i64 %b, 8
+  %shl = and i64 %and1, 65280
+  %or = or i64 %shl, %and
+  ret i64 %or
+}