diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -838,6 +838,8 @@
       addTypeForP(MVT::v2i16, MVT::i32);
     }
 
+    setOperationAction(ISD::BSWAP, XLenVT, Legal);
+
     // Expand all truncating stores and extending loads.
     for (MVT VT0 : MVT::vector_valuetypes()) {
       for (MVT VT1 : MVT::vector_valuetypes()) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1261,3 +1261,10 @@
 def : PatALU32;
 def : PatALU32;
 } // [HasStdExtZpn, IsRV64]
+
+// bswap
+let Predicates = [HasStdExtZpn] in
+def : Pat<(bswap i32:$rs1), (SWAP16 (SWAP8 GPR:$rs1))>;
+let Predicates = [HasStdExtZpn, IsRV64] in
+def : Pat<(bswap i64:$rs1), (PKBT32 (SWAP16 (SWAP8 GPR:$rs1)),
+                                    (SWAP16 (SWAP8 GPR:$rs1)))>;
diff --git a/llvm/test/CodeGen/RISCV/rvp/bswap.ll b/llvm/test/CodeGen/RISCV/rvp/bswap.ll
--- a/llvm/test/CodeGen/RISCV/rvp/bswap.ll
+++ b/llvm/test/CodeGen/RISCV/rvp/bswap.ll
@@ -11,18 +11,17 @@
 define i16 @bswap16(i16 %x) nounwind {
 ; RV32-LABEL: bswap16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    slli a1, a0, 8
-; RV32-NEXT:    slli a0, a0, 16
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    swap8 a0, a0
+; RV32-NEXT:    swap16 a0, a0
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    slli a1, a0, 8
-; RV64-NEXT:    slli a0, a0, 48
-; RV64-NEXT:    srli a0, a0, 56
-; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    swap8 a0, a0
+; RV64-NEXT:    swap16 a0, a0
+; RV64-NEXT:    pkbt32 a0, a0, a0
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    ret
   %ret = call i16 @llvm.bswap.i16(i16 %x)
   ret i16 %ret
@@ -31,34 +30,16 @@
 define i32 @bswap32(i32 %x) nounwind {
 ; RV32-LABEL: bswap32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    srli a1, a0, 8
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -256
-; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    srli a2, a0, 24
-; RV32-NEXT:    or a1, a1, a2
-; RV32-NEXT:    slli a2, a0, 8
-; RV32-NEXT:    lui a3, 4080
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    swap8 a0, a0
+; RV32-NEXT:    swap16 a0, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    srliw a1, a0, 8
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -256
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    srliw a2, a0, 24
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a2, a0, 8
-; RV64-NEXT:    lui a3, 4080
-; RV64-NEXT:    and a2, a2, a3
-; RV64-NEXT:    slli a0, a0, 24
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    swap8 a0, a0
+; RV64-NEXT:    swap16 a0, a0
+; RV64-NEXT:    pkbt32 a0, a0, a0
+; RV64-NEXT:    srli a0, a0, 32
 ; RV64-NEXT:    ret
   %ret = call i32 @llvm.bswap.i32(i32 %x)
   ret i32 %ret
@@ -67,61 +48,18 @@
 define i64 @bswap64(i64 %x) nounwind {
 ; RV32-LABEL: bswap64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    srli a2, a1, 8
-; RV32-NEXT:    lui a3, 16
-; RV32-NEXT:    addi a3, a3, -256
-; RV32-NEXT:    and a2, a2, a3
-; RV32-NEXT:    srli a4, a1, 24
-; RV32-NEXT:    or a2, a2, a4
-; RV32-NEXT:    slli a4, a1, 8
-; RV32-NEXT:    lui a5, 4080
-; RV32-NEXT:    and a4, a4, a5
-; RV32-NEXT:    slli a1, a1, 24
-; RV32-NEXT:    or a1, a1, a4
-; RV32-NEXT:    or a2, a1, a2
-; RV32-NEXT:    srli a1, a0, 8
-; RV32-NEXT:    and a1, a1, a3
-; RV32-NEXT:    srli a3, a0, 24
-; RV32-NEXT:    or a1, a1, a3
-; RV32-NEXT:    slli a3, a0, 8
-; RV32-NEXT:    and a3, a3, a5
-; RV32-NEXT:    slli a0, a0, 24
-; RV32-NEXT:    or a0, a0, a3
-; RV32-NEXT:    or a1, a0, a1
+; RV32-NEXT:    swap8 a1, a1
+; RV32-NEXT:    swap16 a2, a1
+; RV32-NEXT:    swap8 a0, a0
+; RV32-NEXT:    swap16 a1, a0
 ; RV32-NEXT:    mv a0, a2
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bswap64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    srli a1, a0, 24
-; RV64-NEXT:    lui a2, 4080
-; RV64-NEXT:    and a1, a1, a2
-; RV64-NEXT:    srli a2, a0, 8
-; RV64-NEXT:    addi a3, zero, 255
-; RV64-NEXT:    slli a4, a3, 24
-; RV64-NEXT:    and a2, a2, a4
-; RV64-NEXT:    or a1, a2, a1
-; RV64-NEXT:    srli a2, a0, 40
-; RV64-NEXT:    lui a4, 16
-; RV64-NEXT:    addiw a4, a4, -256
-; RV64-NEXT:    and a2, a2, a4
-; RV64-NEXT:    srli a4, a0, 56
-; RV64-NEXT:    or a2, a2, a4
-; RV64-NEXT:    or a1, a1, a2
-; RV64-NEXT:    slli a2, a0, 8
-; RV64-NEXT:    slli a4, a3, 32
-; RV64-NEXT:    and a2, a2, a4
-; RV64-NEXT:    slli a4, a0, 24
-; RV64-NEXT:    slli a5, a3, 40
-; RV64-NEXT:    and a4, a4, a5
-; RV64-NEXT:    or a2, a4, a2
-; RV64-NEXT:    slli a4, a0, 40
-; RV64-NEXT:    slli a3, a3, 48
-; RV64-NEXT:    and a3, a4, a3
-; RV64-NEXT:    slli a0, a0, 56
-; RV64-NEXT:    or a0, a0, a3
-; RV64-NEXT:    or a0, a0, a2
-; RV64-NEXT:    or a0, a0, a1
+; RV64-NEXT:    swap8 a0, a0
+; RV64-NEXT:    swap16 a0, a0
+; RV64-NEXT:    pkbt32 a0, a0, a0
 ; RV64-NEXT:    ret
   %ret = call i64 @llvm.bswap.i64(i64 %x)
   ret i64 %ret