diff --git a/llvm/test/CodeGen/RISCV/bitreverse-srli-bitreverse.ll b/llvm/test/CodeGen/RISCV/bitreverse-srli-bitreverse.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bitreverse-srli-bitreverse.ll
@@ -0,0 +1,940 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZBB
+; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBB
+; RUN: llc -mtriple=riscv32 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV32ZBKB
+; RUN: llc -mtriple=riscv64 -mattr=+zbkb -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefixes=RV64ZBKB
+
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bitreverse.i16(i16)
+declare i32 @llvm.bitreverse.i32(i32)
+declare i64 @llvm.bitreverse.i64(i64)
+
+define i8 @test_bitreverse_srli_bitreverse_i8(i8 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a1, a0, 15
+; RV32I-NEXT:    slli a1, a1, 4
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srli a0, a0, 28
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 51
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    andi a0, a0, 51
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 85
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    andi a0, a0, 85
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 7
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    andi a0, a0, 240
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    andi a1, a0, 49
+; RV32I-NEXT:    slli a1, a1, 2
+; RV32I-NEXT:    srli a0, a0, 2
+; RV32I-NEXT:    andi a0, a0, 48
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    andi a1, a0, 85
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    andi a0, a0, 81
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a1, a0, 15
+; RV64I-NEXT:    slli a1, a1, 4
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srli a0, a0, 60
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 51
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    andi a0, a0, 51
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 85
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    andi a0, a0, 85
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 7
+; RV64I-NEXT:    slliw a0, a0, 1
+; RV64I-NEXT:    andi a0, a0, 240
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    andi a1, a0, 49
+; RV64I-NEXT:    slli a1, a1, 2
+; RV64I-NEXT:    srli a0, a0, 2
+; RV64I-NEXT:    andi a0, a0, 48
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    andi a1, a0, 85
+; RV64I-NEXT:    slli a1, a1, 1
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    andi a0, a0, 81
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    andi a1, a0, 15
+; RV32ZBB-NEXT:    slli a1, a1, 4
+; RV32ZBB-NEXT:    slli a0, a0, 24
+; RV32ZBB-NEXT:    srli a0, a0, 28
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    andi a1, a0, 51
+; RV32ZBB-NEXT:    slli a1, a1, 2
+; RV32ZBB-NEXT:    srli a0, a0, 2
+; RV32ZBB-NEXT:    andi a0, a0, 51
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    andi a1, a0, 85
+; RV32ZBB-NEXT:    slli a1, a1, 1
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    andi a0, a0, 85
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    srli a1, a0, 7
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    andi a0, a0, 240
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    andi a1, a0, 49
+; RV32ZBB-NEXT:    slli a1, a1, 2
+; RV32ZBB-NEXT:    srli a0, a0, 2
+; RV32ZBB-NEXT:    andi a0, a0, 48
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    andi a1, a0, 85
+; RV32ZBB-NEXT:    slli a1, a1, 1
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    andi a0, a0, 81
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    andi a1, a0, 15
+; RV64ZBB-NEXT:    slli a1, a1, 4
+; RV64ZBB-NEXT:    slli a0, a0, 56
+; RV64ZBB-NEXT:    srli a0, a0, 60
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    andi a1, a0, 51
+; RV64ZBB-NEXT:    slli a1, a1, 2
+; RV64ZBB-NEXT:    srli a0, a0, 2
+; RV64ZBB-NEXT:    andi a0, a0, 51
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    andi a1, a0, 85
+; RV64ZBB-NEXT:    slli a1, a1, 1
+; RV64ZBB-NEXT:    srli a0, a0, 1
+; RV64ZBB-NEXT:    andi a0, a0, 85
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    srli a1, a0, 7
+; RV64ZBB-NEXT:    slliw a0, a0, 1
+; RV64ZBB-NEXT:    andi a0, a0, 240
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    andi a1, a0, 49
+; RV64ZBB-NEXT:    slli a1, a1, 2
+; RV64ZBB-NEXT:    srli a0, a0, 2
+; RV64ZBB-NEXT:    andi a0, a0, 48
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    andi a1, a0, 85
+; RV64ZBB-NEXT:    slli a1, a1, 1
+; RV64ZBB-NEXT:    srli a0, a0, 1
+; RV64ZBB-NEXT:    andi a0, a0, 81
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    ret
+;
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 27
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 24
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i8:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 59
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 56
+; RV64ZBKB-NEXT:    ret
+  %1 = call i8 @llvm.bitreverse.i8(i8 %a)
+  %2 = lshr i8 %1, 3
+  %3 = call i8 @llvm.bitreverse.i8(i8 %2)
+  ret i8 %3
+}
+
+define i16 @test_bitreverse_srli_bitreverse_i16(i16 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 8
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 24
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 1
+; RV32I-NEXT:    addi a3, a2, -241
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a3, 3
+; RV32I-NEXT:    addi a4, a3, 819
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a4, 5
+; RV32I-NEXT:    addi a5, a4, 1365
+; RV32I-NEXT:    and a1, a1, a5
+; RV32I-NEXT:    and a0, a0, a5
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    andi a1, a1, -256
+; RV32I-NEXT:    srli a0, a0, 15
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    addi a5, a2, -256
+; RV32I-NEXT:    and a1, a1, a5
+; RV32I-NEXT:    addi a2, a2, -255
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    addi a2, a3, 768
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    addi a2, a3, 784
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    addi a2, a4, 1360
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    addi a2, a4, 1344
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a0, 8
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 56
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 1
+; RV64I-NEXT:    addiw a3, a2, -241
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a3, 3
+; RV64I-NEXT:    addiw a4, a3, 819
+; RV64I-NEXT:    and a1, a1, a4
+; RV64I-NEXT:    and a0, a0, a4
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a4, 5
+; RV64I-NEXT:    addiw a5, a4, 1365
+; RV64I-NEXT:    and a1, a1, a5
+; RV64I-NEXT:    and a0, a0, a5
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    slli a1, a0, 1
+; RV64I-NEXT:    andi a1, a1, -256
+; RV64I-NEXT:    srli a0, a0, 15
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    addiw a5, a2, -256
+; RV64I-NEXT:    and a1, a1, a5
+; RV64I-NEXT:    addiw a2, a2, -255
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    addiw a2, a3, 768
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    addiw a2, a3, 784
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    addiw a2, a4, 1360
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    addiw a2, a4, 1344
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 12
+; RV32ZBB-NEXT:    lui a2, 15
+; RV32ZBB-NEXT:    addi a2, a2, 240
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    srli a0, a0, 20
+; RV32ZBB-NEXT:    andi a0, a0, -241
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    lui a2, 3
+; RV32ZBB-NEXT:    addi a3, a2, 819
+; RV32ZBB-NEXT:    and a1, a1, a3
+; RV32ZBB-NEXT:    and a0, a0, a3
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    lui a3, 5
+; RV32ZBB-NEXT:    addi a4, a3, 1280
+; RV32ZBB-NEXT:    and a1, a1, a4
+; RV32ZBB-NEXT:    addi a3, a3, 1344
+; RV32ZBB-NEXT:    and a0, a0, a3
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a0, a0, 7
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    slli a1, a0, 4
+; RV32ZBB-NEXT:    srli a1, a1, 16
+; RV32ZBB-NEXT:    srli a0, a0, 20
+; RV32ZBB-NEXT:    andi a0, a0, -256
+; RV32ZBB-NEXT:    or a0, a0, a1
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    addi a5, a2, 768
+; RV32ZBB-NEXT:    and a1, a1, a5
+; RV32ZBB-NEXT:    addi a2, a2, 784
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    and a1, a1, a4
+; RV32ZBB-NEXT:    and a0, a0, a3
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a1, a0, 44
+; RV64ZBB-NEXT:    lui a2, 15
+; RV64ZBB-NEXT:    addiw a2, a2, 240
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    srli a0, a0, 52
+; RV64ZBB-NEXT:    andi a0, a0, -241
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    lui a2, 3
+; RV64ZBB-NEXT:    addiw a3, a2, 819
+; RV64ZBB-NEXT:    and a1, a1, a3
+; RV64ZBB-NEXT:    and a0, a0, a3
+; RV64ZBB-NEXT:    slli a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    lui a3, 5
+; RV64ZBB-NEXT:    addiw a4, a3, 1280
+; RV64ZBB-NEXT:    and a1, a1, a4
+; RV64ZBB-NEXT:    addiw a3, a3, 1344
+; RV64ZBB-NEXT:    and a0, a0, a3
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a0, a0, 7
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    slli a1, a0, 4
+; RV64ZBB-NEXT:    srli a1, a1, 48
+; RV64ZBB-NEXT:    srli a0, a0, 52
+; RV64ZBB-NEXT:    andi a0, a0, -256
+; RV64ZBB-NEXT:    or a0, a0, a1
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    addiw a5, a2, 768
+; RV64ZBB-NEXT:    and a1, a1, a5
+; RV64ZBB-NEXT:    addiw a2, a2, 784
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    and a1, a1, a4
+; RV64ZBB-NEXT:    and a0, a0, a3
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    ret
+;
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 23
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 16
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i16:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 55
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 48
+; RV64ZBKB-NEXT:    ret
+  %1 = call i16 @llvm.bitreverse.i16(i16 %a)
+  %2 = lshr i16 %1, 7
+  %3 = call i16 @llvm.bitreverse.i16(i16 %2)
+  ret i16 %3
+}
+
+define i32 @test_bitreverse_srli_bitreverse_i32(i32 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a2, a0, 24
+; RV32I-NEXT:    or a1, a1, a2
+; RV32I-NEXT:    slli a2, a0, 8
+; RV32I-NEXT:    lui a3, 4080
+; RV32I-NEXT:    and a2, a2, a3
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a2, 61681
+; RV32I-NEXT:    addi a2, a2, -241
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a2, 209715
+; RV32I-NEXT:    addi a2, a2, 819
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a4, 349525
+; RV32I-NEXT:    addi a4, a4, 1365
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 15
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srli a5, a0, 7
+; RV32I-NEXT:    and a3, a5, a3
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    srli a0, a0, 23
+; RV32I-NEXT:    andi a0, a0, 256
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a3, 61680
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    addi a3, a3, 256
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    lui a2, 209713
+; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, a4
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a1, a0, 8
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    addiw a2, a2, -256
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srliw a2, a0, 24
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 8
+; RV64I-NEXT:    lui a3, 4080
+; RV64I-NEXT:    and a2, a2, a3
+; RV64I-NEXT:    slliw a0, a0, 24
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a2, 61681
+; RV64I-NEXT:    addiw a2, a2, -241
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    lui a2, 349525
+; RV64I-NEXT:    addiw a4, a2, 1365
+; RV64I-NEXT:    and a1, a1, a4
+; RV64I-NEXT:    and a0, a0, a4
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 15
+; RV64I-NEXT:    srli a5, a0, 7
+; RV64I-NEXT:    and a3, a5, a3
+; RV64I-NEXT:    slliw a1, a1, 24
+; RV64I-NEXT:    or a1, a1, a3
+; RV64I-NEXT:    srli a0, a0, 23
+; RV64I-NEXT:    andi a0, a0, 256
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a3, 61680
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    addiw a3, a3, 256
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    slliw a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    lui a3, 209712
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    lui a3, 209713
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    slliw a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    and a1, a1, a4
+; RV64I-NEXT:    addiw a2, a2, 1364
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slliw a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 4
+; RV32ZBB-NEXT:    lui a2, 61681
+; RV32ZBB-NEXT:    addi a2, a2, -241
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 4
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    lui a2, 209715
+; RV32ZBB-NEXT:    addi a2, a2, 819
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    lui a2, 349520
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    lui a2, 349524
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a0, a0, 15
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 4
+; RV32ZBB-NEXT:    lui a2, 61680
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    addi a2, a2, 256
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 4
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    lui a2, 209712
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    lui a2, 209713
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    lui a2, 349525
+; RV32ZBB-NEXT:    addi a2, a2, 1365
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a2
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a1, a0, 36
+; RV64ZBB-NEXT:    lui a2, 61681
+; RV64ZBB-NEXT:    addiw a2, a2, -241
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    srli a0, a0, 28
+; RV64ZBB-NEXT:    lui a2, 986895
+; RV64ZBB-NEXT:    addiw a2, a2, 240
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    lui a2, 209715
+; RV64ZBB-NEXT:    addiw a2, a2, 819
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slliw a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    lui a2, 349520
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    lui a2, 349524
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a0, a0, 15
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a1, a0, 36
+; RV64ZBB-NEXT:    lui a3, 61680
+; RV64ZBB-NEXT:    and a1, a1, a3
+; RV64ZBB-NEXT:    srli a0, a0, 28
+; RV64ZBB-NEXT:    lui a3, 986881
+; RV64ZBB-NEXT:    and a0, a0, a3
+; RV64ZBB-NEXT:    sext.w a0, a0
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    lui a3, 209712
+; RV64ZBB-NEXT:    and a1, a1, a3
+; RV64ZBB-NEXT:    lui a3, 209713
+; RV64ZBB-NEXT:    and a0, a0, a3
+; RV64ZBB-NEXT:    slliw a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    lui a3, 349525
+; RV64ZBB-NEXT:    addiw a3, a3, 1365
+; RV64ZBB-NEXT:    and a1, a1, a3
+; RV64ZBB-NEXT:    addiw a2, a2, 1092
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slliw a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    ret
+;
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 15
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i32:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 47
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 32
+; RV64ZBKB-NEXT:    ret
+  %1 = call i32 @llvm.bitreverse.i32(i32 %a)
+  %2 = lshr i32 %1, 15
+  %3 = call i32 @llvm.bitreverse.i32(i32 %2)
+  ret i32 %3
+}
+
+define i64 @test_bitreverse_srli_bitreverse_i64(i64 %a) nounwind {
+; RV32I-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a0, 8
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    addi a2, a2, -256
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    srli a3, a0, 24
+; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    slli a3, a0, 8
+; RV32I-NEXT:    lui a4, 4080
+; RV32I-NEXT:    and a3, a3, a4
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    or a0, a0, a3
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    lui a3, 61681
+; RV32I-NEXT:    addi a5, a3, -241
+; RV32I-NEXT:    and a1, a1, a5
+; RV32I-NEXT:    and a0, a0, a5
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    lui a6, 209715
+; RV32I-NEXT:    addi a6, a6, 819
+; RV32I-NEXT:    and a1, a1, a6
+; RV32I-NEXT:    and a0, a0, a6
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    lui a7, 349525
+; RV32I-NEXT:    addi a7, a7, 1365
+; RV32I-NEXT:    and a1, a1, a7
+; RV32I-NEXT:    and a0, a0, a7
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srli t0, a0, 9
+; RV32I-NEXT:    and a2, t0, a2
+; RV32I-NEXT:    srli t0, a0, 25
+; RV32I-NEXT:    or a2, a2, t0
+; RV32I-NEXT:    slli a0, a0, 7
+; RV32I-NEXT:    and a0, a0, a4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    srli a1, a0, 4
+; RV32I-NEXT:    addi a2, a3, -249
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    and a0, a0, a5
+; RV32I-NEXT:    slli a0, a0, 4
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 2
+; RV32I-NEXT:    and a1, a1, a6
+; RV32I-NEXT:    and a0, a0, a6
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    srli a1, a0, 1
+; RV32I-NEXT:    and a1, a1, a7
+; RV32I-NEXT:    and a0, a0, a7
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    or a1, a1, a0
+; RV32I-NEXT:    li a0, 0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a1, a0, 24
+; RV64I-NEXT:    lui a2, 4080
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    srli a2, a0, 8
+; RV64I-NEXT:    li a3, 255
+; RV64I-NEXT:    slli a4, a3, 24
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    or a1, a2, a1
+; RV64I-NEXT:    srli a2, a0, 40
+; RV64I-NEXT:    lui a4, 16
+; RV64I-NEXT:    addiw a4, a4, -256
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srli a4, a0, 56
+; RV64I-NEXT:    or a2, a2, a4
+; RV64I-NEXT:    or a1, a1, a2
+; RV64I-NEXT:    slli a2, a0, 24
+; RV64I-NEXT:    slli a4, a3, 40
+; RV64I-NEXT:    and a2, a2, a4
+; RV64I-NEXT:    srliw a5, a0, 24
+; RV64I-NEXT:    slli a5, a5, 32
+; RV64I-NEXT:    or a2, a2, a5
+; RV64I-NEXT:    slli a5, a0, 40
+; RV64I-NEXT:    slli a3, a3, 48
+; RV64I-NEXT:    and a5, a5, a3
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    or a0, a0, a5
+; RV64I-NEXT:    lui a5, %hi(.LCPI3_0)
+; RV64I-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
+; RV64I-NEXT:    or a0, a0, a2
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    and a1, a1, a5
+; RV64I-NEXT:    and a0, a0, a5
+; RV64I-NEXT:    lui a2, %hi(.LCPI3_1)
+; RV64I-NEXT:    ld a2, %lo(.LCPI3_1)(a2)
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    lui a5, %hi(.LCPI3_2)
+; RV64I-NEXT:    ld a5, %lo(.LCPI3_2)(a5)
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    and a1, a1, a5
+; RV64I-NEXT:    and a0, a0, a5
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 33
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srli a6, a0, 9
+; RV64I-NEXT:    and a4, a6, a4
+; RV64I-NEXT:    srli a6, a0, 57
+; RV64I-NEXT:    slli a6, a6, 32
+; RV64I-NEXT:    or a4, a4, a6
+; RV64I-NEXT:    slli a0, a0, 7
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    or a0, a0, a4
+; RV64I-NEXT:    srli a1, a0, 4
+; RV64I-NEXT:    lui a3, 61681
+; RV64I-NEXT:    addiw a4, a3, -249
+; RV64I-NEXT:    slli a4, a4, 32
+; RV64I-NEXT:    and a1, a1, a4
+; RV64I-NEXT:    addiw a3, a3, -241
+; RV64I-NEXT:    slli a3, a3, 32
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    slli a0, a0, 4
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 2
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    lui a2, 209715
+; RV64I-NEXT:    addiw a2, a2, 819
+; RV64I-NEXT:    slli a2, a2, 32
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    srli a1, a0, 1
+; RV64I-NEXT:    and a1, a1, a5
+; RV64I-NEXT:    and a0, a0, a5
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 4
+; RV32ZBB-NEXT:    lui a2, 61681
+; RV32ZBB-NEXT:    addi a3, a2, -241
+; RV32ZBB-NEXT:    and a1, a1, a3
+; RV32ZBB-NEXT:    and a0, a0, a3
+; RV32ZBB-NEXT:    slli a0, a0, 4
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    lui a4, 209715
+; RV32ZBB-NEXT:    addi a5, a4, 819
+; RV32ZBB-NEXT:    and a1, a1, a5
+; RV32ZBB-NEXT:    and a0, a0, a5
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    lui a6, 349525
+; RV32ZBB-NEXT:    addi a7, a6, 1364
+; RV32ZBB-NEXT:    and a1, a1, a7
+; RV32ZBB-NEXT:    addi a6, a6, 1365
+; RV32ZBB-NEXT:    and a0, a0, a6
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a0, a0, 1
+; RV32ZBB-NEXT:    rev8 a0, a0
+; RV32ZBB-NEXT:    srli a1, a0, 4
+; RV32ZBB-NEXT:    addi a2, a2, -249
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a3
+; RV32ZBB-NEXT:    slli a0, a0, 4
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 2
+; RV32ZBB-NEXT:    addi a2, a4, 817
+; RV32ZBB-NEXT:    and a1, a1, a2
+; RV32ZBB-NEXT:    and a0, a0, a5
+; RV32ZBB-NEXT:    slli a0, a0, 2
+; RV32ZBB-NEXT:    or a0, a1, a0
+; RV32ZBB-NEXT:    srli a1, a0, 1
+; RV32ZBB-NEXT:    and a1, a1, a6
+; RV32ZBB-NEXT:    and a0, a0, a6
+; RV32ZBB-NEXT:    slli a0, a0, 1
+; RV32ZBB-NEXT:    or a1, a1, a0
+; RV32ZBB-NEXT:    li a0, 0
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, %hi(.LCPI3_0)
+; RV64ZBB-NEXT:    ld a1, %lo(.LCPI3_0)(a1)
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a2, a0, 4
+; RV64ZBB-NEXT:    and a2, a2, a1
+; RV64ZBB-NEXT:    and a0, a0, a1
+; RV64ZBB-NEXT:    lui a1, %hi(.LCPI3_1)
+; RV64ZBB-NEXT:    ld a1, %lo(.LCPI3_1)(a1)
+; RV64ZBB-NEXT:    slli a0, a0, 4
+; RV64ZBB-NEXT:    or a0, a2, a0
+; RV64ZBB-NEXT:    srli a2, a0, 2
+; RV64ZBB-NEXT:    and a2, a2, a1
+; RV64ZBB-NEXT:    and a0, a0, a1
+; RV64ZBB-NEXT:    slli a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a2, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    lui a2, 87381
+; RV64ZBB-NEXT:    addiw a2, a2, 1365
+; RV64ZBB-NEXT:    slli a2, a2, 34
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    lui a2, 349525
+; RV64ZBB-NEXT:    addiw a2, a2, 1365
+; RV64ZBB-NEXT:    slli a2, a2, 32
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a0, a0, 33
+; RV64ZBB-NEXT:    rev8 a0, a0
+; RV64ZBB-NEXT:    srli a1, a0, 4
+; RV64ZBB-NEXT:    lui a2, 61681
+; RV64ZBB-NEXT:    addiw a3, a2, -249
+; RV64ZBB-NEXT:    slli a3, a3, 32
+; RV64ZBB-NEXT:    and a1, a1, a3
+; RV64ZBB-NEXT:    addiw a2, a2, -241
+; RV64ZBB-NEXT:    slli a2, a2, 32
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 4
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 2
+; RV64ZBB-NEXT:    lui a2, 209715
+; RV64ZBB-NEXT:    addiw a3, a2, 817
+; RV64ZBB-NEXT:    slli a3, a3, 32
+; RV64ZBB-NEXT:    and a1, a1, a3
+; RV64ZBB-NEXT:    addiw a2, a2, 819
+; RV64ZBB-NEXT:    slli a2, a2, 32
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    lui a2, %hi(.LCPI3_2)
+; RV64ZBB-NEXT:    ld a2, %lo(.LCPI3_2)(a2)
+; RV64ZBB-NEXT:    slli a0, a0, 2
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    srli a1, a0, 1
+; RV64ZBB-NEXT:    and a1, a1, a2
+; RV64ZBB-NEXT:    and a0, a0, a2
+; RV64ZBB-NEXT:    slli a0, a0, 1
+; RV64ZBB-NEXT:    or a0, a1, a0
+; RV64ZBB-NEXT:    ret
+;
+; RV32ZBKB-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV32ZBKB:       # %bb.0:
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a0, a0
+; RV32ZBKB-NEXT:    srli a0, a0, 1
+; RV32ZBKB-NEXT:    rev8 a0, a0
+; RV32ZBKB-NEXT:    brev8 a1, a0
+; RV32ZBKB-NEXT:    li a0, 0
+; RV32ZBKB-NEXT:    ret
+;
+; RV64ZBKB-LABEL: test_bitreverse_srli_bitreverse_i64:
+; RV64ZBKB:       # %bb.0:
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    srli a0, a0, 33
+; RV64ZBKB-NEXT:    rev8 a0, a0
+; RV64ZBKB-NEXT:    brev8 a0, a0
+; RV64ZBKB-NEXT:    ret
+  %1 = call i64 @llvm.bitreverse.i64(i64 %a)
+  %2 = lshr i64 %1, 33
+  %3 = call i64 @llvm.bitreverse.i64(i64 %2)
+  ret i64 %3
+}
diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -37,6 +37,93 @@
   ret i32 %c
 }
 
+; TODO: fold (bitreverse(srl (bitreverse c), x)) -> (shl c, x)
+define i32 @test_bitreverse_srl_bitreverse(i32 %a0) nounwind {
+; X86-LABEL: test_bitreverse_srl_bitreverse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655744, %ecx # imm = 0x55555540
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655680, %eax # imm = 0x55555500
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    shrl $7, %eax
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $252645121, %ecx # imm = 0xF0F0F01
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    shrl $4, %eax
+; X86-NEXT:    andl $252645120, %eax # imm = 0xF0F0F00
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $858993424, %ecx # imm = 0x33333310
+; X86-NEXT:    shrl $2, %eax
+; X86-NEXT:    andl $858993408, %eax # imm = 0x33333300
+; X86-NEXT:    leal (%eax,%ecx,4), %eax
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT:    shrl %eax
+; X86-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT:    leal (%eax,%ecx,2), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_bitreverse_srl_bitreverse:
+; X64:       # %bb.0:
+; X64-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-NEXT:    bswapl %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT:    shll $4, %eax
+; X64-NEXT:    shrl $4, %edi
+; X64-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
+; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT:    shrl $2, %edi
+; X64-NEXT:    andl $858993459, %edi # imm = 0x33333333
+; X64-NEXT:    leal (%rdi,%rax,4), %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $1431655744, %ecx # imm = 0x55555540
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $1431655680, %eax # imm = 0x55555500
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    shrl $7, %eax
+; X64-NEXT:    bswapl %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $252645121, %ecx # imm = 0xF0F0F01
+; X64-NEXT:    shll $4, %ecx
+; X64-NEXT:    shrl $4, %eax
+; X64-NEXT:    andl $252645120, %eax # imm = 0xF0F0F00
+; X64-NEXT:    orl %ecx, %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $858993424, %ecx # imm = 0x33333310
+; X64-NEXT:    shrl $2, %eax
+; X64-NEXT:    andl $858993408, %eax # imm = 0x33333300
+; X64-NEXT:    leal (%rax,%rcx,4), %eax
+; X64-NEXT:    movl %eax, %ecx
+; X64-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    andl $1431655765, %eax # imm = 0x55555555
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    retq
+  %b = call i32 @llvm.bitreverse.i32(i32 %a0)
+  %c = lshr i32 %b, 7
+  %d = call i32 @llvm.bitreverse.i32(i32 %c)
+  ret i32 %d
+}
+
 define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
 ; X86-LABEL: test_demandedbits_bitreverse:
 ; X86:       # %bb.0:
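
For reference, the fold named in the TODO above rests on the identity bitreverse(lshr(bitreverse(x), k)) == shl(x, k): bit j of the result is bit j-k of x for j >= k and zero below, which is exactly a left shift. A minimal IR sketch of the intended rewrite, assuming the fold lands; the function names here are illustrative and not part of the patch:

; Pattern exercised by the tests above: two bitreverses around a logical
; shift right by 7.
define i32 @srl_between_bitreverses(i32 %a0) {
  %b = call i32 @llvm.bitreverse.i32(i32 %a0)
  %c = lshr i32 %b, 7
  %d = call i32 @llvm.bitreverse.i32(i32 %c)
  ret i32 %d
}

; What the combine should produce instead: a plain left shift.
define i32 @expected_fold(i32 %a0) {
  %d = shl i32 %a0, 7
  ret i32 %d
}

declare i32 @llvm.bitreverse.i32(i32)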