diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll @@ -0,0 +1,1749 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-UNKNOWN +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-256 +; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-512 +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-UNKNOWN +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-256 +; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-512 + +; +; VECTOR_REVERSE - masks +; + +define @reverse_v2i1( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v2i1: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v9 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v2i1: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-BITS-256-NEXT: vmv.v.i v8, 0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: srli a0, a0, 2 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-256-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v2i1: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-BITS-512-NEXT: vmv.v.i v8, 0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: srli a0, a0, 2 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-512-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v2i1: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: csrr a0, 
vlenb +; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v9 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 +; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v2i1: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-BITS-256-NEXT: vmv.v.i v8, 0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: srli a0, a0, 2 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-256-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v2i1: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-BITS-512-NEXT: vmv.v.i v8, 0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: srli a0, a0, 2 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-512-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2i1( %a) + ret %res +} + +define @reverse_v4i1( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v4i1: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v9 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v4i1: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-BITS-256-NEXT: vmv.v.i v8, 0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: srli a0, a0, 1 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-256-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v4i1: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-BITS-512-NEXT: vmv.v.i v8, 0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: srli a0, a0, 1 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-512-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: ret 
+; +; RV64-BITS-UNKNOWN-LABEL: reverse_v4i1: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v9 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 +; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v4i1: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-BITS-256-NEXT: vmv.v.i v8, 0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: srli a0, a0, 1 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-256-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v4i1: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-BITS-512-NEXT: vmv.v.i v8, 0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: srli a0, a0, 1 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-512-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4i1( %a) + ret %res +} + +define @reverse_v8i1( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v8i1: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v10 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v8i1: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-BITS-256-NEXT: vmv.v.i v8, 0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-256-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v8i1: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-BITS-512-NEXT: vmv.v.i v8, 0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0 +; 
RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV32-BITS-512-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v8i1: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v10 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v8i1: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-BITS-256-NEXT: vmv.v.i v8, 0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-256-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v8i1: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-BITS-512-NEXT: vmv.v.i v8, 0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9 +; RV64-BITS-512-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8i1( %a) + ret %res +} + +define @reverse_v16i1( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v16i1: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v12 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v16i1: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV32-BITS-256-NEXT: vmv.v.i v8, 0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: slli a0, a0, 1 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vid.v v10 +; RV32-BITS-256-NEXT: vrsub.vx v10, v10, a0 +; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v10 +; RV32-BITS-256-NEXT: vand.vi v8, v12, 1 +; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v16i1: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV32-BITS-512-NEXT: vmv.v.i v8, 0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-512-NEXT: csrr a0, vlenb 
+; RV32-BITS-512-NEXT: slli a0, a0, 1 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vid.v v10 +; RV32-BITS-512-NEXT: vrsub.vx v10, v10, a0 +; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v10 +; RV32-BITS-512-NEXT: vand.vi v8, v12, 1 +; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v16i1: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v12 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 +; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v16i1: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV64-BITS-256-NEXT: vmv.v.i v8, 0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: slli a0, a0, 1 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vid.v v10 +; RV64-BITS-256-NEXT: vrsub.vx v10, v10, a0 +; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v10 +; RV64-BITS-256-NEXT: vand.vi v8, v12, 1 +; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v16i1: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV64-BITS-512-NEXT: vmv.v.i v8, 0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: slli a0, a0, 1 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vid.v v10 +; RV64-BITS-512-NEXT: vrsub.vx v10, v10, a0 +; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v10 +; RV64-BITS-512-NEXT: vand.vi v8, v12, 1 +; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v16i1( %a) + ret %res +} + +define @reverse_v32i1( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v32i1: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v16 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v32i1: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-256-NEXT: vmv.v.i v8, 0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: slli a0, a0, 2 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vid.v v12 +; RV32-BITS-256-NEXT: vrsub.vx v12, v12, a0 +; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v12 +; RV32-BITS-256-NEXT: vand.vi v8, v16, 1 +; RV32-BITS-256-NEXT: 
vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v32i1: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vmv.v.i v8, 0 +; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: slli a0, a0, 2 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vid.v v12 +; RV32-BITS-512-NEXT: vrsub.vx v12, v12, a0 +; RV32-BITS-512-NEXT: vrgather.vv v16, v8, v12 +; RV32-BITS-512-NEXT: vand.vi v8, v16, 1 +; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v32i1: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v16 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 +; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v32i1: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-256-NEXT: vmv.v.i v8, 0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: slli a0, a0, 2 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vid.v v12 +; RV64-BITS-256-NEXT: vrsub.vx v12, v12, a0 +; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v12 +; RV64-BITS-256-NEXT: vand.vi v8, v16, 1 +; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v32i1: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vmv.v.i v8, 0 +; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: slli a0, a0, 2 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vid.v v12 +; RV64-BITS-512-NEXT: vrsub.vx v12, v12, a0 +; RV64-BITS-512-NEXT: vrgather.vv v16, v8, v12 +; RV64-BITS-512-NEXT: vand.vi v8, v16, 1 +; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v32i1( %a) + ret %res +} + +define @reverse_v64i1( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v64i1: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v8 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0 +; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1 +; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v64i1: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, 
m8, ta, mu +; RV32-BITS-256-NEXT: vmv.v.i v8, 0 +; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: slli a0, a0, 3 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vid.v v16 +; RV32-BITS-256-NEXT: vrsub.vx v16, v16, a0 +; RV32-BITS-256-NEXT: vrgather.vv v24, v8, v16 +; RV32-BITS-256-NEXT: vand.vi v8, v24, 1 +; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v64i1: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: slli a0, a0, 2 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vid.v v8 +; RV32-BITS-512-NEXT: vrsub.vx v8, v8, a0 +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-512-NEXT: vmv.v.i v16, 0 +; RV32-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0 +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vrgather.vv v28, v16, v8 +; RV32-BITS-512-NEXT: vrgather.vv v24, v20, v8 +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-512-NEXT: vand.vi v8, v24, 1 +; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v64i1: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v8 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0 +; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1 +; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v64i1: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-256-NEXT: vmv.v.i v8, 0 +; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: slli a0, a0, 3 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vid.v v16 +; RV64-BITS-256-NEXT: vrsub.vx v16, v16, a0 +; RV64-BITS-256-NEXT: vrgather.vv v24, v8, v16 +; RV64-BITS-256-NEXT: vand.vi v8, v24, 1 +; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v64i1: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: slli a0, a0, 2 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vid.v v8 +; RV64-BITS-512-NEXT: vrsub.vx v8, v8, a0 +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-512-NEXT: vmv.v.i v16, 0 +; RV64-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0 +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vrgather.vv v28, v16, v8 +; RV64-BITS-512-NEXT: vrgather.vv v24, v20, v8 +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-512-NEXT: vand.vi v8, v24, 1 +; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v64i1( %a) + ret %res +} + +; +; VECTOR_REVERSE - integer +; + +define 
@reverse_v1i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v1i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 3 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v9 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v1i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: srli a0, a0, 3 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v1i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: srli a0, a0, 3 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v1i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 3 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v9 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v1i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: srli a0, a0, 3 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v1i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: srli a0, a0, 3 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1i8( %a) + ret %res +} + +define @reverse_v2i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v2i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v9 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v2i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, 
vlenb +; RV32-BITS-256-NEXT: srli a0, a0, 2 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v2i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: srli a0, a0, 2 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v2i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v9 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v2i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: srli a0, a0, 2 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v2i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: srli a0, a0, 2 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2i8( %a) + ret %res +} + +define @reverse_v4i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v4i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v9 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v4i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: srli a0, a0, 1 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v4i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: srli a0, a0, 1 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; 
RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v4i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v9 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v4i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: srli a0, a0, 1 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v4i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: srli a0, a0, 1 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4i8( %a) + ret %res +} + +define @reverse_v8i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v8i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v10 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v8i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV32-BITS-256-NEXT: vid.v v9 +; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-256-NEXT: vmv.v.v v8, v9 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v8i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV32-BITS-512-NEXT: vid.v v9 +; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV32-BITS-512-NEXT: vmv.v.v v8, v9 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v8i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v10 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v8i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli 
a1, zero, e8, m1, ta, mu +; RV64-BITS-256-NEXT: vid.v v9 +; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-256-NEXT: vmv.v.v v8, v9 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v8i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV64-BITS-512-NEXT: vid.v v9 +; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 +; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 +; RV64-BITS-512-NEXT: vmv.v.v v8, v9 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8i8( %a) + ret %res +} + +define @reverse_v16i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v16i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v12 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v16i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: slli a0, a0, 1 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV32-BITS-256-NEXT: vid.v v10 +; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0 +; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12 +; RV32-BITS-256-NEXT: vmv.v.v v8, v10 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v16i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: slli a0, a0, 1 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV32-BITS-512-NEXT: vid.v v10 +; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0 +; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12 +; RV32-BITS-512-NEXT: vmv.v.v v8, v10 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v16i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v12 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v16i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: slli a0, a0, 1 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV64-BITS-256-NEXT: vid.v v10 +; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 +; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 +; RV64-BITS-256-NEXT: vmv.v.v v8, v10 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v16i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: slli a0, a0, 1 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV64-BITS-512-NEXT: vid.v v10 +; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 +; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12 +; RV64-BITS-512-NEXT: vmv.v.v v8, v10 +; RV64-BITS-512-NEXT: ret + %res = call 
@llvm.experimental.vector.reverse.v16i8( %a) + ret %res +} + +define @reverse_v32i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v32i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v16 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v32i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: slli a0, a0, 2 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-256-NEXT: vid.v v12 +; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 +; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16 +; RV32-BITS-256-NEXT: vmv.v.v v8, v12 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v32i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: slli a0, a0, 2 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vid.v v12 +; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 +; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16 +; RV32-BITS-512-NEXT: vmv.v.v v8, v12 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v32i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v16 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v32i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: slli a0, a0, 2 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-256-NEXT: vid.v v12 +; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 +; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 +; RV64-BITS-256-NEXT: vmv.v.v v8, v12 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v32i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: slli a0, a0, 2 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vid.v v12 +; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 +; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 +; RV64-BITS-512-NEXT: vmv.v.v v8, v12 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v32i8( %a) + ret %res +} + +define @reverse_v64i8( %a) { +; RV32-BITS-UNKNOWN-LABEL: reverse_v64i8: +; RV32-BITS-UNKNOWN: # %bb.0: +; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vid.v v16 +; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0 +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24 +; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24 +; 
RV32-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16 +; RV32-BITS-UNKNOWN-NEXT: ret +; +; RV32-BITS-256-LABEL: reverse_v64i8: +; RV32-BITS-256: # %bb.0: +; RV32-BITS-256-NEXT: csrr a0, vlenb +; RV32-BITS-256-NEXT: slli a0, a0, 3 +; RV32-BITS-256-NEXT: addi a0, a0, -1 +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; RV32-BITS-256-NEXT: vid.v v16 +; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0 +; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24 +; RV32-BITS-256-NEXT: vmv.v.v v8, v16 +; RV32-BITS-256-NEXT: ret +; +; RV32-BITS-512-LABEL: reverse_v64i8: +; RV32-BITS-512: # %bb.0: +; RV32-BITS-512-NEXT: csrr a0, vlenb +; RV32-BITS-512-NEXT: slli a0, a0, 2 +; RV32-BITS-512-NEXT: addi a0, a0, -1 +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vid.v v16 +; RV32-BITS-512-NEXT: vrsub.vx v24, v16, a0 +; RV32-BITS-512-NEXT: vrgather.vv v20, v8, v24 +; RV32-BITS-512-NEXT: vrgather.vv v16, v12, v24 +; RV32-BITS-512-NEXT: vmv8r.v v8, v16 +; RV32-BITS-512-NEXT: ret +; +; RV64-BITS-UNKNOWN-LABEL: reverse_v64i8: +; RV64-BITS-UNKNOWN: # %bb.0: +; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb +; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 +; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vid.v v16 +; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0 +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24 +; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24 +; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16 +; RV64-BITS-UNKNOWN-NEXT: ret +; +; RV64-BITS-256-LABEL: reverse_v64i8: +; RV64-BITS-256: # %bb.0: +; RV64-BITS-256-NEXT: csrr a0, vlenb +; RV64-BITS-256-NEXT: slli a0, a0, 3 +; RV64-BITS-256-NEXT: addi a0, a0, -1 +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; RV64-BITS-256-NEXT: vid.v v16 +; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0 +; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24 +; RV64-BITS-256-NEXT: vmv.v.v v8, v16 +; RV64-BITS-256-NEXT: ret +; +; RV64-BITS-512-LABEL: reverse_v64i8: +; RV64-BITS-512: # %bb.0: +; RV64-BITS-512-NEXT: csrr a0, vlenb +; RV64-BITS-512-NEXT: slli a0, a0, 2 +; RV64-BITS-512-NEXT: addi a0, a0, -1 +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vid.v v16 +; RV64-BITS-512-NEXT: vrsub.vx v24, v16, a0 +; RV64-BITS-512-NEXT: vrgather.vv v20, v8, v24 +; RV64-BITS-512-NEXT: vrgather.vv v16, v12, v24 +; RV64-BITS-512-NEXT: vmv8r.v v8, v16 +; RV64-BITS-512-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v64i8( %a) + ret %res +} + +define @reverse_v1i16( %a) { +; CHECK-LABEL: reverse_v1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1i16( %a) + ret %res +} + +define @reverse_v2i16( %a) { +; CHECK-LABEL: reverse_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2i16( %a) + ret %res +} + +define @reverse_v4i16( %a) { +; CHECK-LABEL: reverse_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: 
csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4i16( %a) + ret %res +} + +define @reverse_v8i16( %a) { +; CHECK-LABEL: reverse_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vx v12, v10, a0 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8i16( %a) + ret %res +} + +define @reverse_v16i16( %a) { +; CHECK-LABEL: reverse_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v16, v12, a0 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v16i16( %a) + ret %res +} + +define @reverse_v32i16( %a) { +; CHECK-LABEL: reverse_v32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v24, v16, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v24 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v32i16( %a) + ret %res +} + +define @reverse_v1i32( %a) { +; CHECK-LABEL: reverse_v1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1i32( %a) + ret %res +} + +define @reverse_v2i32( %a) { +; CHECK-LABEL: reverse_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2i32( %a) + ret %res +} + +define @reverse_v4i32( %a) { +; CHECK-LABEL: reverse_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vx v12, v10, a0 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4i32( %a) + ret %res +} + +define @reverse_v8i32( %a) { +; CHECK-LABEL: reverse_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v16, v12, a0 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8i32( %a) + ret %res +} + +define @reverse_v16i32( %a) { +; CHECK-LABEL: reverse_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, 
a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v24, v16, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v24 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v16i32( %a) + ret %res +} + +define @reverse_v1i64( %a) { +; CHECK-LABEL: reverse_v1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1i64( %a) + ret %res +} + +define @reverse_v2i64( %a) { +; CHECK-LABEL: reverse_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vx v12, v10, a0 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2i64( %a) + ret %res +} + +define @reverse_v4i64( %a) { +; CHECK-LABEL: reverse_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v16, v12, a0 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4i64( %a) + ret %res +} + +define @reverse_v8i64( %a) { +; CHECK-LABEL: reverse_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v24, v16, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v24 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8i64( %a) + ret %res +} + +; +; VECTOR_REVERSE - floating point +; + +define @reverse_v1f16( %a) { +; CHECK-LABEL: reverse_v1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1f16( %a) + ret %res +} + +define @reverse_v2f16( %a) { +; CHECK-LABEL: reverse_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2f16( %a) + ret %res +} + +define @reverse_v4f16( %a) { +; CHECK-LABEL: reverse_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4f16( %a) + ret %res +} + +define @reverse_v8f16( %a) { +; CHECK-LABEL: reverse_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; 
CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vx v12, v10, a0 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8f16( %a) + ret %res +} + +define @reverse_v16f16( %a) { +; CHECK-LABEL: reverse_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v16, v12, a0 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v16f16( %a) + ret %res +} + +define @reverse_v32f16( %a) { +; CHECK-LABEL: reverse_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v24, v16, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v24 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v32f16( %a) + ret %res +} + +define @reverse_v1f32( %a) { +; CHECK-LABEL: reverse_v1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1f32( %a) + ret %res +} + +define @reverse_v2f32( %a) { +; CHECK-LABEL: reverse_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2f32( %a) + ret %res +} + +define @reverse_v4f32( %a) { +; CHECK-LABEL: reverse_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vx v12, v10, a0 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4f32( %a) + ret %res +} + +define @reverse_v8f32( %a) { +; CHECK-LABEL: reverse_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v16, v12, a0 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8f32( %a) + ret %res +} + +define @reverse_v16f32( %a) { +; CHECK-LABEL: reverse_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v24, v16, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v24 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v16f32( %a) + ret %res +} + +define @reverse_v1f64( %a) { +; CHECK-LABEL: reverse_v1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 3 +; 
CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vx v10, v9, a0 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v1f64( %a) + ret %res +} + +define @reverse_v2f64( %a) { +; CHECK-LABEL: reverse_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vx v12, v10, a0 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v2f64( %a) + ret %res +} + +define @reverse_v4f64( %a) { +; CHECK-LABEL: reverse_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v16, v12, a0 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v4f64( %a) + ret %res +} + +define @reverse_v8f64( %a) { +; CHECK-LABEL: reverse_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v24, v16, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v24 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v8f64( %a) + ret %res +} + +; Test widen reverse vector + +define @reverse_v3i64( %a) { +; CHECK-LABEL: reverse_v3i64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vx v12, v12, a0 +; CHECK-NEXT: vrgather.vv v16, v8, v12 +; CHECK-NEXT: vmv1r.v v8, v17 +; CHECK-NEXT: vmv1r.v v9, v18 +; CHECK-NEXT: vmv1r.v v10, v19 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v3i64( %a) + ret %res +} + +define @reverse_v6i64( %a) { +; CHECK-LABEL: reverse_v6i64: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vid.v v16 +; CHECK-NEXT: vrsub.vx v16, v16, a0 +; CHECK-NEXT: vrgather.vv v24, v8, v16 +; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv2r.v v10, v28 +; CHECK-NEXT: vmv2r.v v12, v30 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v6i64( %a) + ret %res +} + +define @reverse_v12i64( %a) { +; CHECK-LABEL: reverse_v12i64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: addi s0, sp, 64 +; CHECK-NEXT: .cfi_def_cfa s0, 0 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: sub sp, sp, a0 +; CHECK-NEXT: andi sp, sp, -64 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: addi a1, a0, -1 +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vid.v v24 +; CHECK-NEXT: vrsub.vx v24, v24, a1 +; CHECK-NEXT: vrgather.vv v0, v16, v24 +; CHECK-NEXT: vmv4r.v v16, v4 +; CHECK-NEXT: vrgather.vv v0, v8, v24 +; CHECK-NEXT: vmv4r.v v20, v0 +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: addi a1, sp, 64 +; CHECK-NEXT: add a0, a1, a0 +; CHECK-NEXT: vs4r.v v4, (a0) +; CHECK-NEXT: vs8r.v v16, (a1) +; CHECK-NEXT: vl8re64.v v16, (a0) +; CHECK-NEXT: vl8re64.v v8, (a1) +; CHECK-NEXT: addi sp, s0, -64 +; CHECK-NEXT: 
addi sp, sp, 64 +; CHECK-NEXT: ret + %res = call @llvm.experimental.vector.reverse.v12i64( %a) + ret %res +} + +declare @llvm.experimental.vector.reverse.v2i1() +declare @llvm.experimental.vector.reverse.v4i1() +declare @llvm.experimental.vector.reverse.v8i1() +declare @llvm.experimental.vector.reverse.v16i1() +declare @llvm.experimental.vector.reverse.v32i1() +declare @llvm.experimental.vector.reverse.v64i1() +declare @llvm.experimental.vector.reverse.v1i8() +declare @llvm.experimental.vector.reverse.v2i8() +declare @llvm.experimental.vector.reverse.v4i8() +declare @llvm.experimental.vector.reverse.v8i8() +declare @llvm.experimental.vector.reverse.v16i8() +declare @llvm.experimental.vector.reverse.v32i8() +declare @llvm.experimental.vector.reverse.v64i8() +declare @llvm.experimental.vector.reverse.v1i16() +declare @llvm.experimental.vector.reverse.v2i16() +declare @llvm.experimental.vector.reverse.v4i16() +declare @llvm.experimental.vector.reverse.v8i16() +declare @llvm.experimental.vector.reverse.v16i16() +declare @llvm.experimental.vector.reverse.v32i16() +declare @llvm.experimental.vector.reverse.v1i32() +declare @llvm.experimental.vector.reverse.v2i32() +declare @llvm.experimental.vector.reverse.v4i32() +declare @llvm.experimental.vector.reverse.v8i32() +declare @llvm.experimental.vector.reverse.v16i32() +declare @llvm.experimental.vector.reverse.v1i64() +declare @llvm.experimental.vector.reverse.v2i64() +declare @llvm.experimental.vector.reverse.v4i64() +declare @llvm.experimental.vector.reverse.v8i64() +declare @llvm.experimental.vector.reverse.v1f16() +declare @llvm.experimental.vector.reverse.v2f16() +declare @llvm.experimental.vector.reverse.v4f16() +declare @llvm.experimental.vector.reverse.v8f16() +declare @llvm.experimental.vector.reverse.v16f16() +declare @llvm.experimental.vector.reverse.v32f16() +declare @llvm.experimental.vector.reverse.v1f32() +declare @llvm.experimental.vector.reverse.v2f32() +declare @llvm.experimental.vector.reverse.v4f32() +declare @llvm.experimental.vector.reverse.v8f32() +declare @llvm.experimental.vector.reverse.v16f32() +declare @llvm.experimental.vector.reverse.v1f64() +declare @llvm.experimental.vector.reverse.v2f64() +declare @llvm.experimental.vector.reverse.v4f64() +declare @llvm.experimental.vector.reverse.v8f64() +declare @llvm.experimental.vector.reverse.v3i64() +declare @llvm.experimental.vector.reverse.v6i64() +declare @llvm.experimental.vector.reverse.v12i64() diff --git a/llvm/test/CodeGen/RISCV/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/shuffle-reverse.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/shuffle-reverse.ll @@ -0,0 +1,658 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+f,+zfh,+experimental-zvfh,+d -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+f,+zfh,+experimental-zvfh,+d -verify-machineinstrs | FileCheck %s + +define <2 x i8> @v2i8(<2 x i8> %a) { +; CHECK-LABEL: v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2i8 = shufflevector <2 x i8> %a, <2 x i8> undef, <2 x i32> + ret <2 x i8> %v2i8 +} + +define <4 x i8> @v2i8_2(<2 x i8> %a, <2 x i8> %b) { +; CHECK-LABEL: v2i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vslidedown.vi v10, v8, 1 +; CHECK-NEXT: 
vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vslideup.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vslidedown.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vslideup.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v8, v10, 2 +; CHECK-NEXT: ret + %v4i8 = shufflevector <2 x i8> %a, <2 x i8> %b, <4 x i32> + ret <4 x i8> %v4i8 +} + +define <4 x i8> @v4i8(<4 x i8> %a) { +; CHECK-LABEL: v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 3 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v4i8 = shufflevector <4 x i8> %a, <4 x i8> undef, <4 x i32> + ret <4 x i8> %v4i8 +} + +define <8 x i8> @v4i8_2(<4 x i8> %a, <4 x i8> %b) { +; CHECK-LABEL: v4i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vid.v v11 +; CHECK-NEXT: vrsub.vi v12, v11, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v11, 3 +; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret + %v8i8 = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> + ret <8 x i8> %v8i8 +} + +define <8 x i8> @v8i8(<8 x i8> %a) { +; CHECK-LABEL: v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 7 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v8i8 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> + ret <8 x i8> %v8i8 +} + +define <16 x i8> @v8i8_2(<8 x i8> %a, <8 x i8> %b) { +; CHECK-LABEL: v8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vid.v v11 +; CHECK-NEXT: vrsub.vi v12, v11, 15 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: li a0, 255 +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vrsub.vi v8, v11, 7 +; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v16i8 = shufflevector <8 x i8> %a, <8 x i8> %b, <16 x i32> + ret <16 x i8> %v16i8 +} + +define <16 x i8> @v16i8(<16 x i8> %a) { +; CHECK-LABEL: v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 15 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v16i8 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> + ret <16 x i8> %v16i8 +} + +define <32 x i8> @v16i8_2(<16 x i8> %a, <16 x i8> %b) { + %v32i8 = shufflevector <16 x i8> %a, <16 x i8> %b, <32 x i32> + ret <32 x i8> %v32i8 +} + +define <2 x i16> @v2i16(<2 x i16> %a) { +; CHECK-LABEL: v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2i16 = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32> + ret <2 x i16> %v2i16 +} + +define <4 x i16> @v2i16_2(<2 x i16> %a, <2 x i16> %b) { +; CHECK-LABEL: v2i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vslidedown.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, 
ta, mu +; CHECK-NEXT: vslidedown.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v8, v10, 2 +; CHECK-NEXT: ret + %v4i16 = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> + ret <4 x i16> %v4i16 +} + +define <4 x i16> @v4i16(<4 x i16> %a) { +; CHECK-LABEL: v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 3 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v4i16 = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> + ret <4 x i16> %v4i16 +} + +define <8 x i16> @v4i16_2(<4 x i16> %a, <4 x i16> %b) { +; CHECK-LABEL: v4i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vid.v v11 +; CHECK-NEXT: vrsub.vi v12, v11, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v11, 3 +; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v8i16 = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> + ret <8 x i16> %v8i16 +} + +define <8 x i16> @v8i16(<8 x i16> %a) { +; CHECK-LABEL: v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 7 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v8i16 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> + ret <8 x i16> %v8i16 +} + +define <16 x i16> @v8i16_2(<8 x i16> %a, <8 x i16> %b) { +; CHECK-LABEL: v8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v9 +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2 +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vid.v v14 +; CHECK-NEXT: vrsub.vi v16, v14, 15 +; CHECK-NEXT: vrgather.vv v10, v8, v16 +; CHECK-NEXT: li a0, 255 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v14, 7 +; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v16i16 = shufflevector <8 x i16> %a, <8 x i16> %b, <16 x i32> + ret <16 x i16> %v16i16 +} + +define <16 x i16> @v16i16(<16 x i16> %a) { +; CHECK-LABEL: v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vi v12, v10, 15 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v16i16 = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> + ret <16 x i16> %v16i16 +} + +define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) { + %v32i16 = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> + ret <32 x i16> %v32i16 +} + +define <2 x i32> @v2i32(<2 x i32> %a) { +; CHECK-LABEL: v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2i32 = shufflevector <2 x i32> %a, <2 x i32> undef, <2 x i32> + ret <2 x i32> %v2i32 +} + +define <4 x i32> @v2i32_2(<2 x i32> %a, < 2 x i32> %b) { +; CHECK-LABEL: v2i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vslidedown.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vslidedown.vi v8, v9, 1 +; 
CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vslideup.vi v8, v10, 2 +; CHECK-NEXT: ret + %v4i32 = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> + ret <4 x i32> %v4i32 +} + +define <4 x i32> @v4i32(<4 x i32> %a) { +; CHECK-LABEL: v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 3 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v4i32 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> + ret <4 x i32> %v4i32 +} + +define <8 x i32> @v4i32_2(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: v4i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v9 +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2 +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vid.v v14 +; CHECK-NEXT: vrsub.vi v16, v14, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v16 +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v14, 3 +; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v8i32 = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> + ret <8 x i32> %v8i32 +} + +define <8 x i32> @v8i32(<8 x i32> %a) { +; CHECK-LABEL: v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vi v12, v10, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v8i32 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> + ret <8 x i32> %v8i32 +} + +define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) { +; CHECK-LABEL: v8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vid.v v20 +; CHECK-NEXT: vrsub.vi v24, v20, 15 +; CHECK-NEXT: vrgather.vv v12, v8, v24 +; CHECK-NEXT: li a0, 255 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v20, 7 +; CHECK-NEXT: vrgather.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v16i32 = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> + ret <16 x i32> %v16i32 +} + +define <16 x i32> @v16i32(<16 x i32> %a) { +; CHECK-LABEL: v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vid.v v12 +; CHECK-NEXT: vrsub.vi v16, v12, 15 +; CHECK-NEXT: vrgather.vv v12, v8, v16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v16i32 = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> + ret <16 x i32> %v16i32 +} + +define <32 x i32> @v16i32_2(<16 x i32> %a, <16 x i32> %b) { + %v32i32 = shufflevector <16 x i32> %a, <16 x i32> %b, <32 x i32> + ret <32 x i32> %v32i32 +} + +define <2 x i64> @v2i64(<2 x i64> %a) { +; CHECK-LABEL: v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2i64 = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> + ret <2 x i64> %v2i64 +} + +define <4 x i64> @v2i64_2(<2 x i64> %a, < 2 x i64> %b) { +; CHECK-LABEL: v2i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vslidedown.vi v12, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vslideup.vi v12, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: 
vslidedown.vi v10, v9, 1 +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vslideup.vi v10, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vslideup.vi v10, v12, 2 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret + %v4i64 = shufflevector <2 x i64> %a, <2 x i64> %b, <4 x i32> + ret <4 x i64> %v4i64 +} + +define <4 x i64> @v4i64(<4 x i64> %a) { + %v4i64 = shufflevector <4 x i64> %a, <4 x i64> undef, <4 x i32> + ret <4 x i64> %v4i64 +} + +define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) { + %v8i64 = shufflevector <4 x i64> %a, <4 x i64> %b, <8 x i32> + ret <8 x i64> %v8i64 +} + +define <2 x half> @v2f16(<2 x half> %a) { +; CHECK-LABEL: v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2f16 = shufflevector <2 x half> %a, <2 x half> undef, <2 x i32> + ret <2 x half> %v2f16 +} + +define <4 x half> @v2f16_2(<2 x half> %a, <2 x half> %b) { +; CHECK-LABEL: v2f16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vslidedown.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vslidedown.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vslideup.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v8, v10, 2 +; CHECK-NEXT: ret + %v4f16 = shufflevector <2 x half> %a, <2 x half> %b, <4 x i32> + ret <4 x half> %v4f16 +} + +define <4 x half> @v4f16(<4 x half> %a) { +; CHECK-LABEL: v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 3 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v4f16 = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> + ret <4 x half> %v4f16 +} + +define <8 x half> @v4f16_2(<4 x half> %a, <4 x half> %b) { +; CHECK-LABEL: v4f16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vid.v v11 +; CHECK-NEXT: vrsub.vi v12, v11, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v11, 3 +; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v8f16 = shufflevector <4 x half> %a, <4 x half> %b, <8 x i32> + ret <8 x half> %v8f16 +} + +define <8 x half> @v8f16(<8 x half> %a) { +; CHECK-LABEL: v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 7 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v8f16 = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> + ret <8 x half> %v8f16 +} + +define <16 x half> @v8f16_2(<8 x half> %a, <8 x half> %b) { +; CHECK-LABEL: v8f16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v9 +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2 +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vid.v v14 +; CHECK-NEXT: vrsub.vi v16, v14, 15 +; CHECK-NEXT: vrgather.vv v10, v8, v16 +; CHECK-NEXT: li a0, 255 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v14, 7 +; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v16f16 = shufflevector <8 x half> %a, <8 x 
half> %b, <16 x i32> + ret <16 x half> %v16f16 +} + +define <16 x half> @v16f16(<16 x half> %a) { +; CHECK-LABEL: v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vi v12, v10, 15 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v16f16 = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> + ret <16 x half> %v16f16 +} + +define <32 x half> @v16f16_2(<16 x half> %a) { +; CHECK-LABEL: v16f16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 +; CHECK-NEXT: lui a0, %hi(.LCPI35_0) +; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0) +; CHECK-NEXT: li a1, 32 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vle16.v v12, (a0) +; CHECK-NEXT: vmv.v.i v16, 0 +; CHECK-NEXT: vsetivli zero, 16, e16, m4, tu, mu +; CHECK-NEXT: vslideup.vi v16, v8, 0 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vrgather.vv v8, v16, v12 +; CHECK-NEXT: ret + %v32f16 = shufflevector <16 x half> %a, <16 x half> undef, <32 x i32> + ret <32 x half> %v32f16 +} + +define <2 x float> @v2f32(<2 x float> %a) { +; CHECK-LABEL: v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2f32 = shufflevector <2 x float> %a, <2 x float> undef, <2 x i32> + ret <2 x float> %v2f32 +} + +define <4 x float> @v2f32_2(<2 x float> %a, <2 x float> %b) { +; CHECK-LABEL: v2f32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vslidedown.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v10, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vslidedown.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vslideup.vi v8, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vslideup.vi v8, v10, 2 +; CHECK-NEXT: ret + %v4f32 = shufflevector <2 x float> %a, <2 x float> %b, <4 x i32> + ret <4 x float> %v4f32 +} + +define <4 x float> @v4f32(<4 x float> %a) { +; CHECK-LABEL: v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vid.v v9 +; CHECK-NEXT: vrsub.vi v10, v9, 3 +; CHECK-NEXT: vrgather.vv v9, v8, v10 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v4f32 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> + ret <4 x float> %v4f32 +} + +define <8 x float> @v4f32_2(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: v4f32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv1r.v v12, v9 +; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m2 +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vid.v v14 +; CHECK-NEXT: vrsub.vi v16, v14, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v16 +; CHECK-NEXT: li a0, 15 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v14, 3 +; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v8f32 = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> + ret <8 x float> %v8f32 +} + +define <8 x float> @v8f32(<8 x float> %a) { +; CHECK-LABEL: v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vid.v v10 +; CHECK-NEXT: vrsub.vi v12, v10, 7 +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v8f32 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> + 
ret <8 x float> %v8f32 +} + +define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) { +; CHECK-LABEL: v8f32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vid.v v20 +; CHECK-NEXT: vrsub.vi v24, v20, 15 +; CHECK-NEXT: vrgather.vv v12, v8, v24 +; CHECK-NEXT: li a0, 255 +; CHECK-NEXT: vmv.s.x v0, a0 +; CHECK-NEXT: vrsub.vi v8, v20, 7 +; CHECK-NEXT: vrgather.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v16f32 = shufflevector <8 x float> %a, <8 x float> %b, <16 x i32> + ret <16 x float> %v16f32 +} + +define <2 x double> @v2f64(<2 x double> %a) { +; CHECK-LABEL: v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vslidedown.vi v9, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vslideup.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v2f64 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> + ret <2 x double> %v2f64 +} + +define <4 x double> @v2f64_2(<2 x double> %a, < 2 x double> %b) { +; CHECK-LABEL: v2f64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vslidedown.vi v12, v8, 1 +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vslideup.vi v12, v8, 1 +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vslidedown.vi v10, v9, 1 +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vslideup.vi v10, v9, 1 +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vslideup.vi v10, v12, 2 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret + %v4f64 = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> + ret <4 x double> %v4f64 +} + +define <4 x double> @v4f64(<4 x double> %a) { + %v4f64 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> + ret <4 x double> %v4f64 +} + +define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) { + %v8f64 = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> + ret <8 x double> %v8f64 +} + +define <32 x i8> @v32i8(<32 x i8> %a) { +; CHECK-LABEL: v32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, %hi(.LCPI46_0) +; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0) +; CHECK-NEXT: li a1, 32 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vle8.v v12, (a0) +; CHECK-NEXT: vrgather.vv v10, v8, v12 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v32i8 = shufflevector <32 x i8> %a, <32 x i8> undef, <32 x i32> + ret <32 x i8> %v32i8 +} +
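
; For orientation, a minimal standalone sketch (not part of the patch) of the two
; input forms the tests above exercise: a fixed-length reverse written as a
; shufflevector with a descending mask, and a scalable reverse that has to go
; through the llvm.experimental.vector.reverse intrinsic because a scalable mask
; cannot be spelled out.  Function names and the concrete element type here are
; illustrative only; compile with something like
;   llc -mtriple=riscv64 -mattr=+v reverse-example.ll

define <4 x i32> @reverse_fixed_example(<4 x i32> %a) {
  ; Fixed-length reverse: plain shuffle with indices 3,2,1,0.
  %rev = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i32> %rev
}

define <vscale x 4 x i32> @reverse_scalable_example(<vscale x 4 x i32> %a) {
  ; Scalable reverse: element count is unknown at compile time, so use the intrinsic.
  %rev = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %rev
}

declare <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32>)

; Both forms converge on the lowering visible in the checks above: vid.v
; materializes the element indices, vrsub flips them (either against an
; immediate for fixed lengths or against vlenb-derived counts for scalable
; types), and vrgather.vv performs the permutation; the tiny two-element cases
; are instead handled with a vslidedown.vi/vslideup.vi pair.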