diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4118,6 +4118,56 @@
   return Interleaved;
 }
 
+// If we have a vector of bits that we want to reverse, we can use a vbrev on a
+// larger element type, e.g. v32i1 can be reversed with a v1i32 bitreverse.
+static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
+                                      SelectionDAG &DAG,
+                                      const RISCVSubtarget &Subtarget) {
+  SDLoc DL(SVN);
+  MVT VT = SVN->getSimpleValueType(0);
+  SDValue V = SVN->getOperand(0);
+  unsigned NumElts = VT.getVectorNumElements();
+
+  assert(VT.getVectorElementType() == MVT::i1);
+
+  if (!ShuffleVectorInst::isReverseMask(SVN->getMask()) ||
+      !SVN->getOperand(1).isUndef())
+    return SDValue();
+
+  unsigned ViaEltSize = std::max((uint64_t)8, PowerOf2Ceil(NumElts));
+  MVT ViaVT = MVT::getVectorVT(MVT::getIntegerVT(ViaEltSize), 1);
+  MVT ViaBitVT = MVT::getVectorVT(MVT::i1, ViaVT.getScalarSizeInBits());
+
+  // If we don't have zvbb or the larger element type > ELEN, the operation will
+  // be illegal.
+  if (!Subtarget.getTargetLowering()->isOperationLegalOrCustom(ISD::BITREVERSE,
+                                                               ViaVT))
+    return SDValue();
+
+  // If the bit vector doesn't fit exactly into the larger element type, we need
+  // to insert it into the larger vector and then shift up the reversed bits
+  // afterwards to get rid of the gap introduced.
+  if (ViaEltSize > NumElts)
+    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ViaBitVT, DAG.getUNDEF(ViaBitVT),
+                    V, DAG.getVectorIdxConstant(0, DL));
+
+  SDValue Res =
+      DAG.getNode(ISD::BITREVERSE, DL, ViaVT, DAG.getBitcast(ViaVT, V));
+
+  // Shift up the reversed bits if the vector didn't exactly fit into the larger
+  // element type.
+  if (ViaEltSize > NumElts)
+    Res = DAG.getNode(ISD::SRL, DL, ViaVT, Res,
+                      DAG.getConstant(ViaEltSize - NumElts, DL, ViaVT));
+
+  Res = DAG.getBitcast(ViaBitVT, Res);
+
+  if (ViaEltSize > NumElts)
+    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+                      DAG.getVectorIdxConstant(0, DL));
+  return Res;
+}
+
 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {
   SDValue V1 = Op.getOperand(0);
@@ -4128,8 +4178,11 @@
   unsigned NumElts = VT.getVectorNumElements();
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
 
-  // Promote i1 shuffle to i8 shuffle.
   if (VT.getVectorElementType() == MVT::i1) {
+    if (SDValue V = lowerBitreverseShuffle(SVN, DAG, Subtarget))
+      return V;
+
+    // Promote i1 shuffle to i8 shuffle.
     MVT WidenVT = MVT::getVectorVT(MVT::i8, VT.getVectorElementCount());
     V1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V1);
     V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)
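The lowering above relies on a simple bit identity: reversing the order of NumElts one-bit elements held in the low bits of a W-bit container is the same as bit-reversing the whole container and then shifting the result right by W - NumElts (no shift is needed when the mask fills the container exactly). A minimal standalone sketch of that identity, assuming nothing beyond standard C++; the helper names below are illustrative and are not LLVM APIs:

#include <cassert>
#include <cstdint>

// Reference: reverse the order of the low NumElts bits of Bits.
static uint64_t reverseLowBits(uint64_t Bits, unsigned NumElts) {
  uint64_t Res = 0;
  for (unsigned I = 0; I < NumElts; ++I)
    Res |= ((Bits >> I) & 1) << (NumElts - 1 - I);
  return Res;
}

// What a W-bit bitreverse (vbrev.v element) does: reverse all W bits.
static uint64_t bitReverse(uint64_t Bits, unsigned W) {
  uint64_t Res = 0;
  for (unsigned I = 0; I < W; ++I)
    Res |= ((Bits >> I) & 1) << (W - 1 - I);
  return Res;
}

int main() {
  // v2i1 in an e8 container: bit-reverse, then shift right by 8 - 2 = 6,
  // matching the vbrev.v + "vsrl.vi v0, v8, 6" sequence in the tests below.
  for (uint64_t Bits = 0; Bits < 4; ++Bits)
    assert(reverseLowBits(Bits, 2) == (bitReverse(Bits, 8) >> 6));

  // v8i1 fills its e8 container exactly, so no shift is needed.
  for (uint64_t Bits = 0; Bits < 256; ++Bits)
    assert(reverseLowBits(Bits, 8) == bitReverse(Bits, 8));
  return 0;
}

The test updates that follow exercise exactly these two shapes (masks that do and do not fill their container) plus the fallback cases.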
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -1,108 +1,164 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-UNKNOWN
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-256
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-512
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-UNKNOWN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-256
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-512
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-UNKNOWN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-256
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-512
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-UNKNOWN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-256
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-512
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVBB,RV32-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVBB,RV64-ZVBB
 ;
 ; VECTOR_REVERSE - masks
 ;
 
 define <2 x i1> @reverse_v2i1(<2 x i1> %a) {
-; CHECK-LABEL: reverse_v2i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vmsne.vi v0, v9, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v2i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT: vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT: vmsne.vi v0, v9, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v2i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-NEXT: vbrev.v v8, v0
+; ZVBB-NEXT: vsrl.vi v0, v8, 6
+; ZVBB-NEXT: ret
   %res = call <2 x i1> @llvm.experimental.vector.reverse.v2i1(<2 x i1> %a)
   ret <2 x i1> %res
 }
 
 define <4 x i1> @reverse_v4i1(<4 x i1> %a) {
-; CHECK-LABEL: reverse_v4i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vid.v v9
-; CHECK-NEXT: vrsub.vi v9, v9, 3
-; CHECK-NEXT: vrgather.vv v10, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v4i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vid.v v9
+; NO-ZVBB-NEXT: vrsub.vi v9, v9, 3
+; NO-ZVBB-NEXT: vrgather.vv v10, v8, v9
+; NO-ZVBB-NEXT: vmsne.vi v0, v10, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v4i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-NEXT: vbrev.v v8, v0
+; ZVBB-NEXT: vsrl.vi v0, v8, 4
+; ZVBB-NEXT: ret
   %res = call <4 x i1> @llvm.experimental.vector.reverse.v4i1(<4 x i1> %a)
   ret <4 x i1> %res
 }
 
 define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
-; CHECK-LABEL: reverse_v8i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vid.v v9
-; CHECK-NEXT: vrsub.vi v9, v9, 7
-; CHECK-NEXT: vrgather.vv v10, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v8i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vid.v v9
+; NO-ZVBB-NEXT: vrsub.vi v9, v9, 7
+; NO-ZVBB-NEXT: vrgather.vv v10, v8, v9
+; NO-ZVBB-NEXT: vmsne.vi v0, v10, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v8i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <8 x i1> @llvm.experimental.vector.reverse.v8i1(<8 x i1> %a)
   ret <8 x i1> %res
 }
 
 define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
-; CHECK-LABEL: reverse_v16i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vid.v v9
-; CHECK-NEXT: vrsub.vi v9, v9, 15
-; CHECK-NEXT: vrgather.vv v10, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v16i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vid.v v9
+; NO-ZVBB-NEXT: vrsub.vi v9, v9, 15
+; NO-ZVBB-NEXT: vrgather.vv v10, v8, v9
+; NO-ZVBB-NEXT: vmsne.vi v0, v10, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v16i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <16 x i1> @llvm.experimental.vector.reverse.v16i1(<16 x i1> %a)
   ret <16 x i1> %res
 }
 
 define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
-; CHECK-LABEL: reverse_v32i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vrgather.vv v12, v10, v8
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v32i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: lui a0, %hi(.LCPI4_0)
+; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI4_0)
+; NO-ZVBB-NEXT: li a1, 32
+; NO-ZVBB-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vmv.v.i v10, 0
+; NO-ZVBB-NEXT: vmerge.vim v10, v10, 1, v0
+; NO-ZVBB-NEXT: vrgather.vv v12, v10, v8
+; NO-ZVBB-NEXT: vmsne.vi v0, v12, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v32i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <32 x i1> @llvm.experimental.vector.reverse.v32i1(<32 x i1> %a)
   ret <32 x i1> %res
 }
 
 define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
-; CHECK-LABEL: reverse_v64i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; CHECK-NEXT: li a1, 64
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v12, 0
-; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vrgather.vv v16, v12, v8
-; CHECK-NEXT: vmsne.vi v0, v16, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v64i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: lui a0, %hi(.LCPI5_0)
+; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI5_0)
+; NO-ZVBB-NEXT: li a1, 64
+; NO-ZVBB-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vmv.v.i v12, 0
+; NO-ZVBB-NEXT: vmerge.vim v12, v12, 1, v0
+; NO-ZVBB-NEXT: vrgather.vv v16, v12, v8
+; NO-ZVBB-NEXT: vmsne.vi v0, v16, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v64i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <64 x i1> @llvm.experimental.vector.reverse.v64i1(<64 x i1> %a)
   ret <64 x i1> %res
 }
 
+define <128 x i1> @reverse_v128i1(<128 x i1> %a) {
+; CHECK-LABEL: reverse_v128i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
+; CHECK-NEXT: vrgather.vv v24, v16, v8
+; CHECK-NEXT: vmsne.vi v0, v24, 0
+; CHECK-NEXT: ret
+  %res = call <128 x i1> @llvm.experimental.vector.reverse.v128i1(<128 x i1> %a)
+  ret <128 x i1> %res
+}
+
 define <1 x i8> @reverse_v1i8(<1 x i8> %a) {
 ; CHECK-LABEL: reverse_v1i8:
@@ -166,8 +222,8 @@
 define <32 x i8> @reverse_v32i8(<32 x i8> %a) {
 ; CHECK-LABEL: reverse_v32i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI11_0)
+; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI12_0)
 ; CHECK-NEXT: li a1, 32
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
@@ -181,8 +237,8 @@
 define <64 x i8> @reverse_v64i8(<64 x i8> %a) {
 ; CHECK-LABEL: reverse_v64i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI12_0)
+; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI13_0)
 ; CHECK-NEXT: li a1, 64
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
@@ -255,8 +311,8 @@
 define <32 x i16> @reverse_v32i16(<32 x i16> %a) {
 ; CHECK-LABEL: reverse_v32i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI18_0)
+; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
 ; CHECK-NEXT: li a1, 32
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
@@ -403,6 +459,25 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v10
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v4i64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-ZVBB-NEXT: vid.v v10
+; RV32-ZVBB-NEXT: vrsub.vi v12, v10, 3
+; RV32-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-ZVBB-NEXT: vrgatherei16.vv v10, v8, v12
+; RV32-ZVBB-NEXT: vmv.v.v v8, v10
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v4i64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-ZVBB-NEXT: vid.v v10
+; RV64-ZVBB-NEXT: vrsub.vi v12, v10, 3
+; RV64-ZVBB-NEXT: vrgather.vv v10, v8, v12
+; RV64-ZVBB-NEXT: vmv.v.v v8, v10
+; RV64-ZVBB-NEXT: ret
   %res = call <4 x i64> @llvm.experimental.vector.reverse.v4i64(<4 x i64> %a)
   ret <4 x i64> %res
 }
@@ -464,6 +539,25 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v12
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v8i64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVBB-NEXT: vid.v v12
+; RV32-ZVBB-NEXT: vrsub.vi v16, v12, 7
+; RV32-ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
+; RV32-ZVBB-NEXT: vmv.v.v v8, v12
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v8i64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-ZVBB-NEXT: vid.v v12
+; RV64-ZVBB-NEXT: vrsub.vi v16, v12, 7
+; RV64-ZVBB-NEXT: vrgather.vv v12, v8, v16
+; RV64-ZVBB-NEXT: vmv.v.v v8, v12
+; RV64-ZVBB-NEXT: ret
   %res = call <8 x i64> @llvm.experimental.vector.reverse.v8i64(<8 x i64> %a)
   ret <8 x i64> %res
 }
@@ -531,8 +625,8 @@
 define <32 x half> @reverse_v32f16(<32 x half> %a) {
 ; CHECK-LABEL: reverse_v32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI33_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI33_0)
+; CHECK-NEXT: lui a0, %hi(.LCPI34_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI34_0)
 ; CHECK-NEXT: li a1, 32
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
@@ -679,6 +773,25 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v10
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v4f64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-ZVBB-NEXT: vid.v v10
+; RV32-ZVBB-NEXT: vrsub.vi v12, v10, 3
+; RV32-ZVBB-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-ZVBB-NEXT: vrgatherei16.vv v10, v8, v12
+; RV32-ZVBB-NEXT: vmv.v.v v8, v10
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v4f64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-ZVBB-NEXT: vid.v v10
+; RV64-ZVBB-NEXT: vrsub.vi v12, v10, 3
+; RV64-ZVBB-NEXT: vrgather.vv v10, v8, v12
+; RV64-ZVBB-NEXT: vmv.v.v v8, v10
+; RV64-ZVBB-NEXT: ret
   %res = call <4 x double> @llvm.experimental.vector.reverse.v4f64(<4 x double> %a)
   ret <4 x double> %res
 }
@@ -740,6 +853,25 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v12
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v8f64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVBB-NEXT: vid.v v12
+; RV32-ZVBB-NEXT: vrsub.vi v16, v12, 7
+; RV32-ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
+; RV32-ZVBB-NEXT: vmv.v.v v8, v12
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v8f64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-ZVBB-NEXT: vid.v v12
+; RV64-ZVBB-NEXT: vrsub.vi v16, v12, 7
+; RV64-ZVBB-NEXT: vrgather.vv v12, v8, v16
+; RV64-ZVBB-NEXT: vmv.v.v v8, v12
+; RV64-ZVBB-NEXT: ret
   %res = call <8 x double> @llvm.experimental.vector.reverse.v8f64(<8 x double> %a)
   ret <8 x double> %res
 }
@@ -748,8 +880,8 @@
 define <3 x i64> @reverse_v3i64(<3 x i64> %a) {
 ; RV32-BITS-UNKNOWN-LABEL: reverse_v3i64:
 ; RV32-BITS-UNKNOWN: # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI43_0)
-; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI43_0)
+; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI44_0)
+; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI44_0)
 ; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-BITS-UNKNOWN-NEXT: vle32.v v12, (a0)
 ; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v10, v8, v12
@@ -758,8 +890,8 @@
 ;
 ; RV32-BITS-256-LABEL: reverse_v3i64:
 ; RV32-BITS-256: # %bb.0:
-; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI43_0)
-; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI43_0)
+; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI44_0)
+; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI44_0)
 ; RV32-BITS-256-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-BITS-256-NEXT: vle32.v v12, (a0)
 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12
@@ -768,8 +900,8 @@
 ;
 ; RV32-BITS-512-LABEL: reverse_v3i64:
 ; RV32-BITS-512: # %bb.0:
-; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI43_0)
-; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI43_0)
+; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI44_0)
+; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI44_0)
 ; RV32-BITS-512-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-BITS-512-NEXT: vle32.v v12, (a0)
 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12
@@ -802,6 +934,25 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v10
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v3i64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: lui a0, %hi(.LCPI44_0)
+; RV32-ZVBB-NEXT: addi a0, a0, %lo(.LCPI44_0)
+; RV32-ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-ZVBB-NEXT: vle32.v v12, (a0)
+; RV32-ZVBB-NEXT: vrgather.vv v10, v8, v12
+; RV32-ZVBB-NEXT: vmv.v.v v8, v10
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v3i64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-ZVBB-NEXT: vid.v v10
+; RV64-ZVBB-NEXT: vrsub.vi v12, v10, 2
+; RV64-ZVBB-NEXT: vrgather.vv v10, v8, v12
+; RV64-ZVBB-NEXT: vmv.v.v v8, v10
+; RV64-ZVBB-NEXT: ret
   %res = call <3 x i64> @llvm.experimental.vector.reverse.v3i64(<3 x i64> %a)
   ret <3 x i64> %res
 }
@@ -809,8 +960,8 @@
 define <6 x i64> @reverse_v6i64(<6 x i64> %a) {
 ; RV32-BITS-UNKNOWN-LABEL: reverse_v6i64:
 ; RV32-BITS-UNKNOWN: # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI44_0)
-; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI44_0)
+; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI45_0)
+; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI45_0)
 ; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-BITS-UNKNOWN-NEXT: vle32.v v16, (a0)
 ; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v12, v8, v16
@@ -819,8 +970,8 @@
 ;
 ; RV32-BITS-256-LABEL: reverse_v6i64:
 ; RV32-BITS-256: # %bb.0:
-; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI44_0)
-; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI44_0)
+; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI45_0)
+; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI45_0)
 ; RV32-BITS-256-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-BITS-256-NEXT: vle32.v v16, (a0)
 ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16
@@ -829,8 +980,8 @@
 ;
 ; RV32-BITS-512-LABEL: reverse_v6i64:
 ; RV32-BITS-512: # %bb.0:
-; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI44_0)
-; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI44_0)
+; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI45_0)
+; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI45_0)
 ; RV32-BITS-512-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; RV32-BITS-512-NEXT: vle32.v v16, (a0)
 ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16
@@ -863,6 +1014,25 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v12
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v6i64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: lui a0, %hi(.LCPI45_0)
+; RV32-ZVBB-NEXT: addi a0, a0, %lo(.LCPI45_0)
+; RV32-ZVBB-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-ZVBB-NEXT: vle32.v v16, (a0)
+; RV32-ZVBB-NEXT: vrgather.vv v12, v8, v16
+; RV32-ZVBB-NEXT: vmv.v.v v8, v12
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v6i64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-ZVBB-NEXT: vid.v v12
+; RV64-ZVBB-NEXT: vrsub.vi v16, v12, 5
+; RV64-ZVBB-NEXT: vrgather.vv v12, v8, v16
+; RV64-ZVBB-NEXT: vmv.v.v v8, v12
+; RV64-ZVBB-NEXT: ret
   %res = call <6 x i64> @llvm.experimental.vector.reverse.v6i64(<6 x i64> %a)
   ret <6 x i64> %res
 }
@@ -870,8 +1040,8 @@
 define <12 x i64> @reverse_v12i64(<12 x i64> %a) {
 ; RV32-BITS-UNKNOWN-LABEL: reverse_v12i64:
 ; RV32-BITS-UNKNOWN: # %bb.0:
-; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI45_0)
-; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI45_0)
+; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI46_0)
+; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI46_0)
 ; RV32-BITS-UNKNOWN-NEXT: li a1, 32
 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-BITS-UNKNOWN-NEXT: vle32.v v24, (a0)
@@ -881,8 +1051,8 @@
 ;
 ; RV32-BITS-256-LABEL: reverse_v12i64:
 ; RV32-BITS-256: # %bb.0:
-; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI45_0)
-; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI45_0)
+; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI46_0)
+; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI46_0)
 ; RV32-BITS-256-NEXT: li a1, 32
 ; RV32-BITS-256-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-BITS-256-NEXT: vle32.v v24, (a0)
@@ -892,8 +1062,8 @@
 ;
 ; RV32-BITS-512-LABEL: reverse_v12i64:
 ; RV32-BITS-512: # %bb.0:
-; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI45_0)
-; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI45_0)
+; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI46_0)
+; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI46_0)
 ; RV32-BITS-512-NEXT: li a1, 32
 ; RV32-BITS-512-NEXT: vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-BITS-512-NEXT: vle32.v v24, (a0)
@@ -927,6 +1097,26 @@
 ; RV64-BITS-512-NEXT: vrgather.vv v16, v8, v24
 ; RV64-BITS-512-NEXT: vmv.v.v v8, v16
 ; RV64-BITS-512-NEXT: ret
+;
+; RV32-ZVBB-LABEL: reverse_v12i64:
+; RV32-ZVBB: # %bb.0:
+; RV32-ZVBB-NEXT: lui a0, %hi(.LCPI46_0)
+; RV32-ZVBB-NEXT: addi a0, a0, %lo(.LCPI46_0)
+; RV32-ZVBB-NEXT: li a1, 32
+; RV32-ZVBB-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-ZVBB-NEXT: vle32.v v24, (a0)
+; RV32-ZVBB-NEXT: vrgather.vv v16, v8, v24
+; RV32-ZVBB-NEXT: vmv.v.v v8, v16
+; RV32-ZVBB-NEXT: ret
+;
+; RV64-ZVBB-LABEL: reverse_v12i64:
+; RV64-ZVBB: # %bb.0:
+; RV64-ZVBB-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-ZVBB-NEXT: vid.v v16
+; RV64-ZVBB-NEXT: vrsub.vi v24, v16, 11
+; RV64-ZVBB-NEXT: vrgather.vv v16, v8, v24
+; RV64-ZVBB-NEXT: vmv.v.v v8, v16
+; RV64-ZVBB-NEXT: ret
   %res = call <12 x i64> @llvm.experimental.vector.reverse.v12i64(<12 x i64> %a)
   ret <12 x i64> %res
 }
@@ -937,6 +1127,7 @@
 declare <16 x i1> @llvm.experimental.vector.reverse.v16i1(<16 x i1>)
 declare <32 x i1> @llvm.experimental.vector.reverse.v32i1(<32 x i1>)
 declare <64 x i1> @llvm.experimental.vector.reverse.v64i1(<64 x i1>)
+declare <128 x i1> @llvm.experimental.vector.reverse.v128i1(<128 x i1>)
 declare <1 x i8> @llvm.experimental.vector.reverse.v1i8(<1 x i8>)
 declare <2 x i8> @llvm.experimental.vector.reverse.v2i8(<2 x i8>)
 declare <4 x i8> @llvm.experimental.vector.reverse.v4i8(<4 x i8>)
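As a cross-check on the new ZVBB CHECK lines: the shift amount emitted after vbrev.v is max(8, PowerOf2Ceil(NumElts)) - NumElts, i.e. 6 for v2i1, 4 for v4i1, and no vsrl at all for v8i1 through v64i1, while reverse_v128i1 keeps the vrgather lowering because its i128 container element exceeds ELEN. A small sketch in plain C++ (not part of the patch) that reproduces those numbers:

#include <cstdio>

// Mirrors the ViaEltSize computation in lowerBitreverseShuffle: the container
// element is at least 8 bits, otherwise the next power of two >= NumElts.
static unsigned viaEltSize(unsigned NumElts) {
  unsigned Pow2 = 1;
  while (Pow2 < NumElts)
    Pow2 *= 2;
  return Pow2 < 8 ? 8 : Pow2;
}

int main() {
  // Expected: v2i1 -> srl 6, v4i1 -> srl 4, v8i1..v64i1 -> no shift.
  const unsigned Elems[] = {2, 4, 8, 16, 32, 64};
  for (unsigned NumElts : Elems)
    printf("v%ui1: e%u container, srl by %u\n", NumElts,
           viaEltSize(NumElts), viaEltSize(NumElts) - NumElts);
  return 0;
}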