diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4118,6 +4118,56 @@
   return Interleaved;
 }
 
+// If we have a vector of bits that we want to reverse, we can use a vbrev on a
+// larger element type, e.g. v32i1 can be reversed with a v1i32 bitreverse.
+static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
+                                      SelectionDAG &DAG,
+                                      const RISCVSubtarget &Subtarget) {
+  SDLoc DL(SVN);
+  MVT VT = SVN->getSimpleValueType(0);
+  SDValue V = SVN->getOperand(0);
+  unsigned NumElts = VT.getVectorNumElements();
+
+  assert(VT.getVectorElementType() == MVT::i1);
+
+  if (!ShuffleVectorInst::isReverseMask(SVN->getMask()) ||
+      !SVN->getOperand(1).isUndef())
+    return SDValue();
+
+  unsigned ViaEltSize = std::max((uint64_t)8, PowerOf2Ceil(NumElts));
+  MVT ViaVT = MVT::getVectorVT(MVT::getIntegerVT(ViaEltSize), 1);
+  MVT ViaBitVT = MVT::getVectorVT(MVT::i1, ViaVT.getScalarSizeInBits());
+
+  // If we don't have zvbb or the larger element type > ELEN, the operation will
+  // be illegal.
+  if (!Subtarget.getTargetLowering()->isOperationLegalOrCustom(ISD::BITREVERSE,
+                                                               ViaVT))
+    return SDValue();
+
+  // If the bit vector doesn't fit exactly into the larger element type, we need
+  // to insert it into the larger vector and then shift up the reversed bits
+  // afterwards to get rid of the gap introduced.
+  if (ViaEltSize > NumElts)
+    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ViaBitVT, DAG.getUNDEF(ViaBitVT),
+                    V, DAG.getVectorIdxConstant(0, DL));
+
+  SDValue Res =
+      DAG.getNode(ISD::BITREVERSE, DL, ViaVT, DAG.getBitcast(ViaVT, V));
+
+  // Shift up the reversed bits if the vector didn't exactly fit into the larger
+  // element type.
+  if (ViaEltSize > NumElts)
+    Res = DAG.getNode(ISD::SRL, DL, ViaVT, Res,
+                      DAG.getConstant(ViaEltSize - NumElts, DL, ViaVT));
+
+  Res = DAG.getBitcast(ViaBitVT, Res);
+
+  if (ViaEltSize > NumElts)
+    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
+                      DAG.getVectorIdxConstant(0, DL));
+  return Res;
+}
+
 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {
   SDValue V1 = Op.getOperand(0);
@@ -4128,8 +4178,11 @@
   unsigned NumElts = VT.getVectorNumElements();
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
 
-  // Promote i1 shuffle to i8 shuffle.
   if (VT.getVectorElementType() == MVT::i1) {
+    if (SDValue V = lowerBitreverseShuffle(SVN, DAG, Subtarget))
+      return V;
+
+    // Promote i1 shuffle to i8 shuffle.
     MVT WidenVT = MVT::getVectorVT(MVT::i8, VT.getVectorElementCount());
     V1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, V1);
     V2 = V2.isUndef() ? DAG.getUNDEF(WidenVT)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -1,104 +1,144 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-UNKNOWN
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-256
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32-BITS-512
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-UNKNOWN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-256
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64-BITS-512
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-UNKNOWN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-256
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV32-BITS-512
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-UNKNOWN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-256
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=512 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,NO-ZVBB,RV64-BITS-512
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+zvfh,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVBB
 ;
 ; VECTOR_REVERSE - masks
 ;
 
 define <2 x i1> @reverse_v2i1(<2 x i1> %a) {
-; CHECK-LABEL: reverse_v2i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vmsne.vi v0, v9, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v2i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vslidedown.vi v9, v8, 1
+; NO-ZVBB-NEXT: vslideup.vi v9, v8, 1
+; NO-ZVBB-NEXT: vmsne.vi v0, v9, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v2i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-NEXT: vbrev.v v8, v0
+; ZVBB-NEXT: vsrl.vi v0, v8, 6
+; ZVBB-NEXT: ret
   %res = call <2 x i1> @llvm.experimental.vector.reverse.v2i1(<2 x i1> %a)
   ret <2 x i1> %res
 }
 
 define <4 x i1> @reverse_v4i1(<4 x i1> %a) {
-; CHECK-LABEL: reverse_v4i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vid.v v9
-; CHECK-NEXT: vrsub.vi v9, v9, 3
-; CHECK-NEXT: vrgather.vv v10, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v4i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vid.v v9
+; NO-ZVBB-NEXT: vrsub.vi v9, v9, 3
+; NO-ZVBB-NEXT: vrgather.vv v10, v8, v9
+; NO-ZVBB-NEXT: vmsne.vi v0, v10, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v4i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-NEXT: vbrev.v v8, v0
+; ZVBB-NEXT: vsrl.vi v0, v8, 4
+; ZVBB-NEXT: ret
   %res = call <4 x i1> @llvm.experimental.vector.reverse.v4i1(<4 x i1> %a)
   ret <4 x i1> %res
 }
 
 define <8 x i1> @reverse_v8i1(<8 x i1> %a) {
-; CHECK-LABEL: reverse_v8i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vid.v v9
-; CHECK-NEXT: vrsub.vi v9, v9, 7
-; CHECK-NEXT: vrgather.vv v10, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v8i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vid.v v9
+; NO-ZVBB-NEXT: vrsub.vi v9, v9, 7
+; NO-ZVBB-NEXT: vrgather.vv v10, v8, v9
+; NO-ZVBB-NEXT: vmsne.vi v0, v10, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v8i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <8 x i1> @llvm.experimental.vector.reverse.v8i1(<8 x i1> %a)
   ret <8 x i1> %res
 }
 
 define <16 x i1> @reverse_v16i1(<16 x i1> %a) {
-; CHECK-LABEL: reverse_v16i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vid.v v9
-; CHECK-NEXT: vrsub.vi v9, v9, 15
-; CHECK-NEXT: vrgather.vv v10, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v10, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v16i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; NO-ZVBB-NEXT: vmv.v.i v8, 0
+; NO-ZVBB-NEXT: vmerge.vim v8, v8, 1, v0
+; NO-ZVBB-NEXT: vid.v v9
+; NO-ZVBB-NEXT: vrsub.vi v9, v9, 15
+; NO-ZVBB-NEXT: vrgather.vv v10, v8, v9
+; NO-ZVBB-NEXT: vmsne.vi v0, v10, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v16i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <16 x i1> @llvm.experimental.vector.reverse.v16i1(<16 x i1> %a)
   ret <16 x i1> %res
 }
 
 define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
-; CHECK-LABEL: reverse_v32i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_0)
-; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vrgather.vv v12, v10, v8
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v32i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: lui a0, %hi(.LCPI4_0)
+; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI4_0)
+; NO-ZVBB-NEXT: li a1, 32
+; NO-ZVBB-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vmv.v.i v10, 0
+; NO-ZVBB-NEXT: vmerge.vim v10, v10, 1, v0
+; NO-ZVBB-NEXT: vrgather.vv v12, v10, v8
+; NO-ZVBB-NEXT: vmsne.vi v0, v12, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v32i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <32 x i1> @llvm.experimental.vector.reverse.v32i1(<32 x i1> %a)
   ret <32 x i1> %res
 }
 
 define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
-; CHECK-LABEL: reverse_v64i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; CHECK-NEXT: li a1, 64
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v12, 0
-; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vrgather.vv v16, v12, v8
-; CHECK-NEXT: vmsne.vi v0, v16, 0
-; CHECK-NEXT: ret
+; NO-ZVBB-LABEL: reverse_v64i1:
+; NO-ZVBB: # %bb.0:
+; NO-ZVBB-NEXT: lui a0, %hi(.LCPI5_0)
+; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI5_0)
+; NO-ZVBB-NEXT: li a1, 64
+; NO-ZVBB-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vmv.v.i v12, 0
+; NO-ZVBB-NEXT: vmerge.vim v12, v12, 1, v0
+; NO-ZVBB-NEXT: vrgather.vv v16, v12, v8
+; NO-ZVBB-NEXT: vmsne.vi v0, v16, 0
+; NO-ZVBB-NEXT: ret
+;
+; ZVBB-LABEL: reverse_v64i1:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; ZVBB-NEXT: vbrev.v v0, v0
+; ZVBB-NEXT: ret
   %res = call <64 x i1> @llvm.experimental.vector.reverse.v64i1(<64 x i1> %a)
   ret <64 x i1> %res
 }
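
For readers following the lowering above, here is a minimal scalar sketch (not part of the patch) of the bit manipulation it relies on, assuming mask element 0 lives in bit 0 of the wider container, as in an RVV mask register; the helper names are illustrative only. It models the ISD::BITREVERSE followed by the ISD::SRL of ViaEltSize - NumElts that lowerBitreverseShuffle emits when the i1 vector does not fill the container.

// Scalar model (illustrative, assumes an 8-bit container).
#include <cassert>
#include <cstdint>

// Reverse all 8 bits of the container, as vbrev.v does per element.
static uint8_t bitreverse8(uint8_t X) {
  uint8_t R = 0;
  for (int I = 0; I < 8; ++I)
    R |= ((X >> I) & 1u) << (7 - I);
  return R;
}

// Model of the transform for a v<NumElts>i1 mask held in an 8-bit element:
// bit-reverse the whole container, then shift right by the 8 - NumElts unused
// high bits so the reversed mask lands back at bit 0.
static uint8_t reverseMaskBits(uint8_t Mask, unsigned NumElts) {
  assert(NumElts >= 1 && NumElts <= 8);
  return bitreverse8(Mask) >> (8 - NumElts);
}

int main() {
  // v4i1 mask <1,0,1,1> packs to 0b1101; its reverse <1,1,0,1> packs to
  // 0b1011, matching the vbrev.v + vsrl.vi 4 sequence in reverse_v4i1.
  assert(reverseMaskBits(0b1101, 4) == 0b1011);
  return 0;
}

When NumElts equals the container width the shift amount is zero, which corresponds to the v8i1 through v64i1 cases above where a lone vbrev.v suffices.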