diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -955,6 +955,7 @@
   SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerBitreverse(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1025,6 +1025,8 @@
     setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
     setOperationAction(ISD::BITREVERSE, MVT::v8i8, Legal);
     setOperationAction(ISD::BITREVERSE, MVT::v16i8, Legal);
+    setOperationAction(ISD::BITREVERSE, MVT::v4i32, Custom);
+    setOperationAction(ISD::BITREVERSE, MVT::v2i64, Custom);

     // AArch64 doesn't have MUL.2d:
     setOperationAction(ISD::MUL, MVT::v2i64, Expand);
@@ -4687,8 +4689,7 @@
   case ISD::ABS:
     return LowerABS(Op, DAG);
   case ISD::BITREVERSE:
-    return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU,
-                               /*OverrideNEON=*/true);
+    return LowerBitreverse(Op, DAG);
   case ISD::BSWAP:
     return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
   case ISD::CTLZ:
@@ -6861,6 +6862,27 @@
   return DAG.getNode(ISD::CTLZ, DL, VT, RBIT);
 }

+SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+
+  if (VT.isScalableVector() ||
+      useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true)) {
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU,
+                               true);
+  }
+
+  SDLoc DL(Op);
+  SDValue REVB;
+  if (VT.getScalarSizeInBits() == 32)
+    REVB = DAG.getNode(AArch64ISD::REV32, DL, MVT::v16i8, Op.getOperand(0));
+  if (VT.getScalarSizeInBits() == 64)
+    REVB = DAG.getNode(AArch64ISD::REV64, DL, MVT::v16i8, Op.getOperand(0));
+  SDValue Bitreverse = DAG.getNode(ISD::BITREVERSE, DL, MVT::v16i8, REVB);
+
+  return DAG.getNode(AArch64ISD::NVCAST, DL, VT, Bitreverse);
+}
+
 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
   if (Op.getValueType().isVector())
diff --git a/llvm/test/CodeGen/AArch64/bitreverse.ll b/llvm/test/CodeGen/AArch64/bitreverse.ll
--- a/llvm/test/CodeGen/AArch64/bitreverse.ll
+++ b/llvm/test/CodeGen/AArch64/bitreverse.ll
@@ -137,18 +137,8 @@
 define <4 x i32> @g_vec_4x32(<4 x i32> %a) {
 ; CHECK-LABEL: g_vec_4x32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    rbit w10, w10
-; CHECK-NEXT:    mov w9, v0.s[2]
-; CHECK-NEXT:    mov w11, v0.s[3]
-; CHECK-NEXT:    fmov s0, w10
-; CHECK-NEXT:    rbit w8, w8
-; CHECK-NEXT:    rbit w9, w9
-; CHECK-NEXT:    mov v0.s[1], w8
-; CHECK-NEXT:    mov v0.s[2], w9
-; CHECK-NEXT:    rbit w8, w11
-; CHECK-NEXT:    mov v0.s[3], w8
+; CHECK-NEXT:    rev32 v0.16b, v0.16b
+; CHECK-NEXT:    rbit v0.16b, v0.16b
 ; CHECK-NEXT:    ret
   %b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
   ret <4 x i32> %b
@@ -173,12 +163,8 @@
 define <2 x i64> @g_vec_2x64(<2 x i64> %a) {
 ; CHECK-LABEL: g_vec_2x64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    rbit x8, x8
-; CHECK-NEXT:    mov x9, v0.d[1]
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    rbit x8, x9
-; CHECK-NEXT:    mov v0.d[1], x8
+; CHECK-NEXT:    rev64 v0.16b, v0.16b
+; CHECK-NEXT:    rbit v0.16b, v0.16b
 ; CHECK-NEXT:    ret
   %b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
   ret <2 x i64> %b
diff --git a/llvm/test/CodeGen/AArch64/neon_rbit.ll b/llvm/test/CodeGen/AArch64/neon_rbit.ll
--- a/llvm/test/CodeGen/AArch64/neon_rbit.ll
+++ b/llvm/test/CodeGen/AArch64/neon_rbit.ll
@@ -73,18 +73,8 @@
 define <4 x i32> @rbit_4x32(<4 x i32> %A) {
 ; CHECK-LABEL: rbit_4x32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov w10, s0
-; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    rbit w10, w10
-; CHECK-NEXT:    mov w9, v0.s[2]
-; CHECK-NEXT:    mov w11, v0.s[3]
-; CHECK-NEXT:    fmov s0, w10
-; CHECK-NEXT:    rbit w8, w8
-; CHECK-NEXT:    rbit w9, w9
-; CHECK-NEXT:    mov v0.s[1], w8
-; CHECK-NEXT:    mov v0.s[2], w9
-; CHECK-NEXT:    rbit w8, w11
-; CHECK-NEXT:    mov v0.s[3], w8
+; CHECK-NEXT:    rev32 v0.16b, v0.16b
+; CHECK-NEXT:    rbit v0.16b, v0.16b
 ; CHECK-NEXT:    ret
   %tmp3 = call <4 x i32> @llvm.aarch64.neon.rbit.v4i32(<4 x i32> %A)
   ret <4 x i32> %tmp3
@@ -109,12 +99,8 @@
 define <2 x i64> @rbit_2x64(<2 x i64> %A) {
 ; CHECK-LABEL: rbit_2x64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov x8, d0
-; CHECK-NEXT:    rbit x8, x8
-; CHECK-NEXT:    mov x9, v0.d[1]
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    rbit x8, x9
-; CHECK-NEXT:    mov v0.d[1], x8
+; CHECK-NEXT:    rev64 v0.16b, v0.16b
+; CHECK-NEXT:    rbit v0.16b, v0.16b
 ; CHECK-NEXT:    ret
   %tmp3 = call <2 x i64> @llvm.aarch64.neon.rbit.v2i64(<2 x i64> %A)
   ret <2 x i64> %tmp3
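
Note on the lowering: the new code path relies on the identity that a full 32-bit (or 64-bit) bit reversal is the composition of a byte reversal within each element (REV32/REV64) and a bit reversal within each byte (RBIT on the .16b arrangement); the two steps act on different granularities, so their order does not matter. A minimal standalone C++ sketch of that identity for the 32-bit case follows; the helper names bitreverse32 and rbit8 are illustrative only and not part of the patch.

// Standalone check of the rev+rbit decomposition (not part of the patch).
#include <cassert>
#include <cstdint>

// Reverse the bits within one byte -- what RBIT does per .16b lane.
static uint8_t rbit8(uint8_t b) {
  uint8_t r = 0;
  for (int i = 0; i < 8; ++i)
    r |= uint8_t(((b >> i) & 1u) << (7 - i));
  return r;
}

// Full 32-bit bit reversal, modelled as a byte reversal of the word
// (REV32) followed by a per-byte bit reversal (RBIT).
static uint32_t bitreverse32(uint32_t x) {
  uint32_t swapped = (x >> 24) | ((x >> 8) & 0xff00u) |
                     ((x << 8) & 0xff0000u) | (x << 24); // byte swap, i.e. REV32
  uint32_t out = 0;
  for (int i = 0; i < 4; ++i)
    out |= uint32_t(rbit8(uint8_t(swapped >> (8 * i)))) << (8 * i);
  return out;
}

int main() {
  assert(bitreverse32(0x00000001u) == 0x80000000u);
  assert(bitreverse32(0x12345678u) == 0x1e6a2c48u);
  return 0;
}

The same reasoning applies per 64-bit element with REV64 in place of REV32, which is what the updated CHECK lines in bitreverse.ll and neon_rbit.ll exercise.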