diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -1089,6 +1089,7 @@ SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const; + SDValue LowerAVG(SDValue Op, SelectionDAG &DAG, unsigned NewOp) const; SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1293,12 +1293,10 @@ setOperationAction(ISD::SDIVREM, VT, Expand); setOperationAction(ISD::UDIVREM, VT, Expand); - if (Subtarget->hasSVE2()) { - setOperationAction(ISD::AVGFLOORS, VT, Custom); - setOperationAction(ISD::AVGFLOORU, VT, Custom); - setOperationAction(ISD::AVGCEILS, VT, Custom); - setOperationAction(ISD::AVGCEILU, VT, Custom); - } + setOperationAction(ISD::AVGFLOORS, VT, Custom); + setOperationAction(ISD::AVGFLOORU, VT, Custom); + setOperationAction(ISD::AVGCEILS, VT, Custom); + setOperationAction(ISD::AVGCEILU, VT, Custom); } // Illegal unpacked integer vector types. 
@@ -6095,13 +6093,13 @@ case ISD::ABDU: return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED); case ISD::AVGFLOORS: - return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDS_PRED); + return LowerAVG(Op, DAG, AArch64ISD::HADDS_PRED); case ISD::AVGFLOORU: - return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDU_PRED); + return LowerAVG(Op, DAG, AArch64ISD::HADDU_PRED); case ISD::AVGCEILS: - return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDS_PRED); + return LowerAVG(Op, DAG, AArch64ISD::RHADDS_PRED); case ISD::AVGCEILU: - return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDU_PRED); + return LowerAVG(Op, DAG, AArch64ISD::RHADDU_PRED); case ISD::BITREVERSE: return LowerBitreverse(Op, DAG); case ISD::BSWAP: @@ -13395,6 +13393,81 @@ return Chain; } +static bool IsZeroExtended(SDValue Node) { + APInt SplatVal; + if (Node.getOpcode() == ISD::AND) + if (ISD::isConstantSplatVector(Node.getOperand(1).getNode(), SplatVal)) + if (SplatVal.isMask() && SplatVal.countTrailingOnes() < + Node->getValueType(0).getScalarSizeInBits()) + return true; + + return false; +} + +static bool IsSignExtended(SDValue Node) { + if (Node.getOpcode() != ISD::SIGN_EXTEND_INREG) + return false; + return cast<VTSDNode>(Node.getOperand(1))->getVT().getScalarSizeInBits() < + Node->getValueType(0).getScalarSizeInBits(); +} + +SDValue AArch64TargetLowering::LowerAVG(SDValue Op, SelectionDAG &DAG, + unsigned NewOp) const { + if (Subtarget->hasSVE2()) + return LowerToPredicatedOp(Op, DAG, NewOp); + + SDLoc dl(Op); + SDValue OpA = Op->getOperand(0); + SDValue OpB = Op->getOperand(1); + EVT VT = Op->getValueType(0); + SDValue ConstantOne = DAG.getConstant(1, dl, VT); + + bool IsCeil = false; + if ((Op->getOpcode() == ISD::AVGCEILS || Op->getOpcode() == ISD::AVGCEILU)) + IsCeil = true; + + bool IsSigned = false; + if ((Op->getOpcode() == ISD::AVGFLOORS || Op->getOpcode() == ISD::AVGCEILS)) + IsSigned = true; + + unsigned ShiftOpc = IsSigned ? 
ISD::SRA : ISD::SRL; + + assert(VT.isScalableVector() && "Only expect to lower scalable vector op!"); + + // Check if it's better to emit the original code: + if (IsZeroExtended(OpA) || IsZeroExtended(OpB)) { + // In this case emitting the original code is better than custom lowering + // AVGFloor/Ceil + SDValue Add = DAG.getNode(ISD::ADD, dl, VT, OpA, OpB); + if (IsCeil) + Add = DAG.getNode(ISD::ADD, dl, VT, Add, ConstantOne); + return DAG.getNode(ISD::SRL, dl, VT, Add, ConstantOne); + } + + if (IsSignExtended(OpA) || IsSignExtended(OpB)) { + // In this case emitting the original code is better than custom lowering + // AVGFloor/Ceil + SDValue Add = DAG.getNode(ISD::ADD, dl, VT, OpA, OpB); + if (IsCeil) + Add = DAG.getNode(ISD::ADD, dl, VT, Add, ConstantOne); + return DAG.getNode(ISD::SRA, dl, VT, Add, ConstantOne); + } + + SDValue ShiftOpA, ShiftOpB; + ShiftOpA = DAG.getNode(ShiftOpc, dl, VT, OpA, ConstantOne); + ShiftOpB = DAG.getNode(ShiftOpc, dl, VT, OpB, ConstantOne); + + SDValue tmp; + if (IsCeil) + tmp = DAG.getNode(ISD::OR, dl, VT, OpA, OpB); + else + tmp = DAG.getNode(ISD::AND, dl, VT, OpA, OpB); + + tmp = DAG.getNode(ISD::AND, dl, VT, tmp, ConstantOne); + SDValue Add = DAG.getNode(ISD::ADD, dl, VT, ShiftOpA, ShiftOpB); + return DAG.getNode(ISD::ADD, dl, VT, Add, tmp); +} + SDValue AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { diff --git a/llvm/test/CodeGen/AArch64/sve2-hadd.ll b/llvm/test/CodeGen/AArch64/sve-hadd.ll rename from llvm/test/CodeGen/AArch64/sve2-hadd.ll rename to llvm/test/CodeGen/AArch64/sve-hadd.ll --- a/llvm/test/CodeGen/AArch64/sve2-hadd.ll +++ b/llvm/test/CodeGen/AArch64/sve-hadd.ll @@ -1,12 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple aarch64-none-eabi -mattr=+sve2 -o - | FileCheck %s +; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE +; RUN: llc < %s 
-mtriple=aarch64-none-linux-gnu -mattr=+sve2 | FileCheck %s -check-prefixes=CHECK,SVE2 define @hadds_v2i64( %s0, %s1) { -; CHECK-LABEL: hadds_v2i64: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: shadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: hadds_v2i64: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.d, z1.d, #1 +; SVE-NEXT: asr z3.d, z0.d, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.d, z3.d, z2.d +; SVE-NEXT: and z0.d, z0.d, #0x1 +; SVE-NEXT: add z0.d, z1.d, z0.d +; SVE-NEXT: ret +; +; SVE2-LABEL: hadds_v2i64: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: shadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -17,11 +28,21 @@ } define @haddu_v2i64( %s0, %s1) { -; CHECK-LABEL: haddu_v2i64: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: uhadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v2i64: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.d, z1.d, #1 +; SVE-NEXT: lsr z3.d, z0.d, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.d, z3.d, z2.d +; SVE-NEXT: and z0.d, z0.d, #0x1 +; SVE-NEXT: add z0.d, z1.d, z0.d +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v2i64: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: uhadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -49,13 +70,20 @@ } define @haddu_v2i32( %s0, %s1) { -; CHECK-LABEL: haddu_v2i32: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: and z0.d, z0.d, #0xffffffff -; CHECK-NEXT: and z1.d, z1.d, #0xffffffff -; CHECK-NEXT: uhadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v2i32: +; SVE: // %bb.0: // %entry +; SVE-NEXT: and z0.d, z0.d, #0xffffffff +; SVE-NEXT: adr z0.d, [z0.d, z1.d, uxtw] +; SVE-NEXT: lsr z0.d, z0.d, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v2i32: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: and z0.d, z0.d, #0xffffffff 
+; SVE2-NEXT: and z1.d, z1.d, #0xffffffff +; SVE2-NEXT: uhadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -66,11 +94,21 @@ } define @hadds_v4i32( %s0, %s1) { -; CHECK-LABEL: hadds_v4i32: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: shadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: hadds_v4i32: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.s, z1.s, #1 +; SVE-NEXT: asr z3.s, z0.s, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.s, z3.s, z2.s +; SVE-NEXT: and z0.s, z0.s, #0x1 +; SVE-NEXT: add z0.s, z1.s, z0.s +; SVE-NEXT: ret +; +; SVE2-LABEL: hadds_v4i32: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: shadd z0.s, p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -81,11 +119,21 @@ } define @haddu_v4i32( %s0, %s1) { -; CHECK-LABEL: haddu_v4i32: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: uhadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v4i32: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.s, z1.s, #1 +; SVE-NEXT: lsr z3.s, z0.s, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.s, z3.s, z2.s +; SVE-NEXT: and z0.s, z0.s, #0x1 +; SVE-NEXT: add z0.s, z1.s, z0.s +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v4i32: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: uhadd z0.s, p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -115,13 +163,21 @@ } define @haddu_v2i16( %s0, %s1) { -; CHECK-LABEL: haddu_v2i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: and z0.d, z0.d, #0xffff -; CHECK-NEXT: and z1.d, z1.d, #0xffff -; CHECK-NEXT: uhadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v2i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: and z0.d, z0.d, #0xffff +; SVE-NEXT: and z1.d, z1.d, #0xffff +; SVE-NEXT: add z0.d, z0.d, z1.d +; SVE-NEXT: lsr z0.d, z0.d, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: 
haddu_v2i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: and z0.d, z0.d, #0xffff +; SVE2-NEXT: and z1.d, z1.d, #0xffff +; SVE2-NEXT: uhadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -150,13 +206,21 @@ } define @haddu_v4i16( %s0, %s1) { -; CHECK-LABEL: haddu_v4i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: and z0.s, z0.s, #0xffff -; CHECK-NEXT: and z1.s, z1.s, #0xffff -; CHECK-NEXT: uhadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v4i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: and z0.s, z0.s, #0xffff +; SVE-NEXT: and z1.s, z1.s, #0xffff +; SVE-NEXT: add z0.s, z0.s, z1.s +; SVE-NEXT: lsr z0.s, z0.s, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v4i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: and z0.s, z0.s, #0xffff +; SVE2-NEXT: and z1.s, z1.s, #0xffff +; SVE2-NEXT: uhadd z0.s, p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -167,11 +231,21 @@ } define @hadds_v8i16( %s0, %s1) { -; CHECK-LABEL: hadds_v8i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.h -; CHECK-NEXT: shadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: ret +; SVE-LABEL: hadds_v8i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.h, z1.h, #1 +; SVE-NEXT: asr z3.h, z0.h, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.h, z3.h, z2.h +; SVE-NEXT: and z0.h, z0.h, #0x1 +; SVE-NEXT: add z0.h, z1.h, z0.h +; SVE-NEXT: ret +; +; SVE2-LABEL: hadds_v8i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.h +; SVE2-NEXT: shadd z0.h, p0/m, z0.h, z1.h +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -182,11 +256,21 @@ } define @haddu_v8i16( %s0, %s1) { -; CHECK-LABEL: haddu_v8i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.h -; CHECK-NEXT: uhadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v8i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.h, z1.h, #1 +; SVE-NEXT: lsr z3.h, z0.h, 
#1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.h, z3.h, z2.h +; SVE-NEXT: and z0.h, z0.h, #0x1 +; SVE-NEXT: add z0.h, z1.h, z0.h +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v8i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.h +; SVE2-NEXT: uhadd z0.h, p0/m, z0.h, z1.h +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -216,13 +300,21 @@ } define @haddu_v4i8( %s0, %s1) { -; CHECK-LABEL: haddu_v4i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: and z0.s, z0.s, #0xff -; CHECK-NEXT: and z1.s, z1.s, #0xff -; CHECK-NEXT: uhadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v4i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: and z0.s, z0.s, #0xff +; SVE-NEXT: and z1.s, z1.s, #0xff +; SVE-NEXT: add z0.s, z0.s, z1.s +; SVE-NEXT: lsr z0.s, z0.s, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v4i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: and z0.s, z0.s, #0xff +; SVE2-NEXT: and z1.s, z1.s, #0xff +; SVE2-NEXT: uhadd z0.s, p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -251,13 +343,21 @@ } define @haddu_v8i8( %s0, %s1) { -; CHECK-LABEL: haddu_v8i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.h -; CHECK-NEXT: and z0.h, z0.h, #0xff -; CHECK-NEXT: and z1.h, z1.h, #0xff -; CHECK-NEXT: uhadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v8i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: and z0.h, z0.h, #0xff +; SVE-NEXT: and z1.h, z1.h, #0xff +; SVE-NEXT: add z0.h, z0.h, z1.h +; SVE-NEXT: lsr z0.h, z0.h, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v8i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.h +; SVE2-NEXT: and z0.h, z0.h, #0xff +; SVE2-NEXT: and z1.h, z1.h, #0xff +; SVE2-NEXT: uhadd z0.h, p0/m, z0.h, z1.h +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -268,11 +368,21 @@ } define @hadds_v16i8( %s0, %s1) { -; CHECK-LABEL: hadds_v16i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.b -; CHECK-NEXT: 
shadd z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: ret +; SVE-LABEL: hadds_v16i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.b, z1.b, #1 +; SVE-NEXT: asr z3.b, z0.b, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.b, z3.b, z2.b +; SVE-NEXT: and z0.b, z0.b, #0x1 +; SVE-NEXT: add z0.b, z1.b, z0.b +; SVE-NEXT: ret +; +; SVE2-LABEL: hadds_v16i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.b +; SVE2-NEXT: shadd z0.b, p0/m, z0.b, z1.b +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -283,11 +393,21 @@ } define @haddu_v16i8( %s0, %s1) { -; CHECK-LABEL: haddu_v16i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.b -; CHECK-NEXT: uhadd z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: ret +; SVE-LABEL: haddu_v16i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.b, z1.b, #1 +; SVE-NEXT: lsr z3.b, z0.b, #1 +; SVE-NEXT: and z0.d, z0.d, z1.d +; SVE-NEXT: add z1.b, z3.b, z2.b +; SVE-NEXT: and z0.b, z0.b, #0x1 +; SVE-NEXT: add z0.b, z1.b, z0.b +; SVE-NEXT: ret +; +; SVE2-LABEL: haddu_v16i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.b +; SVE2-NEXT: uhadd z0.b, p0/m, z0.b, z1.b +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -298,11 +418,21 @@ } define @rhadds_v2i64( %s0, %s1) { -; CHECK-LABEL: rhadds_v2i64: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: srhadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: rhadds_v2i64: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.d, z1.d, #1 +; SVE-NEXT: asr z3.d, z0.d, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.d, z3.d, z2.d +; SVE-NEXT: and z0.d, z0.d, #0x1 +; SVE-NEXT: add z0.d, z1.d, z0.d +; SVE-NEXT: ret +; +; SVE2-LABEL: rhadds_v2i64: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: srhadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -314,11 +444,21 @@ } define @rhaddu_v2i64( %s0, %s1) { -; CHECK-LABEL: rhaddu_v2i64: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; 
CHECK-NEXT: urhadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v2i64: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.d, z1.d, #1 +; SVE-NEXT: lsr z3.d, z0.d, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.d, z3.d, z2.d +; SVE-NEXT: and z0.d, z0.d, #0x1 +; SVE-NEXT: add z0.d, z1.d, z0.d +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v2i64: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: urhadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -351,13 +491,23 @@ } define @rhaddu_v2i32( %s0, %s1) { -; CHECK-LABEL: rhaddu_v2i32: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: and z0.d, z0.d, #0xffffffff -; CHECK-NEXT: and z1.d, z1.d, #0xffffffff -; CHECK-NEXT: urhadd z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v2i32: +; SVE: // %bb.0: // %entry +; SVE-NEXT: mov z2.d, #-1 // =0xffffffffffffffff +; SVE-NEXT: and z0.d, z0.d, #0xffffffff +; SVE-NEXT: and z1.d, z1.d, #0xffffffff +; SVE-NEXT: eor z0.d, z0.d, z2.d +; SVE-NEXT: sub z0.d, z1.d, z0.d +; SVE-NEXT: lsr z0.d, z0.d, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v2i32: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.d +; SVE2-NEXT: and z0.d, z0.d, #0xffffffff +; SVE2-NEXT: and z1.d, z1.d, #0xffffffff +; SVE2-NEXT: urhadd z0.d, p0/m, z0.d, z1.d +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -369,11 +519,21 @@ } define @rhadds_v4i32( %s0, %s1) { -; CHECK-LABEL: rhadds_v4i32: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: srhadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: rhadds_v4i32: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.s, z1.s, #1 +; SVE-NEXT: asr z3.s, z0.s, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.s, z3.s, z2.s +; SVE-NEXT: and z0.s, z0.s, #0x1 +; SVE-NEXT: add z0.s, z1.s, z0.s +; SVE-NEXT: ret +; +; SVE2-LABEL: rhadds_v4i32: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: srhadd z0.s, 
p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -385,11 +545,21 @@ } define @rhaddu_v4i32( %s0, %s1) { -; CHECK-LABEL: rhaddu_v4i32: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: urhadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v4i32: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.s, z1.s, #1 +; SVE-NEXT: lsr z3.s, z0.s, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.s, z3.s, z2.s +; SVE-NEXT: and z0.s, z0.s, #0x1 +; SVE-NEXT: add z0.s, z1.s, z0.s +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v4i32: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: urhadd z0.s, p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -464,13 +634,23 @@ } define @rhaddu_v4i16( %s0, %s1) { -; CHECK-LABEL: rhaddu_v4i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: and z0.s, z0.s, #0xffff -; CHECK-NEXT: and z1.s, z1.s, #0xffff -; CHECK-NEXT: urhadd z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v4i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: mov z2.s, #-1 // =0xffffffffffffffff +; SVE-NEXT: and z0.s, z0.s, #0xffff +; SVE-NEXT: and z1.s, z1.s, #0xffff +; SVE-NEXT: eor z0.d, z0.d, z2.d +; SVE-NEXT: sub z0.s, z1.s, z0.s +; SVE-NEXT: lsr z0.s, z0.s, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v4i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.s +; SVE2-NEXT: and z0.s, z0.s, #0xffff +; SVE2-NEXT: and z1.s, z1.s, #0xffff +; SVE2-NEXT: urhadd z0.s, p0/m, z0.s, z1.s +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -482,11 +662,21 @@ } define @rhadds_v8i16( %s0, %s1) { -; CHECK-LABEL: rhadds_v8i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.h -; CHECK-NEXT: srhadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: ret +; SVE-LABEL: rhadds_v8i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.h, z1.h, #1 +; SVE-NEXT: asr z3.h, z0.h, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.h, z3.h, 
z2.h +; SVE-NEXT: and z0.h, z0.h, #0x1 +; SVE-NEXT: add z0.h, z1.h, z0.h +; SVE-NEXT: ret +; +; SVE2-LABEL: rhadds_v8i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.h +; SVE2-NEXT: srhadd z0.h, p0/m, z0.h, z1.h +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -498,11 +688,21 @@ } define @rhaddu_v8i16( %s0, %s1) { -; CHECK-LABEL: rhaddu_v8i16: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.h -; CHECK-NEXT: urhadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v8i16: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.h, z1.h, #1 +; SVE-NEXT: lsr z3.h, z0.h, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.h, z3.h, z2.h +; SVE-NEXT: and z0.h, z0.h, #0x1 +; SVE-NEXT: add z0.h, z1.h, z0.h +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v8i16: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.h +; SVE2-NEXT: urhadd z0.h, p0/m, z0.h, z1.h +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -577,13 +777,23 @@ } define @rhaddu_v8i8( %s0, %s1) { -; CHECK-LABEL: rhaddu_v8i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.h -; CHECK-NEXT: and z0.h, z0.h, #0xff -; CHECK-NEXT: and z1.h, z1.h, #0xff -; CHECK-NEXT: urhadd z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v8i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: mov z2.h, #-1 // =0xffffffffffffffff +; SVE-NEXT: and z0.h, z0.h, #0xff +; SVE-NEXT: and z1.h, z1.h, #0xff +; SVE-NEXT: eor z0.d, z0.d, z2.d +; SVE-NEXT: sub z0.h, z1.h, z0.h +; SVE-NEXT: lsr z0.h, z0.h, #1 +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v8i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.h +; SVE2-NEXT: and z0.h, z0.h, #0xff +; SVE2-NEXT: and z1.h, z1.h, #0xff +; SVE2-NEXT: urhadd z0.h, p0/m, z0.h, z1.h +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to @@ -595,11 +805,21 @@ } define @rhadds_v16i8( %s0, %s1) { -; CHECK-LABEL: rhadds_v16i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.b -; CHECK-NEXT: srhadd z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: 
ret +; SVE-LABEL: rhadds_v16i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: asr z2.b, z1.b, #1 +; SVE-NEXT: asr z3.b, z0.b, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.b, z3.b, z2.b +; SVE-NEXT: and z0.b, z0.b, #0x1 +; SVE-NEXT: add z0.b, z1.b, z0.b +; SVE-NEXT: ret +; +; SVE2-LABEL: rhadds_v16i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.b +; SVE2-NEXT: srhadd z0.b, p0/m, z0.b, z1.b +; SVE2-NEXT: ret entry: %s0s = sext %s0 to %s1s = sext %s1 to @@ -611,11 +831,21 @@ } define @rhaddu_v16i8( %s0, %s1) { -; CHECK-LABEL: rhaddu_v16i8: -; CHECK: // %bb.0: // %entry -; CHECK-NEXT: ptrue p0.b -; CHECK-NEXT: urhadd z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: ret +; SVE-LABEL: rhaddu_v16i8: +; SVE: // %bb.0: // %entry +; SVE-NEXT: lsr z2.b, z1.b, #1 +; SVE-NEXT: lsr z3.b, z0.b, #1 +; SVE-NEXT: orr z0.d, z0.d, z1.d +; SVE-NEXT: add z1.b, z3.b, z2.b +; SVE-NEXT: and z0.b, z0.b, #0x1 +; SVE-NEXT: add z0.b, z1.b, z0.b +; SVE-NEXT: ret +; +; SVE2-LABEL: rhaddu_v16i8: +; SVE2: // %bb.0: // %entry +; SVE2-NEXT: ptrue p0.b +; SVE2-NEXT: urhadd z0.b, p0/m, z0.b, z1.b +; SVE2-NEXT: ret entry: %s0s = zext %s0 to %s1s = zext %s1 to