diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1087,6 +1087,7 @@
   SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                          SDValue &Size,
                                          SelectionDAG &DAG) const;
+  SDValue LowerAVGFloor_AVGCeil(SDValue Node, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorIntDivideToSVE(SDValue Op,
                                                SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1289,12 +1289,10 @@
       setOperationAction(ISD::SDIVREM, VT, Expand);
       setOperationAction(ISD::UDIVREM, VT, Expand);
 
-      if (Subtarget->hasSVE2()) {
-        setOperationAction(ISD::AVGFLOORS, VT, Custom);
-        setOperationAction(ISD::AVGFLOORU, VT, Custom);
-        setOperationAction(ISD::AVGCEILS, VT, Custom);
-        setOperationAction(ISD::AVGCEILU, VT, Custom);
-      }
+      setOperationAction(ISD::AVGFLOORS, VT, Custom);
+      setOperationAction(ISD::AVGFLOORU, VT, Custom);
+      setOperationAction(ISD::AVGCEILS, VT, Custom);
+      setOperationAction(ISD::AVGCEILU, VT, Custom);
     }
 
     // Illegal unpacked integer vector types.
@@ -6089,13 +6087,21 @@
   case ISD::ABDU:
     return LowerToPredicatedOp(Op, DAG, AArch64ISD::ABDU_PRED);
   case ISD::AVGFLOORS:
-    return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDS_PRED);
+    if (Subtarget->hasSVE2())
+      return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDS_PRED);
+    return LowerAVGFloor_AVGCeil(Op, DAG);
   case ISD::AVGFLOORU:
-    return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDU_PRED);
+    if (Subtarget->hasSVE2())
+      return LowerToPredicatedOp(Op, DAG, AArch64ISD::HADDU_PRED);
+    return LowerAVGFloor_AVGCeil(Op, DAG);
   case ISD::AVGCEILS:
-    return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDS_PRED);
+    if (Subtarget->hasSVE2())
+      return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDS_PRED);
+    return LowerAVGFloor_AVGCeil(Op, DAG);
   case ISD::AVGCEILU:
-    return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDU_PRED);
+    if (Subtarget->hasSVE2())
+      return LowerToPredicatedOp(Op, DAG, AArch64ISD::RHADDU_PRED);
+    return LowerAVGFloor_AVGCeil(Op, DAG);
   case ISD::BITREVERSE:
     return LowerBitreverse(Op, DAG);
   case ISD::BSWAP:
@@ -13378,6 +13384,56 @@
   return Chain;
 }
 
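+// Lower AVGFLOOR/AVGCEIL nodes when no native SVE2 HADD/RHADD instruction is
+// available, using an expansion that cannot overflow the element type:
+//   avgfloor(a, b) = (a >> 1) + (b >> 1) + (a & b & 1)
+//   avgceil(a, b)  = (a >> 1) + (b >> 1) + ((a | b) & 1)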
+SDValue AArch64TargetLowering::LowerAVGFloor_AVGCeil(SDValue Node,
+                                                     SelectionDAG &DAG) const {
+  SDLoc dl(Node);
+  SDValue OpA = Node->getOperand(0);
+  SDValue OpB = Node->getOperand(1);
+  EVT VT = Node->getValueType(0);
+  SDValue ConstantOne = DAG.getConstant(1, dl, VT);
+
+  assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
+
+  // Check whether it is cheaper to emit the original add+shift sequence.
+  if (OpA.getOpcode() == ISD::AND) {
+    // The operand is masked by a constant splat; emitting the original
+    // add+shift code is better than the custom AVGFloor/AVGCeil lowering.
+    APInt SplatVal;
+    if (ISD::isConstantSplatVector(OpA.getOperand(1).getNode(), SplatVal)) {
+      SDValue Add = DAG.getNode(ISD::ADD, dl, VT, OpA, OpB);
+      return DAG.getNode(ISD::SRL, dl, VT, Add, ConstantOne);
+    }
+  } else if (OpA.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+    // The operand is sign-extended in-register; emitting the original
+    // add+shift code is better than the custom AVGFloor/AVGCeil lowering.
+    SDValue Add = DAG.getNode(ISD::ADD, dl, VT, OpA, OpB);
+    return DAG.getNode(ISD::SRA, dl, VT, Add, ConstantOne);
+  }
+
+  SDValue ShiftOpA, ShiftOpB;
+  if (Node->getOpcode() == ISD::AVGFLOORS ||
+      Node->getOpcode() == ISD::AVGCEILS) {
+    ShiftOpA = DAG.getNode(ISD::SRA, dl, VT, OpA, ConstantOne);
+    ShiftOpB = DAG.getNode(ISD::SRA, dl, VT, OpB, ConstantOne);
+  } else {
+    ShiftOpA = DAG.getNode(ISD::SRL, dl, VT, OpA, ConstantOne);
+    ShiftOpB = DAG.getNode(ISD::SRL, dl, VT, OpB, ConstantOne);
+  }
+
+  // The floor variants add the carry of the two low bits (a & b & 1); the
+  // ceil variants add the rounding bit ((a | b) & 1).
+  SDValue Tmp;
+  if (Node->getOpcode() == ISD::AVGFLOORU ||
+      Node->getOpcode() == ISD::AVGFLOORS)
+    Tmp = DAG.getNode(ISD::AND, dl, VT, OpA, OpB);
+  else
+    Tmp = DAG.getNode(ISD::OR, dl, VT, OpA, OpB);
+
+  Tmp = DAG.getNode(ISD::AND, dl, VT, Tmp, ConstantOne);
+  SDValue Add = DAG.getNode(ISD::ADD, dl, VT, ShiftOpA, ShiftOpB);
+  return DAG.getNode(ISD::ADD, dl, VT, Add, Tmp);
+}
+
 SDValue
 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                SelectionDAG &DAG) const {
diff --git a/llvm/test/CodeGen/AArch64/sve-avg_floor_ceil.ll b/llvm/test/CodeGen/AArch64/sve-avg_floor_ceil.ll
--- a/llvm/test/CodeGen/AArch64/sve-avg_floor_ceil.ll
+++ b/llvm/test/CodeGen/AArch64/sve-avg_floor_ceil.ll
@@ -1,6 +1,43 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mcpu=neoverse-v1 | FileCheck %s
-;
+
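+; Note: without SVE2 HADD/RHADD, the extend/add/shift idioms below are
+; expected to be combined into AVGFLOOR/AVGCEIL nodes and lowered to
+; shift-and-add sequences that never widen past the legal element type.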
+define <vscale x 2 x i64> @hadds_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
+; CHECK-LABEL: hadds_v2i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    asr z2.d, z1.d, #1
+; CHECK-NEXT:    asr z3.d, z0.d, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.d, z3.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, #0x1
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
+; CHECK-NEXT:    ret
+entry:
+  %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
+  %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
+  %m = add nsw <vscale x 2 x i128> %s0s, %s1s
+  %s = ashr <vscale x 2 x i128> %m, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %s2
+}
+
+define <vscale x 2 x i64> @haddu_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
+; CHECK-LABEL: haddu_v2i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsr z2.d, z1.d, #1
+; CHECK-NEXT:    lsr z3.d, z0.d, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.d, z3.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, #0x1
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
+; CHECK-NEXT:    ret
+entry:
+  %s0s = zext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
+  %s1s = zext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
+  %m = add nuw nsw <vscale x 2 x i128> %s0s, %s1s
+  %s = lshr <vscale x 2 x i128> %m, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %s2
+}
 
 define <vscale x 2 x i32> @hadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
 ; CHECK-LABEL: hadds_v2i32:
@@ -38,15 +75,12 @@
 define <vscale x 4 x i32> @hadds_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
 ; CHECK-LABEL: hadds_v4i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sunpkhi z2.d, z0.s
-; CHECK-NEXT:    sunpklo z0.d, z0.s
-; CHECK-NEXT:    sunpkhi z3.d, z1.s
-; CHECK-NEXT:    sunpklo z1.d, z1.s
-; CHECK-NEXT:    add z0.d, z0.d, z1.d
-; CHECK-NEXT:    add z1.d, z2.d, z3.d
-; CHECK-NEXT:    lsr z1.d, z1.d, #1
-; CHECK-NEXT:    lsr z0.d, z0.d, #1
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    asr z2.s, z1.s, #1
+; CHECK-NEXT:    asr z3.s, z0.s, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.s, z3.s, z2.s
+; CHECK-NEXT:    and z0.s, z0.s, #0x1
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
 entry:
   %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
@@ -60,15 +94,12 @@
 define <vscale x 4 x i32> @haddu_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
 ; CHECK-LABEL: haddu_v4i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
-; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    uunpkhi z3.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    add z0.d, z0.d, z1.d
-; CHECK-NEXT:    add z1.d, z2.d, z3.d
-; CHECK-NEXT:    lsr z1.d, z1.d, #1
-; CHECK-NEXT:    lsr z0.d, z0.d, #1
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    lsr z2.s, z1.s, #1
+; CHECK-NEXT:    lsr z3.s, z0.s, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.s, z3.s, z2.s
+; CHECK-NEXT:    and z0.s, z0.s, #0x1
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
 entry:
   %s0s = zext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
@@ -152,15 +183,12 @@
 define <vscale x 8 x i16> @hadds_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
 ; CHECK-LABEL: hadds_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sunpkhi z2.s, z0.h
-; CHECK-NEXT:    sunpklo z0.s, z0.h
-; CHECK-NEXT:    sunpkhi z3.s, z1.h
-; CHECK-NEXT:    sunpklo z1.s, z1.h
-; CHECK-NEXT:    add z0.s, z0.s, z1.s
-; CHECK-NEXT:    add z1.s, z2.s, z3.s
-; CHECK-NEXT:    lsr z1.s, z1.s, #1
-; CHECK-NEXT:    lsr z0.s, z0.s, #1
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT:    asr z2.h, z1.h, #1
+; CHECK-NEXT:    asr z3.h, z0.h, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.h, z3.h, z2.h
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
 entry:
   %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
@@ -174,15 +202,12 @@
 define <vscale x 8 x i16> @haddu_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
 ; CHECK-LABEL: haddu_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    uunpkhi z2.s, z0.h
-; CHECK-NEXT:    uunpklo z0.s, z0.h
-; CHECK-NEXT:    uunpkhi z3.s, z1.h
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    add z0.s, z0.s, z1.s
-; CHECK-NEXT:    add z1.s, z2.s, z3.s
-; CHECK-NEXT:    lsr z1.s, z1.s, #1
-; CHECK-NEXT:    lsr z0.s, z0.s, #1
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT:    lsr z2.h, z1.h, #1
+; CHECK-NEXT:    lsr z3.h, z0.h, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.h, z3.h, z2.h
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
 entry:
   %s0s = zext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
@@ -266,15 +291,12 @@
 define <vscale x 16 x i8> @hadds_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
 ; CHECK-LABEL: hadds_v16i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sunpkhi z2.h, z0.b
-; CHECK-NEXT:    sunpklo z0.h, z0.b
-; CHECK-NEXT:    sunpkhi z3.h, z1.b
-; CHECK-NEXT:    sunpklo z1.h, z1.b
-; CHECK-NEXT:    add z0.h, z0.h, z1.h
-; CHECK-NEXT:    add z1.h, z2.h, z3.h
-; CHECK-NEXT:    lsr z1.h, z1.h, #1
-; CHECK-NEXT:    lsr z0.h, z0.h, #1
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    asr z2.b, z1.b, #1
+; CHECK-NEXT:    asr z3.b, z0.b, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.b, z3.b, z2.b
+; CHECK-NEXT:    and z0.b, z0.b, #0x1
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
 entry:
   %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
@@ -288,15 +310,12 @@
 define <vscale x 16 x i8> @haddu_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
 ; CHECK-LABEL: haddu_v16i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    uunpkhi z2.h, z0.b
-; CHECK-NEXT:    uunpklo z0.h, z0.b
-; CHECK-NEXT:    uunpkhi z3.h, z1.b
-; CHECK-NEXT:    uunpklo z1.h, z1.b
-; CHECK-NEXT:    add z0.h, z0.h, z1.h
-; CHECK-NEXT:    add z1.h, z2.h, z3.h
-; CHECK-NEXT:    lsr z1.h, z1.h, #1
-; CHECK-NEXT:    lsr z0.h, z0.h, #1
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    lsr z2.b, z1.b, #1
+; CHECK-NEXT:    lsr z3.b, z0.b, #1
+; CHECK-NEXT:    and z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.b, z3.b, z2.b
+; CHECK-NEXT:    and z0.b, z0.b, #0x1
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
 entry:
   %s0s = zext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
@@ -307,6 +326,46 @@
   ret <vscale x 16 x i8> %s2
 }
 
+define <vscale x 2 x i64> @rhadds_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
+; CHECK-LABEL: rhadds_v2i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    asr z2.d, z1.d, #1
+; CHECK-NEXT:    asr z3.d, z0.d, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.d, z3.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, #0x1
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
+; CHECK-NEXT:    ret
+entry:
+  %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
+  %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
+  %add = add <vscale x 2 x i128> %s0s, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add <vscale x 2 x i128> %add, %s1s
+  %s = ashr <vscale x 2 x i128> %add2, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %result
+}
+
+define <vscale x 2 x i64> @rhaddu_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
+; CHECK-LABEL: rhaddu_v2i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsr z2.d, z1.d, #1
+; CHECK-NEXT:    lsr z3.d, z0.d, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.d, z3.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, #0x1
+; CHECK-NEXT:    add z0.d, z2.d, z0.d
+; CHECK-NEXT:    ret
+entry:
+  %s0s = zext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
+  %s1s = zext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
+  %add = add nuw nsw <vscale x 2 x i128> %s0s, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %add2 = add nuw nsw <vscale x 2 x i128> %add, %s1s
+  %s = lshr <vscale x 2 x i128> %add2, shufflevector (<vscale x 2 x i128> insertelement (<vscale x 2 x i128> poison, i128 1, i32 0), <vscale x 2 x i128> poison, <vscale x 2 x i32> zeroinitializer)
+  %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %result
+}
+
 define <vscale x 2 x i32> @rhadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
 ; CHECK-LABEL: rhadds_v2i32:
 ; CHECK:       // %bb.0: // %entry
@@ -332,10 +391,7 @@
 ; CHECK-LABEL: rhaddu_v2i32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
-; CHECK-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
-; CHECK-NEXT:    eor z0.d, z0.d, z2.d
-; CHECK-NEXT:    sub z0.d, z1.d, z0.d
+; CHECK-NEXT:    adr z0.d, [z0.d, z1.d, uxtw]
 ; CHECK-NEXT:    lsr z0.d, z0.d, #1
 ; CHECK-NEXT:    ret
 entry:
@@ -351,18 +407,12 @@
 define <vscale x 4 x i32> @rhadds_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
 ; CHECK-LABEL: rhadds_v4i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sunpkhi z2.d, z0.s
-; CHECK-NEXT:    sunpklo z0.d, z0.s
-; CHECK-NEXT:    mov z4.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    sunpkhi z3.d, z1.s
-; CHECK-NEXT:    sunpklo z1.d, z1.s
-; CHECK-NEXT:    eor z0.d, z0.d, z4.d
-; CHECK-NEXT:    sub z0.d, z1.d, z0.d
-; CHECK-NEXT:    eor z1.d, z2.d, z4.d
-; CHECK-NEXT:    sub z1.d, z3.d, z1.d
-; CHECK-NEXT:    lsr z0.d, z0.d, #1
-; CHECK-NEXT:    lsr z1.d, z1.d, #1
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    asr z2.s, z1.s, #1
+; CHECK-NEXT:    asr z3.s, z0.s, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.s, z3.s, z2.s
+; CHECK-NEXT:    and z0.s, z0.s, #0x1
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
 entry:
   %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
@@ -377,18 +427,12 @@
 define <vscale x 4 x i32> @rhaddu_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
 ; CHECK-LABEL: rhaddu_v4i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    uunpkhi z2.d, z0.s
-; CHECK-NEXT:    uunpklo z0.d, z0.s
-; CHECK-NEXT:    mov z4.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    uunpkhi z3.d, z1.s
-; CHECK-NEXT:    uunpklo z1.d, z1.s
-; CHECK-NEXT:    eor z0.d, z0.d, z4.d
-; CHECK-NEXT:    sub z0.d, z1.d, z0.d
-; CHECK-NEXT:    eor z1.d, z2.d, z4.d
-; CHECK-NEXT:    sub z1.d, z3.d, z1.d
-; CHECK-NEXT:    lsr z0.d, z0.d, #1
-; CHECK-NEXT:    lsr z1.d, z1.d, #1
-; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    lsr z2.s, z1.s, #1
+; CHECK-NEXT:    lsr z3.s, z0.s, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.s, z3.s, z2.s
+; CHECK-NEXT:    and z0.s, z0.s, #0x1
+; CHECK-NEXT:    add z0.s, z2.s, z0.s
 ; CHECK-NEXT:    ret
 entry:
   %s0s = zext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
@@ -466,10 +510,8 @@
 ; CHECK-LABEL: rhaddu_v4i16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and z0.s, z0.s, #0xffff
-; CHECK-NEXT:    mov z2.s, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z1.s, z1.s, #0xffff
-; CHECK-NEXT:    eor z0.d, z0.d, z2.d
-; CHECK-NEXT:    sub z0.s, z1.s, z0.s
+; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    lsr z0.s, z0.s, #1
 ; CHECK-NEXT:    ret
 entry:
@@ -485,18 +527,12 @@
 define <vscale x 8 x i16> @rhadds_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
 ; CHECK-LABEL: rhadds_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sunpkhi z2.s, z0.h
-; CHECK-NEXT:    sunpklo z0.s, z0.h
-; CHECK-NEXT:    mov z4.s, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    sunpkhi z3.s, z1.h
-; CHECK-NEXT:    sunpklo z1.s, z1.h
-; CHECK-NEXT:    eor z0.d, z0.d, z4.d
-; CHECK-NEXT:    sub z0.s, z1.s, z0.s
-; CHECK-NEXT:    eor z1.d, z2.d, z4.d
-; CHECK-NEXT:    sub z1.s, z3.s, z1.s
-; CHECK-NEXT:    lsr z0.s, z0.s, #1
-; CHECK-NEXT:    lsr z1.s, z1.s, #1
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT:    asr z2.h, z1.h, #1
+; CHECK-NEXT:    asr z3.h, z0.h, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.h, z3.h, z2.h
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
 entry:
   %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
@@ -511,18 +547,12 @@
 define <vscale x 8 x i16> @rhaddu_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
 ; CHECK-LABEL: rhaddu_v8i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    uunpkhi z2.s, z0.h
-; CHECK-NEXT:    uunpklo z0.s, z0.h
-; CHECK-NEXT:    mov z4.s, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    uunpkhi z3.s, z1.h
-; CHECK-NEXT:    uunpklo z1.s, z1.h
-; CHECK-NEXT:    eor z0.d, z0.d, z4.d
-; CHECK-NEXT:    sub z0.s, z1.s, z0.s
-; CHECK-NEXT:    eor z1.d, z2.d, z4.d
-; CHECK-NEXT:    sub z1.s, z3.s, z1.s
-; CHECK-NEXT:    lsr z0.s, z0.s, #1
-; CHECK-NEXT:    lsr z1.s, z1.s, #1
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT:    lsr z2.h, z1.h, #1
+; CHECK-NEXT:    lsr z3.h, z0.h, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.h, z3.h, z2.h
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
+; CHECK-NEXT:    add z0.h, z2.h, z0.h
 ; CHECK-NEXT:    ret
 entry:
   %s0s = zext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
@@ -600,10 +630,8 @@
 ; CHECK-LABEL: rhaddu_v8i8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    and z0.h, z0.h, #0xff
-; CHECK-NEXT:    mov z2.h, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z1.h, z1.h, #0xff
-; CHECK-NEXT:    eor z0.d, z0.d, z2.d
-; CHECK-NEXT:    sub z0.h, z1.h, z0.h
+; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    lsr z0.h, z0.h, #1
 ; CHECK-NEXT:    ret
 entry:
@@ -619,18 +647,12 @@
 define <vscale x 16 x i8> @rhadds_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
 ; CHECK-LABEL: rhadds_v16i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sunpkhi z2.h, z0.b
-; CHECK-NEXT:    sunpklo z0.h, z0.b
-; CHECK-NEXT:    mov z4.h, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    sunpkhi z3.h, z1.b
-; CHECK-NEXT:    sunpklo z1.h, z1.b
-; CHECK-NEXT:    eor z0.d, z0.d, z4.d
-; CHECK-NEXT:    sub z0.h, z1.h, z0.h
-; CHECK-NEXT:    eor z1.d, z2.d, z4.d
-; CHECK-NEXT:    sub z1.h, z3.h, z1.h
-; CHECK-NEXT:    lsr z0.h, z0.h, #1
-; CHECK-NEXT:    lsr z1.h, z1.h, #1
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    asr z2.b, z1.b, #1
+; CHECK-NEXT:    asr z3.b, z0.b, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.b, z3.b, z2.b
+; CHECK-NEXT:    and z0.b, z0.b, #0x1
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
 entry:
   %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
@@ -645,18 +667,12 @@
 define <vscale x 16 x i8> @rhaddu_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
 ; CHECK-LABEL: rhaddu_v16i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    uunpkhi z2.h, z0.b
-; CHECK-NEXT:    uunpklo z0.h, z0.b
-; CHECK-NEXT:    mov z4.h, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    uunpkhi z3.h, z1.b
-; CHECK-NEXT:    uunpklo z1.h, z1.b
-; CHECK-NEXT:    eor z0.d, z0.d, z4.d
-; CHECK-NEXT:    sub z0.h, z1.h, z0.h
-; CHECK-NEXT:    eor z1.d, z2.d, z4.d
-; CHECK-NEXT:    sub z1.h, z3.h, z1.h
-; CHECK-NEXT:    lsr z0.h, z0.h, #1
-; CHECK-NEXT:    lsr z1.h, z1.h, #1
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT:    lsr z2.b, z1.b, #1
+; CHECK-NEXT:    lsr z3.b, z0.b, #1
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    add z2.b, z3.b, z2.b
+; CHECK-NEXT:    and z0.b, z0.b, #0x1
+; CHECK-NEXT:    add z0.b, z2.b, z0.b
 ; CHECK-NEXT:    ret
 entry:
   %s0s = zext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>