diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -12276,6 +12276,45 @@ return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp); } +// ADD(UADDV a, UADDV b) --> UADDV((ADD a, b)) +static SDValue performUADDVCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + EVT VT = N->getValueType(0); + // Only scalar integer and vector types. + if (N->getOpcode() != ISD::ADD || !VT.isScalarInteger()) + return SDValue(); + + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || + RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT || LHS.getValueType() != VT) + return SDValue(); + + auto *LHSN1 = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); + auto *RHSN1 = dyn_cast<ConstantSDNode>(RHS->getOperand(1)); + if (!LHSN1 || !RHSN1 || !LHSN1->isNullValue() || !RHSN1->isNullValue()) + return SDValue(); + + SDValue Op1 = LHS->getOperand(0); + SDValue Op2 = RHS->getOperand(0); + EVT OpVT1 = Op1.getValueType(); + EVT OpVT2 = Op2.getValueType(); + if (Op1.getOpcode() != AArch64ISD::UADDV || OpVT1 != OpVT2 || + Op2.getOpcode() != AArch64ISD::UADDV || + OpVT1.getVectorElementType() != VT) + return SDValue(); + + SDValue Val1 = Op1.getOperand(0); + SDValue Val2 = Op2.getOperand(0); + EVT ValVT = Val1->getValueType(0); + SDLoc DL(N); + SDValue AddVal = DAG.getNode(ISD::ADD, DL, ValVT, Val1, Val2); + return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, + DAG.getNode(AArch64ISD::UADDV, DL, ValVT, AddVal), + DAG.getConstant(0, DL, MVT::i64)); +} + +// The basic add/sub long vector instructions have variants with "2" on the end // which act on the high-half of their inputs. 
They are normally matched by // patterns like: @@ -12329,6 +12368,18 @@ return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS); } +static SDValue performAddSubCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + SelectionDAG &DAG) { + // Try to change sum of two reductions. + SDValue Val = performUADDVCombine(N, DCI, DAG); + if (Val.getNode()) { + return Val; + } + + return performAddSubLongCombine(N, DCI, DAG); +} + // Massage DAGs which we can use the high-half "long" operations on into // something isel will recognize better. E.g. // @@ -14677,7 +14728,7 @@ break; case ISD::ADD: case ISD::SUB: - return performAddSubLongCombine(N, DCI, DAG); + return performAddSubCombine(N, DCI, DAG); case ISD::XOR: return performXorCombine(N, DAG, DCI, Subtarget); case ISD::MUL: diff --git a/llvm/test/CodeGen/AArch64/aarch64-addv.ll b/llvm/test/CodeGen/AArch64/aarch64-addv.ll --- a/llvm/test/CodeGen/AArch64/aarch64-addv.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-addv.ll @@ -4,6 +4,7 @@ declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) +declare i16 @llvm.vector.reduce.add.v4i16(<4 x i16>) declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) define i8 @add_B(<16 x i8>* %arr) { @@ -68,3 +69,25 @@ %r = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %bin.rdx) ret i32 %r } + +define i32 @addv_combine_i32(<4 x i32> %a1, <4 x i32> %a2) { +; CHECK-LABEL: addv_combine_i32 +; CHECK: add {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s +; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s +entry: + %rdx.1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a1) + %rdx.2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a2) + %r = add i32 %rdx.1, %rdx.2 + ret i32 %r +} + +define i64 @addv_combine_i64(<2 x i64> %a1, <2 x i64> %a2) { +; CHECK-LABEL: addv_combine_i64 +; CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d +; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d +entry: + %rdx.1 = call 
i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a1) + %rdx.2 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %a2) + %r = add i64 %rdx.1, %rdx.2 + ret i64 %r +}