diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -471,6 +471,10 @@
                                               Node->getValueType(0), Scale);
     break;
   }
+  case ISD::VECREDUCE_SEQ_FADD:
+    Action = TLI.getOperationAction(Node->getOpcode(),
+                                    Node->getOperand(1).getValueType());
+    break;
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
   case ISD::VECREDUCE_ADD:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -780,6 +780,14 @@
     return !useSVEForFixedLengthVectors();
   }
 
+  // FIXME: Move useSVEForFixedLengthVectors*() back to private scope once
+  // reduction legalization is complete.
+  bool useSVEForFixedLengthVectors() const;
+  // Normally SVE is only used for byte size vectors that do not fit within a
+  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
+  // used for 64bit and 128bit vectors as well.
+  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
+
 private:
   /// Keep a pointer to the AArch64Subtarget around so that we can
   /// make the right decision when generating code for different targets.
@@ -935,6 +943,7 @@
   SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                                SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthReductionToSVE(unsigned Opcode, SDValue ScalarOp,
                                          SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorSelectToSVE(SDValue Op, SelectionDAG &DAG) const;
@@ -1006,12 +1015,6 @@
   bool shouldLocalize(const MachineInstr &MI,
                       const TargetTransformInfo *TTI) const override;
-
-  bool useSVEForFixedLengthVectors() const;
-  // Normally SVE is only used for byte size vectors that do not fit within a
-  // NEON vector. This changes when OverrideNEON is true, allowing SVE to be
-  // used for 64bit and 128bit vectors as well.
-  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
 };
 
 namespace AArch64 {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1119,6 +1119,8 @@
     setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
     setOperationAction(ISD::VECREDUCE_UMAX, MVT::v2i64, Custom);
     setOperationAction(ISD::VECREDUCE_UMIN, MVT::v2i64, Custom);
+
+    // Int operations with no NEON support.
     for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
                     MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
@@ -1126,6 +1128,11 @@
       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
     }
 
+    // FP operations with no NEON support.
+    for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32,
+                    MVT::v1f64, MVT::v2f64})
+      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+
     // Use SVE for vectors with more than 2 elements.
     for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v4f32})
       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
@@ -1266,6 +1273,7 @@
   setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
   setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
   setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
   setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
   setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
   setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
@@ -3964,6 +3972,8 @@
     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::STORE:
     return LowerSTORE(Op, DAG);
+  case ISD::VECREDUCE_SEQ_FADD:
+    return LowerVECREDUCE_SEQ_FADD(Op, DAG);
   case ISD::VECREDUCE_ADD:
   case ISD::VECREDUCE_AND:
   case ISD::VECREDUCE_OR:
@@ -16257,6 +16267,34 @@
   return convertFromScalableVector(DAG, VT, ScalableRes);
 }
 
+SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
+                                                       SelectionDAG &DAG) const {
+  SDLoc DL(ScalarOp);
+  SDValue AccOp = ScalarOp.getOperand(0);
+  SDValue VecOp = ScalarOp.getOperand(1);
+  EVT SrcVT = VecOp.getValueType();
+  EVT ResVT = SrcVT.getVectorElementType();
+
+  // Only fixed length FADDA handled for now.
+  if (!useSVEForFixedLengthVectorVT(SrcVT, /*OverrideNEON=*/true))
+    return SDValue();
+
+  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
+  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);
+
+  // Convert operands to Scalable.
+  AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
+                      DAG.getUNDEF(ContainerVT), AccOp, Zero);
+  VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
+
+  // Perform reduction.
+  SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
+                            Pg, AccOp, VecOp);
+
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx, Zero);
+}
+
 SDValue AArch64TargetLowering::LowerFixedLengthReductionToSVE(unsigned Opcode,
     SDValue ScalarOp, SelectionDAG &DAG) const {
   SDLoc DL(ScalarOp);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -222,7 +222,15 @@
 
   bool shouldExpandReduction(const IntrinsicInst *II) const {
     switch (II->getIntrinsicID()) {
-    case Intrinsic::vector_reduce_fadd:
+    case Intrinsic::vector_reduce_fadd: {
+      Value *VecOp = II->getArgOperand(1);
+      EVT VT = TLI->getValueType(getDataLayout(), VecOp->getType());
+      if (ST->hasSVE() &&
+          TLI->useSVEForFixedLengthVectorVT(VT, /*OverrideNEON=*/true))
+        return false;
+
+      return !II->getFastMathFlags().allowReassoc();
+    }
     case Intrinsic::vector_reduce_fmul:
       // We don't have legalization support for ordered FP reductions.
       return !II->getFastMathFlags().allowReassoc();
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-reduce.ll
@@ -20,6 +20,214 @@
 ; Don't use SVE when its registers are no bigger than NEON.
 ; NO_SVE-NOT: ptrue
 
+;
+; FADDA
+;
+
+; No single instruction NEON support. Use SVE.
+define half @fadda_v4f16(half %start, <4 x half> %a) #0 {
+; CHECK-LABEL: fadda_v4f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl4
+; CHECK-NEXT: fadda h0, [[PG]], h0, z1.h
+; CHECK-NEXT: ret
+  %res = call half @llvm.vector.reduce.fadd.v4f16(half %start, <4 x half> %a)
+  ret half %res
+}
+
+; No single instruction NEON support. Use SVE.
+define half @fadda_v8f16(half %start, <8 x half> %a) #0 {
+; CHECK-LABEL: fadda_v8f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl8
+; CHECK-NEXT: fadda h0, [[PG]], h0, z1.h
+; CHECK-NEXT: ret
+  %res = call half @llvm.vector.reduce.fadd.v8f16(half %start, <8 x half> %a)
+  ret half %res
+}
+
+define half @fadda_v16f16(half %start, <16 x half>* %a) #0 {
+; CHECK-LABEL: fadda_v16f16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: fadda h0, [[PG]], h0, [[OP]].h
+; CHECK-NEXT: ret
+  %op = load <16 x half>, <16 x half>* %a
+  %res = call half @llvm.vector.reduce.fadd.v16f16(half %start, <16 x half> %op)
+  ret half %res
+}
+
+define half @fadda_v32f16(half %start, <32 x half>* %a) #0 {
+; CHECK-LABEL: fadda_v32f16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: fadda h0, [[PG]], h0, [[OP]].h
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-COUNT-32: fadd
+; VBITS_EQ_256: ret
+  %op = load <32 x half>, <32 x half>* %a
+  %res = call half @llvm.vector.reduce.fadd.v32f16(half %start, <32 x half> %op)
+  ret half %res
+}
+
+define half @fadda_v64f16(half %start, <64 x half>* %a) #0 {
+; CHECK-LABEL: fadda_v64f16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: fadda h0, [[PG]], h0, [[OP]].h
+; VBITS_GE_1024-NEXT: ret
+  %op = load <64 x half>, <64 x half>* %a
+  %res = call half @llvm.vector.reduce.fadd.v64f16(half %start, <64 x half> %op)
+  ret half %res
+}
+
+define half @fadda_v128f16(half %start, <128 x half>* %a) #0 {
+; CHECK-LABEL: fadda_v128f16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: fadda h0, [[PG]], h0, [[OP]].h
+; VBITS_GE_2048-NEXT: ret
+  %op = load <128 x half>, <128 x half>* %a
+  %res = call half @llvm.vector.reduce.fadd.v128f16(half %start, <128 x half> %op)
+  ret half %res
+}
+
+; No single instruction NEON support. Use SVE.
+define float @fadda_v2f32(float %start, <2 x float> %a) #0 {
+; CHECK-LABEL: fadda_v2f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl2
+; CHECK-NEXT: fadda s0, [[PG]], s0, z1.s
+; CHECK-NEXT: ret
+  %res = call float @llvm.vector.reduce.fadd.v2f32(float %start, <2 x float> %a)
+  ret float %res
+}
+
+; No single instruction NEON support. Use SVE.
+define float @fadda_v4f32(float %start, <4 x float> %a) #0 {
+; CHECK-LABEL: fadda_v4f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl4
+; CHECK-NEXT: fadda s0, [[PG]], s0, z1.s
+; CHECK-NEXT: ret
+  %res = call float @llvm.vector.reduce.fadd.v4f32(float %start, <4 x float> %a)
+  ret float %res
+}
+
+define float @fadda_v8f32(float %start, <8 x float>* %a) #0 {
+; CHECK-LABEL: fadda_v8f32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: fadda s0, [[PG]], s0, [[OP]].s
+; CHECK-NEXT: ret
+  %op = load <8 x float>, <8 x float>* %a
+  %res = call float @llvm.vector.reduce.fadd.v8f32(float %start, <8 x float> %op)
+  ret float %res
+}
+
+define float @fadda_v16f32(float %start, <16 x float>* %a) #0 {
+; CHECK-LABEL: fadda_v16f32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: fadda s0, [[PG]], s0, [[OP]].s
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-COUNT-16: fadd
+; VBITS_EQ_256: ret
+  %op = load <16 x float>, <16 x float>* %a
+  %res = call float @llvm.vector.reduce.fadd.v16f32(float %start, <16 x float> %op)
+  ret float %res
+}
+
+define float @fadda_v32f32(float %start, <32 x float>* %a) #0 {
+; CHECK-LABEL: fadda_v32f32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: fadda s0, [[PG]], s0, [[OP]].s
+; VBITS_GE_1024-NEXT: ret
+  %op = load <32 x float>, <32 x float>* %a
+  %res = call float @llvm.vector.reduce.fadd.v32f32(float %start, <32 x float> %op)
+  ret float %res
+}
+
+define float @fadda_v64f32(float %start, <64 x float>* %a) #0 {
+; CHECK-LABEL: fadda_v64f32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: fadda s0, [[PG]], s0, [[OP]].s
+; VBITS_GE_2048-NEXT: ret
+  %op = load <64 x float>, <64 x float>* %a
+  %res = call float @llvm.vector.reduce.fadd.v64f32(float %start, <64 x float> %op)
+  ret float %res
+}
+
+; No single instruction NEON support. Use SVE.
+define double @fadda_v1f64(double %start, <1 x double> %a) #0 {
+; CHECK-LABEL: fadda_v1f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl1
+; CHECK-NEXT: fadda d0, [[PG]], d0, z1.d
+; CHECK-NEXT: ret
+  %res = call double @llvm.vector.reduce.fadd.v1f64(double %start, <1 x double> %a)
+  ret double %res
+}
+
+; No single instruction NEON support. Use SVE.
+define double @fadda_v2f64(double %start, <2 x double> %a) #0 {
+; CHECK-LABEL: fadda_v2f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl2
+; CHECK-NEXT: fadda d0, [[PG]], d0, z1.d
+; CHECK-NEXT: ret
+  %res = call double @llvm.vector.reduce.fadd.v2f64(double %start, <2 x double> %a)
+  ret double %res
+}
+
+define double @fadda_v4f64(double %start, <4 x double>* %a) #0 {
+; CHECK-LABEL: fadda_v4f64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: fadda d0, [[PG]], d0, [[OP]].d
+; CHECK-NEXT: ret
+  %op = load <4 x double>, <4 x double>* %a
+  %res = call double @llvm.vector.reduce.fadd.v4f64(double %start, <4 x double> %op)
+  ret double %res
+}
+
+define double @fadda_v8f64(double %start, <8 x double>* %a) #0 {
+; CHECK-LABEL: fadda_v8f64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: fadda d0, [[PG]], d0, [[OP]].d
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-COUNT-8: fadd
+; VBITS_EQ_256: ret
+  %op = load <8 x double>, <8 x double>* %a
+  %res = call double @llvm.vector.reduce.fadd.v8f64(double %start, <8 x double> %op)
+  ret double %res
+}
+
+define double @fadda_v16f64(double %start, <16 x double>* %a) #0 {
+; CHECK-LABEL: fadda_v16f64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: fadda d0, [[PG]], d0, [[OP]].d
+; VBITS_GE_1024-NEXT: ret
+  %op = load <16 x double>, <16 x double>* %a
+  %res = call double @llvm.vector.reduce.fadd.v16f64(double %start, <16 x double> %op)
+  ret double %res
+}
+
+define double @fadda_v32f64(double %start, <32 x double>* %a) #0 {
+; CHECK-LABEL: fadda_v32f64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: fadda d0, [[PG]], d0, [[OP]].d
+; VBITS_GE_2048-NEXT: ret
+  %op = load <32 x double>, <32 x double>* %a
+  %res = call double @llvm.vector.reduce.fadd.v32f64(double %start, <32 x double> %op)
+  ret double %res
+}
+
 ;
 ; FADDV
 ;