Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -202,6 +202,8 @@
     SADDV,
     UADDV,
 
+    UADDV_PRED,
+
     // Vector halving addition
     SHADD,
     UHADD,
@@ -911,6 +913,7 @@
   SDValue LowerFixedLengthVectorIntExtendToSVE(SDValue Op,
                                                SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorLoadToSVE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerFixedLengthUADDVToSVE(SDValue ScalarOp, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorSetccToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorStoreToSVE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFixedLengthVectorTruncateToSVE(SDValue Op,
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1153,6 +1153,7 @@
     setOperationAction(ISD::UDIV, VT, Custom);
     setOperationAction(ISD::UMAX, VT, Custom);
     setOperationAction(ISD::UMIN, VT, Custom);
+    setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
     setOperationAction(ISD::XOR, VT, Custom);
     setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
   }
@@ -1548,6 +1549,7 @@
     MAKE_CASE(AArch64ISD::FCMLTz)
     MAKE_CASE(AArch64ISD::SADDV)
     MAKE_CASE(AArch64ISD::UADDV)
+    MAKE_CASE(AArch64ISD::UADDV_PRED)
    MAKE_CASE(AArch64ISD::SRHADD)
     MAKE_CASE(AArch64ISD::URHADD)
     MAKE_CASE(AArch64ISD::SHADD)
@@ -9441,9 +9443,13 @@
 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
                                               SelectionDAG &DAG) const {
+  SDValue VecOp = Op.getOperand(0);
+
   SDLoc dl(Op);
   switch (Op.getOpcode()) {
   case ISD::VECREDUCE_ADD:
+    if (useSVEForFixedLengthVectorVT(VecOp.getValueType()))
+      return LowerFixedLengthUADDVToSVE(Op, DAG);
     return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
   case ISD::VECREDUCE_SMAX:
     return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
   case ISD::VECREDUCE_SMIN:
@@ -12046,6 +12052,31 @@
         DAG.getConstant(0, dl, MVT::i64));
 }
 
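+// Lower an SVE unsigned-add reduction intrinsic to the UADDV_PRED node.
+// Unlike the other SVE integer reductions, UADDV always produces a 64-bit
+// result, so the node is created with an i64 result type.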
+static SDValue LowerSVEUADDVReduction(SDNode *N, unsigned Opc,
+                                      SelectionDAG &DAG) {
+  SDLoc dl(N);
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+  EVT VT = N->getValueType(0);
+  SDValue Pred = N->getOperand(1);
+  SDValue Data = N->getOperand(2);
+  EVT DataVT = Data.getValueType();
+
+  if (DataVT.getVectorElementType().isScalarInteger() &&
+      (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)) {
+    if (!TLI.isTypeLegal(DataVT))
+      return SDValue();
+
+    SDValue Result = DAG.getNode(Opc, dl, MVT::i64, Pred, Data);
+    return Result;
+  }
+
+  return SDValue();
+}
+
 static SDValue LowerSVEIntReduction(SDNode *N, unsigned Opc,
                                     SelectionDAG &DAG) {
   SDLoc dl(N);
@@ -12330,6 +12358,8 @@
   case Intrinsic::aarch64_crc32h:
   case Intrinsic::aarch64_crc32ch:
     return tryCombineCRC32(0xffff, N, DAG);
+  case Intrinsic::aarch64_sve_uaddv:
+    return LowerSVEUADDVReduction(N, AArch64ISD::UADDV_PRED, DAG);
   case Intrinsic::aarch64_sve_smaxv:
     return LowerSVEIntReduction(N, AArch64ISD::SMAXV_PRED, DAG);
   case Intrinsic::aarch64_sve_umaxv:
@@ -15690,6 +15720,23 @@
   return convertFromScalableVector(DAG, VT, ScalableRes);
 }
 
+SDValue AArch64TargetLowering::LowerFixedLengthUADDVToSVE(
+    SDValue ScalarOp, SelectionDAG &DAG) const {
+  SDLoc DL(ScalarOp);
+  SDValue VecOp = ScalarOp.getOperand(0);
+  EVT VT = VecOp.getValueType();
+
+  SDValue Pg = getPredicateForVector(DAG, DL, VT);
+  EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+  VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
+
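+  // UADDV_PRED performs the reduction as an i64, so truncate the result
+  // back to the scalar type expected for VECREDUCE_ADD.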
+  SDValue Rdx = DAG.getNode(AArch64ISD::UADDV_PRED, DL, MVT::i64, Pg, VecOp);
+  SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, ScalarOp.getValueType(), Rdx);
+  return Res;
+}
+
 SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
     SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -152,6 +152,7 @@
 def AArch64fmaxnmv_p : SDNode<"AArch64ISD::FMAXNMV_PRED", SDT_AArch64Reduce>;
 def AArch64fminv_p : SDNode<"AArch64ISD::FMINV_PRED", SDT_AArch64Reduce>;
 def AArch64fminnmv_p : SDNode<"AArch64ISD::FMINNMV_PRED", SDT_AArch64Reduce>;
+def AArch64uaddv_p : SDNode<"AArch64ISD::UADDV_PRED", SDT_AArch64Reduce>;
 def AArch64smaxv_p : SDNode<"AArch64ISD::SMAXV_PRED", SDT_AArch64Reduce>;
 def AArch64umaxv_p : SDNode<"AArch64ISD::UMAXV_PRED", SDT_AArch64Reduce>;
 def AArch64sminv_p : SDNode<"AArch64ISD::SMINV_PRED", SDT_AArch64Reduce>;
@@ -293,7 +294,10 @@
   // SVE predicated integer reductions.
   defm SADDV_VPZ : sve_int_reduce_0_saddv<0b000, "saddv", int_aarch64_sve_saddv>;
-  defm UADDV_VPZ : sve_int_reduce_0_uaddv<0b001, "uaddv", int_aarch64_sve_uaddv, int_aarch64_sve_saddv>;
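+  // UADDV writes its result to a 64-bit scalar register for every element
+  // type, so it has a dedicated AArch64uaddv_p node; selecting via the node
+  // also lets fixed-length VECREDUCE_ADD lowering reuse these patterns.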
+  defm UADDV_VPZ : sve_int_reduce_0_uaddv<0b001, "uaddv", AArch64uaddv_p, int_aarch64_sve_saddv>;
   defm SMAXV_VPZ : sve_int_reduce_1<0b000, "smaxv", AArch64smaxv_p>;
   defm UMAXV_VPZ : sve_int_reduce_1<0b001, "umaxv", AArch64umaxv_p>;
   defm SMINV_VPZ : sve_int_reduce_1<0b010, "sminv", AArch64sminv_p>;
   defm UMINV_VPZ : sve_int_reduce_1<0b011, "uminv", AArch64uminv_p>;
Index: llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
@@ -0,0 +1,103 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=32  -check-prefixes=CHECK,VBITS_GE_256
+; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=64  -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK,VBITS_GE_256,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Don't use SVE when its registers are no bigger than NEON's.
+; NO_SVE-NOT: ptrue
+
+;
+; UADDV
+;
+
+; Don't use SVE for 64-bit vectors.
+define i8 @uaddv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: uaddv_v8i8:
+; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.8b
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i8 @uaddv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: uaddv_v16i8:
+; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.16b
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v32i8:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
+; VBITS_GE_256-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @uaddv_v64i8(<64 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v64i8:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,64)]]
+; VBITS_GE_512-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <64 x i8>, <64 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.add.v64i8(<64 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @uaddv_v128i8(<128 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v128i8:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,128)]]
+; VBITS_GE_1024-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <128 x i8>, <128 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.add.v128i8(<128 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @uaddv_v256i8(<256 x i8>* %a) #0 {
+; CHECK-LABEL: uaddv_v256i8:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,256)]]
+; VBITS_GE_2048-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <256 x i8>, <256 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.add.v256i8(<256 x i8> %op)
+  ret i8 %res
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.add.v256i8(<256 x i8>)
+declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)