diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -21,6 +21,7 @@
 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
 
+#include "llvm/Analysis/IVDescriptors.h"
 #include "llvm/IR/InstrTypes.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/PassManager.h"
@@ -1308,6 +1309,10 @@
   bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                     unsigned AddrSpace) const;
 
+  /// \returns True if it is legal to vectorize the given reduction kind.
+  bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
+                                   ElementCount VF) const;
+
   /// \returns The new vector factor value if the target doesn't support \p
   /// SizeInBytes loads or has a better vector factor.
   unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
@@ -1643,6 +1648,8 @@
   virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const = 0;
+  virtual bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
+                                           ElementCount VF) const = 0;
   virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                        unsigned ChainSizeInBytes,
                                        VectorType *VecTy) const = 0;
@@ -2169,6 +2176,10 @@
     return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
   }
+  bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
+                                   ElementCount VF) const override {
+    return Impl.isLegalToVectorizeReduction(RdxDesc, VF);
+  }
   unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -689,6 +689,11 @@
     return true;
   }
 
+  bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
+                                   ElementCount VF) const {
+    return true;
+  }
+
   unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1033,6 +1033,11 @@
                                                   AddrSpace);
 }
 
+bool TargetTransformInfo::isLegalToVectorizeReduction(
+    RecurrenceDescriptor RdxDesc, ElementCount VF) const {
+  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
+}
+
 unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                   unsigned LoadSize,
                                                   unsigned ChainSizeInBytes,
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -186,12 +186,14 @@
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
 
-  bool isLegalScalarTypeForSVEMaskedMemOp(Type *Ty) const {
+  bool isLegalElementTypeForSVE(Type *Ty) const {
     if (Ty->isPointerTy())
       return true;
 
-    if (Ty->isBFloatTy() || Ty->isHalfTy() ||
-        Ty->isFloatTy() || Ty->isDoubleTy())
+    if (Ty->isBFloatTy() && ST->hasBF16())
+      return true;
+
+    if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
       return true;
 
     if (Ty->isIntegerTy(8) || Ty->isIntegerTy(16) ||
@@ -205,7 +207,7 @@
     if (isa<FixedVectorType>(DataType) || !ST->hasSVE())
       return false;
 
-    return isLegalScalarTypeForSVEMaskedMemOp(DataType->getScalarType());
+    return isLegalElementTypeForSVE(DataType->getScalarType());
   }
 
   bool isLegalMaskedLoad(Type *DataType, Align Alignment) {
@@ -220,7 +222,7 @@
     if (isa<FixedVectorType>(DataType) || !ST->hasSVE())
       return false;
 
-    return isLegalScalarTypeForSVEMaskedMemOp(DataType->getScalarType());
+    return isLegalElementTypeForSVE(DataType->getScalarType());
   }
 
   bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
@@ -266,6 +268,9 @@
   bool supportsScalableVectors() const { return ST->hasSVE(); }
 
+  bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
+                                   ElementCount VF) const;
+
   int getArithmeticReductionCost(
       unsigned Opcode, VectorType *Ty, bool IsPairwiseForm,
       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1089,6 +1089,33 @@
   return Considerable;
 }
 
+bool AArch64TTIImpl::isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
+                                                 ElementCount VF) const {
+  if (!VF.isScalable())
+    return true;
+
+  Type *Ty = RdxDesc.getRecurrenceType();
+  if (Ty->isBFloatTy() || !isLegalElementTypeForSVE(Ty))
+    return false;
+
+  switch (RdxDesc.getRecurrenceKind()) {
+  case RecurKind::Add:
+  case RecurKind::FAdd:
+  case RecurKind::And:
+  case RecurKind::Or:
+  case RecurKind::Xor:
+  case RecurKind::SMin:
+  case RecurKind::SMax:
+  case RecurKind::UMin:
+  case RecurKind::UMax:
+  case RecurKind::FMin:
+  case RecurKind::FMax:
+    return true;
+  default:
+    return false;
+  }
+}
+
 int AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                            bool IsPairwise, bool IsUnsigned,
                                            TTI::TargetCostKind CostKind) {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1525,6 +1525,15 @@
            (SI && isLegalMaskedScatter(Ty, Align));
   }
 
+  /// Returns true if the target machine supports all of the reduction
+  /// variables found for the given VF.
+  bool canVectorizeReductions(ElementCount VF) {
+    return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
+      RecurrenceDescriptor RdxDesc = Reduction.second;
+      return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
+    }));
+  }
+
   /// Returns true if \p I is an instruction that will be scalarized with
   /// predication. Such instructions include conditional stores and
   /// instructions that may divide by zero.
@@ -4626,7 +4635,6 @@
                                               RecurrenceDescriptor *RdxDesc,
                                               Value *StartV, VPValue *Def,
                                               VPTransformState &State) {
-  assert(!State.VF.isScalable() && "scalable vectors not yet supported.");
   PHINode *P = cast<PHINode>(PN);
   if (EnableVPlanNativePath) {
     // Currently we enter here in the VPlan-native path for non-induction
@@ -5688,9 +5696,22 @@
   // then a suitable VF is chosen. If UserVF is specified and there are
   // dependencies, check if it's legal. However, if a UserVF is specified and
   // there are no dependencies, then there's nothing to do.
-  if (UserVF.isNonZero() && !IgnoreScalableUserVF &&
-      Legal->isSafeForAnyVectorWidth())
-    return UserVF;
+  if (UserVF.isNonZero() && !IgnoreScalableUserVF) {
+    if (!canVectorizeReductions(UserVF)) {
+      reportVectorizationFailure(
+          "LV: Scalable vectorization not supported for the reduction "
+          "operations found in this loop. Using fixed-width "
+          "vectorization instead.",
+          "Scalable vectorization not supported for the reduction operations "
+          "found in this loop. Using fixed-width vectorization instead.",
+          "ScalableVFUnfeasible", ORE, TheLoop);
+      return computeFeasibleMaxVF(
+          ConstTripCount, ElementCount::getFixed(UserVF.getKnownMinValue()));
+    }
+
+    if (Legal->isSafeForAnyVectorWidth())
+      return UserVF;
+  }
 
   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
   unsigned SmallestType, WidestType;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll
@@ -0,0 +1,400 @@
+; RUN: opt < %s -loop-vectorize -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize -pass-remarks-missed=loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S 2>%t | FileCheck %s -check-prefix=CHECK
+; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARK
+
+; Reduction can be vectorized
+
+; ADD
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+define i32 @add(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @add
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[ADD1:.*]] = add <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[ADD2:.*]] = add <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[ADD:.*]] = add <vscale x 8 x i32> %[[ADD2]], %[[ADD1]]
+; CHECK-NEXT: call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %[[ADD]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 %add
+}
+
+; OR
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+define i32 @or(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @or
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[OR1:.*]] = or <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[OR2:.*]] = or <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[OR:.*]] = or <vscale x 8 x i32> %[[OR2]], %[[OR1]]
+; CHECK-NEXT: call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> %[[OR]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %or = or i32 %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 %or
+}
+
+; AND
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+define i32 @and(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @and
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[AND1:.*]] = and <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[AND2:.*]] = and <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[AND:.*]] = and <vscale x 8 x i32> %[[AND2]], %[[AND1]]
+; CHECK-NEXT: call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> %[[AND]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %and = and i32 %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 %and
+}
+
+; XOR
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+define i32 @xor(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @xor
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[XOR1:.*]] = xor <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[XOR2:.*]] = xor <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[XOR:.*]] = xor <vscale x 8 x i32> %[[XOR2]], %[[XOR1]]
+; CHECK-NEXT: call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> %[[XOR]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %xor = xor i32 %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 %xor
+}
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+; SMIN
+
+define i32 @smin(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @smin
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[ICMP1:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[ICMP2:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[ICMP:.*]] = icmp slt <vscale x 8 x i32> %[[SEL1]], %[[SEL2]]
+; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[ICMP]], <vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]]
+; CHECK-NEXT: call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> %[[SEL]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %cmp.i = icmp slt i32 %0, %sum.010
+  %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret i32 %.sroa.speculated
+}
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+; UMAX
+
+define i32 @umax(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @umax
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
+; CHECK: %[[ICMP1:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[ICMP2:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
+; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[ICMP:.*]] = icmp ugt <vscale x 8 x i32> %[[SEL1]], %[[SEL2]]
+; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[ICMP]], <vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]]
+; CHECK-NEXT: call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> %[[SEL]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %cmp.i = icmp ugt i32 %0, %sum.010
+  %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret i32 %.sroa.speculated
+}
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+; FADD (FAST)
+
+define float @fadd_fast(float* noalias nocapture readonly %a, i64 %n) {
+; CHECK-LABEL: @fadd_fast
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
+; CHECK: %[[ADD1:.*]] = fadd fast <vscale x 8 x float> %[[LOAD1]]
+; CHECK: %[[ADD2:.*]] = fadd fast <vscale x 8 x float> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[ADD:.*]] = fadd fast <vscale x 8 x float> %[[ADD2]], %[[ADD1]]
+; CHECK-NEXT: call fast float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> %[[ADD]])
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
+  %0 = load float, float* %arrayidx, align 4
+  %add = fadd fast float %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret float %add
+}
+
+; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop. Using fixed-width vectorization instead.
+; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
+define bfloat @fadd_fast_bfloat(bfloat* noalias nocapture readonly %a, i64 %n) {
+; CHECK-LABEL: @fadd_fast_bfloat
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <8 x bfloat>
+; CHECK: %[[LOAD2:.*]] = load <8 x bfloat>
+; CHECK: %[[FADD1:.*]] = fadd fast <8 x bfloat> %[[LOAD1]]
+; CHECK: %[[FADD2:.*]] = fadd fast <8 x bfloat> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[RDX:.*]] = fadd fast <8 x bfloat> %[[FADD2]], %[[FADD1]]
+; CHECK: call fast bfloat @llvm.vector.reduce.fadd.v8bf16(bfloat 0xR8000, <8 x bfloat> %[[RDX]])
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds bfloat, bfloat* %a, i64 %iv
+  %0 = load bfloat, bfloat* %arrayidx, align 4
+  %add = fadd fast bfloat %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret bfloat %add
+}
+
+; FMIN (FAST)
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+define float @fmin_fast(float* noalias nocapture readonly %a, i64 %n) #0 {
+; CHECK-LABEL: @fmin_fast
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
+; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD1]]
+; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD2]]
+; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
+; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
+; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
+; CHECK-NEXT: call float @llvm.vector.reduce.fmin.nxv8f32(<vscale x 8 x float> %[[SEL]])
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
+  %0 = load float, float* %arrayidx, align 4
+  %cmp.i = fcmp olt float %0, %sum.07
+  %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret float %.sroa.speculated
+}
+
+; FMAX (FAST)
+
+; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
+define float @fmax_fast(float* noalias nocapture readonly %a, i64 %n) #0 {
+; CHECK-LABEL: @fmax_fast
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
+; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
+; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD1]]
+; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD2]]
+; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
+; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
+; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
+; CHECK-NEXT: call fast float @llvm.vector.reduce.fmax.nxv8f32(<vscale x 8 x float> %[[SEL]])
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
+  %0 = load float, float* %arrayidx, align 4
+  %cmp.i = fcmp fast ogt float %0, %sum.07
+  %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret float %.sroa.speculated
+}
+
+; Reduction cannot be vectorized
+
+; MUL
+
+; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop. Using fixed-width vectorization instead.
+; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
+define i32 @mul(i32* nocapture %a, i32* nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @mul
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <8 x i32>
+; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD1]]
+; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD2]]
+; CHECK: middle.block:
+; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
+; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %mul = mul nsw i32 %0, %sum.07
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:                                          ; preds = %for.body, %entry
+  ret i32 %mul
+}
+
+; Note: This test was added to ensure we always check the legality of reductions (and emit a warning if necessary) before checking for memory dependencies
+; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop. Using fixed-width vectorization instead.
+; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
+define i32 @memory_dependence(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: @memory_dependence
+; CHECK: vector.body:
+; CHECK: %[[LOAD1:.*]] = load <8 x i32>
+; CHECK: %[[LOAD2:.*]] = load <8 x i32>
+; CHECK: %[[LOAD3:.*]] = load <8 x i32>
+; CHECK: %[[LOAD4:.*]] = load <8 x i32>
+; CHECK: %[[ADD1:.*]] = add nsw <8 x i32> %[[LOAD3]], %[[LOAD1]]
+; CHECK: %[[ADD2:.*]] = add nsw <8 x i32> %[[LOAD4]], %[[LOAD2]]
+; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD3]]
+; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD4]]
+; CHECK: middle.block:
+; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
+; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %i
+  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %b, i64 %i
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %0
+  %add2 = add nuw nsw i64 %i, 32
+  %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %add2
+  store i32 %add, i32* %arrayidx3, align 4
+  %mul = mul nsw i32 %1, %sum
+  %inc = add nuw nsw i64 %i, 1
+  %exitcond.not = icmp eq i64 %inc, %n
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret i32 %mul
+}
+
+attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
+
+!0 = distinct !{!0, !1, !2, !3, !4}
+!1 = !{!"llvm.loop.vectorize.width", i32 8}
+!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+!3 = !{!"llvm.loop.interleave.count", i32 2}
+!4 = !{!"llvm.loop.vectorize.enable", i1 true}