diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -161,6 +161,19 @@ bool skipScalarizationCost() const { return ScalarizationCost.isValid(); } }; +enum class ScalableVectorizationKind : int { + /// Not selected. + Unspecified = -1, + /// Disables vectorization with scalable vectors. + FixedWidthOnly = 0, + /// Vectorize loops using scalable vectors or fixed-width vectors, but favor + /// scalable vectors when the cost-model is inconclusive. + PreferScalable = 1, + /// Vectorize loops using scalable vectors or fixed-width vectors, but + /// favor fixed-width vectors when the cost is inconclusive. + PreferFixedWidth = 2, +}; + class TargetTransformInfo; typedef TargetTransformInfo TTI; @@ -1392,6 +1405,9 @@ /// to a stack reload. unsigned getGISelRematGlobalCost() const; + /// \return the preferred style of vectorization. + ScalableVectorizationKind getScalableVectorizationKind() const; + /// \returns True if the target supports scalable vectors. bool supportsScalableVectors() const; @@ -1761,6 +1777,7 @@ ReductionFlags) const = 0; virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0; virtual unsigned getGISelRematGlobalCost() const = 0; + virtual ScalableVectorizationKind getScalableVectorizationKind() const = 0; virtual bool supportsScalableVectors() const = 0; virtual bool hasActiveVectorLength(unsigned Opcode, Type *DataType, Align Alignment) const = 0; @@ -2358,6 +2375,10 @@ return Impl.getGISelRematGlobalCost(); } + ScalableVectorizationKind getScalableVectorizationKind() const override { + return Impl.getScalableVectorizationKind(); + } + bool supportsScalableVectors() const override { return Impl.supportsScalableVectors(); } diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -777,6 +777,10 @@ unsigned getGISelRematGlobalCost() const { return 1; } + ScalableVectorizationKind getScalableVectorizationKind() const { + return ScalableVectorizationKind::FixedWidthOnly; + } + bool supportsScalableVectors() const { return false; } bool hasActiveVectorLength(unsigned Opcode, Type *DataType, diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h --- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h +++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h @@ -29,6 +29,7 @@ #include "llvm/ADT/MapVector.h" #include "llvm/Analysis/LoopAccessAnalysis.h" #include "llvm/Analysis/OptimizationRemarkEmitter.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Support/TypeSize.h" #include "llvm/Transforms/Utils/LoopUtils.h" @@ -81,7 +82,7 @@ Hint Predicate; /// Says whether we should use fixed width or scalable vectorization. - Hint Scalable; + Hint ScalableVectorization; /// Return the loop metadata prefix. static StringRef Prefix() { return "llvm.loop."; } @@ -96,21 +97,8 @@ FK_Enabled = 1, ///< Forcing enabled. }; - enum ScalableForceKind { - /// Not selected. - SK_Unspecified = -1, - /// Disables vectorization with scalable vectors. 
- SK_FixedWidthOnly = 0, -    /// Vectorize loops using scalable vectors or fixed-width vectors, but favor -    /// scalable vectors when the cost-model is inconclusive. This is the -    /// default when the scalable.enable hint is enabled through a pragma. -    SK_PreferScalable = 1, -    /// Vectorize loops using scalable vectors or fixed-width vectors, but -    /// favor fixed-width vectors when the cost is inconclusive. -    SK_PreferFixedWidth = 2, -  }; -  LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced, +                     ScalableVectorizationKind Kind,                      OptimizationRemarkEmitter &ORE); /// Mark the loop L as already vectorized by setting the width to 1. @@ -148,18 +136,22 @@ /// favor vectorization with scalable vectors over fixed-width vectors when /// the cost-model is inconclusive. bool isScalableVectorizationPreferred() const { -    return Scalable.Value == SK_PreferScalable; +    return (ScalableVectorizationKind)ScalableVectorization.Value == +           ScalableVectorizationKind::PreferScalable; } /// \return true if scalable vectorization has been explicitly enabled. bool isScalableVectorizationExplicitlyEnabled() const { -    return Scalable.Value == SK_PreferFixedWidth || -           Scalable.Value == SK_PreferScalable; +    return (ScalableVectorizationKind)ScalableVectorization.Value == +               ScalableVectorizationKind::PreferFixedWidth || +           (ScalableVectorizationKind)ScalableVectorization.Value == +               ScalableVectorizationKind::PreferScalable; } /// \return true if scalable vectorization has been explicitly disabled. bool isScalableVectorizationDisabled() const { -    return Scalable.Value == SK_FixedWidthOnly; +    return (ScalableVectorizationKind)ScalableVectorization.Value == +           ScalableVectorizationKind::FixedWidthOnly; } /// If hints are provided that force vectorization, use the AlwaysPrint diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -1068,6 +1068,11 @@ return TTIImpl->getGISelRematGlobalCost(); } +ScalableVectorizationKind +TargetTransformInfo::getScalableVectorizationKind() const { +  return TTIImpl->getScalableVectorizationKind(); +} + bool TargetTransformInfo::supportsScalableVectors() const { return TTIImpl->supportsScalableVectors(); } diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -307,6 +307,11 @@ return 2; } +  ScalableVectorizationKind getScalableVectorizationKind() const { +    return ST->hasSVE() ? ScalableVectorizationKind::PreferScalable +                        : ScalableVectorizationKind::FixedWidthOnly; +  } + bool supportsScalableVectors() const { return ST->hasSVE(); } bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -56,19 +56,19 @@ "vectorize(enable) pragma")); // FIXME: When scalable vectorization is stable enough, change the default -// to SK_PreferFixedWidth. -static cl::opt<LoopVectorizeHints::ScalableForceKind> ScalableVectorization( -    "scalable-vectorization", cl::init(LoopVectorizeHints::SK_FixedWidthOnly), +// to ScalableVectorizationKind::PreferFixedWidth. 
+static cl::opt<ScalableVectorizationKind> ForceScalableVectorization( +    "scalable-vectorization", cl::init(ScalableVectorizationKind::Unspecified),     cl::Hidden,     cl::desc("Control whether the compiler can use scalable vectors to "              "vectorize a loop"),     cl::values(         clEnumValN(ScalableVectorizationKind::FixedWidthOnly, "off",                    "Scalable vectorization is disabled."),         clEnumValN(ScalableVectorizationKind::PreferFixedWidth, "on",                    "Scalable vectorization is available, but favor fixed-width "                    "vectorization when the cost is inconclusive."),         clEnumValN(ScalableVectorizationKind::PreferScalable, "preferred",                    "Scalable vectorization is available and favored when the "                    "cost is inconclusive."))); @@ -95,13 +95,16 @@  LoopVectorizeHints::LoopVectorizeHints(const Loop *L,                                         bool InterleaveOnlyWhenForced, +                                       ScalableVectorizationKind Scalable,                                         OptimizationRemarkEmitter &ORE)     : Width("vectorize.width", VectorizerParams::VectorizationFactor, HK_WIDTH),       Interleave("interleave.count", InterleaveOnlyWhenForced, HK_INTERLEAVE),       Force("vectorize.enable", FK_Undefined, HK_FORCE),       IsVectorized("isvectorized", 0, HK_ISVECTORIZED),       Predicate("vectorize.predicate.enable", FK_Undefined, HK_PREDICATE), -      Scalable("vectorize.scalable.enable", SK_Unspecified, HK_SCALABLE), +      ScalableVectorization("vectorize.scalable.enable", +                            (unsigned)ScalableVectorizationKind::Unspecified, +                            HK_SCALABLE),       TheLoop(L), ORE(ORE) {   // Populate values with existing loop metadata.   getHintsFromMetadata(); @@ -110,15 +113,38 @@   if (VectorizerParams::isInterleaveForced())     Interleave.Value = VectorizerParams::VectorizationInterleave; -  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified) -    // If the width is set, but the metadata says nothing about the scalable -    // property, then assume it concerns only a fixed-width UserVF. -    // If width is not set, the flag takes precedence. -    Scalable.Value = Width.Value ? SK_FixedWidthOnly : ScalableVectorization; -  else if (ScalableVectorization == SK_FixedWidthOnly) -    // If the flag is set to disable any use of scalable vectors, override the -    // loop hint. -    Scalable.Value = SK_FixedWidthOnly; +  if ((ScalableVectorizationKind)ScalableVectorization.Value == +      ScalableVectorizationKind::Unspecified) { +    // If the metadata doesn't explicitly specify whether to enable scalable +    // vectorization, then decide based on the following criteria (increasing +    // level of priority): +    //  - Target default +    //  - Loop-vectorizer force option +    //  - Metadata width +    ScalableVectorization.Value = (unsigned)Scalable; +  +    if (ForceScalableVectorization != ScalableVectorizationKind::Unspecified) +      ScalableVectorization.Value = +          (unsigned)ForceScalableVectorization.getValue(); +  +    if (Width.Value) +      // If the width is set, but the metadata says nothing about the scalable +      // property, then assume it concerns only a fixed-width UserVF. +      // If width is not set, the flag takes precedence. +      ScalableVectorization.Value = +          (unsigned)ScalableVectorizationKind::FixedWidthOnly; +  } +  +  // If the flag is set to disable any use of scalable vectors, override the +  // loop hints. 
+ if (ForceScalableVectorization == ScalableVectorizationKind::FixedWidthOnly) + ScalableVectorization.Value = + (unsigned)ScalableVectorizationKind::FixedWidthOnly; + + assert( + (ScalableVectorizationKind)ScalableVectorization.Value != + ScalableVectorizationKind::Unspecified && + "Decision on scalable vectorization should have been made at this point"); if (IsVectorized.Value != 1) // If the vectorization width and interleaving count are both 1 then @@ -274,7 +300,7 @@ unsigned Val = C->getZExtValue(); Hint *Hints[] = {&Width, &Interleave, &Force, - &IsVectorized, &Predicate, &Scalable}; + &IsVectorized, &Predicate, &ScalableVectorization}; for (auto H : Hints) { if (Name == H->Name) { if (H->validate(Val)) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1118,7 +1118,8 @@ OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) { LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I)); - LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); + LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, + ScalableVectorizationKind::FixedWidthOnly, *ORE); ORE->emit( createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) << "loop not vectorized: " << OREMsg); @@ -1128,7 +1129,8 @@ OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I) { LLVM_DEBUG(debugVectorizationMessage("", Msg, I)); - LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE); + LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, + ScalableVectorizationKind::FixedWidthOnly, *ORE); ORE->emit( createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I) << Msg); @@ -2209,7 +2211,8 @@ static bool isExplicitVecOuterLoop(Loop *OuterLp, OptimizationRemarkEmitter *ORE) { assert(!OuterLp->isInnermost() && "This is not an outer loop"); - LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); + LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, + ScalableVectorizationKind::FixedWidthOnly, *ORE); // Only outer loops with an explicit vectorization hint are supported. // Unannotated outer loops are ignored. 
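Note on the LoopVectorizeHints changes above: when a loop carries no explicit vectorize.scalable.enable metadata, the constructor now picks a value in increasing order of priority from the target default (TTI::getScalableVectorizationKind()), the -scalable-vectorization= command-line option, and an explicit vectorize.width hint (which is assumed to describe a fixed-width VF); -scalable-vectorization=off then overrides any loop hint. The following is a minimal standalone C++ sketch of that decision order, not part of the patch; decideScalableKind and its parameters are illustrative names only.

#include <optional>

enum class ScalableVectorizationKind : int {
  Unspecified = -1,
  FixedWidthOnly = 0,
  PreferScalable = 1,
  PreferFixedWidth = 2,
};

// MetadataKind:  value of the llvm.loop.vectorize.scalable.enable hint, if any.
// TargetDefault: what TTI::getScalableVectorizationKind() reports.
// ForceFlag:     the -scalable-vectorization= option, if given on the command line.
// HasUserWidth:  true if llvm.loop.vectorize.width was set on the loop.
static ScalableVectorizationKind
decideScalableKind(ScalableVectorizationKind MetadataKind,
                   ScalableVectorizationKind TargetDefault,
                   std::optional<ScalableVectorizationKind> ForceFlag,
                   bool HasUserWidth) {
  ScalableVectorizationKind Result = MetadataKind;
  if (Result == ScalableVectorizationKind::Unspecified) {
    // Increasing priority: target default, then the force flag, then an
    // explicit user width, which is assumed to mean a fixed-width VF.
    Result = TargetDefault;
    if (ForceFlag)
      Result = *ForceFlag;
    if (HasUserWidth)
      Result = ScalableVectorizationKind::FixedWidthOnly;
  }
  // -scalable-vectorization=off always wins, even over explicit loop metadata.
  if (ForceFlag && *ForceFlag == ScalableVectorizationKind::FixedWidthOnly)
    Result = ScalableVectorizationKind::FixedWidthOnly;
  return Result;
}

This mirrors the two-step shape of the patch: fill in a kind only while the hint is Unspecified, then apply the unconditional off override, after which the patch asserts that a decision has been made.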
@@ -3621,7 +3624,8 @@ if (MDNode *LID = OrigLoop->getLoopID()) L->setLoopID(LID); - LoopVectorizeHints Hints(L, true, *ORE); + auto PreferredScalableVectorization = TTI->getScalableVectorizationKind(); + LoopVectorizeHints Hints(L, true, PreferredScalableVectorization, *ORE); Hints.setAlreadyVectorized(); #ifdef EXPENSIVE_CHECKS @@ -10293,7 +10297,9 @@ << L->getHeader()->getParent()->getName() << "\" from " << DebugLocStr << "\n"); - LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); + auto PreferredScalableVectorization = TTI->getScalableVectorizationKind(); + LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, + PreferredScalableVectorization, *ORE); LLVM_DEBUG( dbgs() << "LV: Loop hints:" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/eliminate-tail-predication.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -force-target-instruction-cost=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s 2>&1 | FileCheck %s +; RUN: opt -loop-vectorize -force-target-instruction-cost=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s 2>&1 | FileCheck %s ; This test currently fails when the LV calculates a maximums safe ; distance for scalable vectors, because the code to eliminate the tail is diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll @@ -1,5 +1,5 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -force-vector-width=4 -force-vector-interleave=1 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF1 -; RUN: opt -loop-vectorize -scalable-vectorization=on -force-vector-width=4 -force-vector-interleave=2 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF2 +; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF1 +; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF2 ; We vectorize this first order recurrence, with a set of insertelements for ; each unrolled part. 
Make sure these insertelements are generated in-order, diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-op-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-op-cost.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-op-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-op-cost.ll @@ -1,5 +1,5 @@ ; REQUIRES: asserts -; RUN: opt -loop-vectorize -scalable-vectorization=on -force-vector-interleave=1 -S -debug-only=loop-vectorize < %s 2>%t | FileCheck %s +; RUN: opt -loop-vectorize -force-vector-interleave=1 -S -debug-only=loop-vectorize < %s 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=CHECK-COST target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-alloca.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-alloca.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-alloca.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-alloca.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -loop-vectorize -mattr=+sve -mtriple aarch64-unknown-linux-gnu -force-vector-width=2 -scalable-vectorization=preferred -pass-remarks-analysis=loop-vectorize -pass-remarks-missed=loop-vectorize < %s 2>%t | FileCheck %s +; RUN: opt -S -loop-vectorize -mattr=+sve -mtriple aarch64-unknown-linux-gnu -force-vector-width=2 -pass-remarks-analysis=loop-vectorize -pass-remarks-missed=loop-vectorize < %s 2>%t | FileCheck %s ; RUN: FileCheck %s --check-prefix=CHECK-REMARKS < %t ; CHECK-REMARKS: UserVF ignored because of invalid costs. diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-call.ll @@ -1,8 +1,8 @@ -; RUN: opt -S -loop-vectorize -force-vector-interleave=1 -instcombine -mattr=+sve -mtriple aarch64-unknown-linux-gnu -scalable-vectorization=on \ +; RUN: opt -S -loop-vectorize -force-vector-interleave=1 -instcombine -mattr=+sve -mtriple aarch64-unknown-linux-gnu \ ; RUN: -pass-remarks-missed=loop-vectorize < %s 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=CHECK-REMARKS ; RUN: opt -S -loop-vectorize -force-vector-interleave=1 -force-target-instruction-cost=1 -instcombine -mattr=+sve -mtriple aarch64-unknown-linux-gnu \ -; RUN: -scalable-vectorization=on -pass-remarks-missed=loop-vectorize < %s 2>%t | FileCheck %s +; RUN: -pass-remarks-missed=loop-vectorize < %s 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=CHECK-REMARKS define void @vec_load(i64 %N, double* nocapture %a, double* nocapture readonly %b) { diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-predicate-instruction.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -S | FileCheck %s -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s +; RUN: opt < %s -loop-vectorize -S | FileCheck %s +; RUN: opt < %s -loop-vectorize -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64-unknown-linux-gnu" diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reductions.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -loop-vectorize -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize -pass-remarks-missed=loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve,+bf16 -S -scalable-vectorization=on 2>%t | FileCheck %s -check-prefix=CHECK +; RUN: opt < %s -loop-vectorize -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize -pass-remarks-missed=loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve,+bf16 -S 2>%t | FileCheck %s -check-prefix=CHECK ; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARK ; Reduction can be vectorized diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll @@ -1,8 +1,8 @@ -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=false -hints-allow-reordering=false -S 2>%t | FileCheck %s --check-prefix=CHECK-NOT-VECTORIZED -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=false -hints-allow-reordering=true -S 2>%t | FileCheck %s --check-prefix=CHECK-UNORDERED -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=true -hints-allow-reordering=false -S 2>%t | FileCheck %s --check-prefix=CHECK-ORDERED -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=true -hints-allow-reordering=true -S 2>%t | FileCheck %s --check-prefix=CHECK-UNORDERED -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -mtriple aarch64-unknown-linux-gnu -mattr=+sve -hints-allow-reordering=false -S 2>%t | FileCheck %s --check-prefix=CHECK-ORDERED +; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=false -hints-allow-reordering=false -S 2>%t | FileCheck %s --check-prefix=CHECK-NOT-VECTORIZED +; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=false -hints-allow-reordering=true -S 2>%t | FileCheck %s --check-prefix=CHECK-UNORDERED +; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=true -hints-allow-reordering=false -S 2>%t | FileCheck %s --check-prefix=CHECK-ORDERED +; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-ordered-reductions=true -hints-allow-reordering=true -S 2>%t | FileCheck %s --check-prefix=CHECK-UNORDERED +; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -hints-allow-reordering=false -S 2>%t | FileCheck %s --check-prefix=CHECK-ORDERED define float @fadd_strict(float* noalias nocapture readonly %a, i64 %n) #0 { ; CHECK-ORDERED-LABEL: @fadd_strict diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll --- 
a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vectorization-cost-tuning.ll @@ -1,21 +1,21 @@ ; REQUIRES: asserts -; RUN: opt -mtriple=aarch64 -mattr=+sve -scalable-vectorization=on \ +; RUN: opt -mtriple=aarch64 -mattr=+sve \ ; RUN: -force-target-instruction-cost=1 -loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ ; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE4 -; RUN: opt -mtriple=aarch64 -mattr=+sve -mcpu=generic -scalable-vectorization=on \ +; RUN: opt -mtriple=aarch64 -mattr=+sve -mcpu=generic \ ; RUN: -force-target-instruction-cost=1 -loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ ; RUN: | FileCheck %s --check-prefixes=GENERIC,VF-VSCALE4 -; RUN: opt -mtriple=aarch64 -mcpu=neoverse-v1 -scalable-vectorization=on \ +; RUN: opt -mtriple=aarch64 -mcpu=neoverse-v1 \ ; RUN: -force-target-instruction-cost=1 -loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ ; RUN: | FileCheck %s --check-prefixes=NEOVERSE-V1,VF-VSCALE4 -; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 -scalable-vectorization=on \ +; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 \ ; RUN: -force-target-instruction-cost=1 -loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ -; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-4 +; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE4 -; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 -scalable-vectorization=preferred \ +; RUN: opt -mtriple=aarch64 -mcpu=neoverse-n2 \ ; RUN: -force-target-instruction-cost=1 -loop-vectorize -S -debug-only=loop-vectorize < %s 2>&1 \ ; RUN: | FileCheck %s --check-prefixes=NEOVERSE-N2,VF-VSCALE4 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-vf-hint.ll @@ -1,7 +1,7 @@ ; REQUIRES: asserts -; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -loop-vectorize -S -scalable-vectorization=on < %s 2>&1 | FileCheck %s -; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -loop-vectorize -pass-remarks-analysis=loop-vectorize -debug-only=loop-vectorize -S -scalable-vectorization=on < %s 2>&1 | FileCheck --check-prefix=CHECK-DBG %s -; RUN: opt -mtriple=aarch64-none-linux-gnu -loop-vectorize -pass-remarks-analysis=loop-vectorize -debug-only=loop-vectorize -S -scalable-vectorization=on < %s 2>%t | FileCheck --check-prefix=CHECK-NO-SVE %s +; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -loop-vectorize -S < %s 2>&1 | FileCheck %s +; RUN: opt -mtriple=aarch64-none-linux-gnu -mattr=+sve -loop-vectorize -pass-remarks-analysis=loop-vectorize -debug-only=loop-vectorize -S < %s 2>&1 | FileCheck --check-prefix=CHECK-DBG %s +; RUN: opt -mtriple=aarch64-none-linux-gnu -loop-vectorize -pass-remarks-analysis=loop-vectorize -debug-only=loop-vectorize -S < %s 2>%t | FileCheck --check-prefix=CHECK-NO-SVE %s ; RUN: cat %t | FileCheck %s -check-prefix=CHECK-NO-SVE-REMARKS target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve 
-scalable-vectorization=on < %s -S | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve < %s -S | FileCheck %s target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -o - | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -o - | FileCheck %s define void @cond_inv_load_i32i32i16(i32* noalias nocapture %a, i32* noalias nocapture readonly %cond, i16* noalias nocapture readonly %inv, i64 %n) #0 { ; CHECK-LABEL: @cond_inv_load_i32i32i16 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; REQUIRES: asserts -; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -epilogue-vectorization-minimum-VF=0 --debug-only=loop-vectorize -force-target-instruction-cost=1 -S -scalable-vectorization=preferred 2>%t | FileCheck %s +; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -epilogue-vectorization-minimum-VF=0 --debug-only=loop-vectorize -force-target-instruction-cost=1 -S 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=DEBUG -; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -epilogue-vectorization-minimum-VF=8 --debug-only=loop-vectorize -S -scalable-vectorization=preferred 2>%t | FileCheck %s +; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -epilogue-vectorization-minimum-VF=8 --debug-only=loop-vectorize -S 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=DEBUG -; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -epilogue-vectorization-force-VF=8 --debug-only=loop-vectorize -S -scalable-vectorization=preferred 2>%t | FileCheck %s +; RUN: opt < %s -loop-vectorize -force-vector-interleave=2 -epilogue-vectorization-force-VF=8 --debug-only=loop-vectorize -S 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=DEBUG-FORCED target triple = "aarch64-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-extract-last-veclane.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-extract-last-veclane.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-extract-last-veclane.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-extract-last-veclane.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll +++ 
b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -scalable-vectorization=preferred -force-target-instruction-cost=1 -o - | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -force-target-instruction-cost=1 -o - | FileCheck %s define void @gather_nxv4i32_ind64(float* noalias nocapture readonly %a, i64* noalias nocapture readonly %b, float* noalias nocapture %c, i64 %n) #0 { ; CHECK-LABEL: @gather_nxv4i32_ind64 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-illegal-type.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-illegal-type.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-illegal-type.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-illegal-type.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -mattr=+sve -force-vector-width=4 -pass-remarks-analysis=loop-vectorize -S 2>%t | FileCheck %s +; RUN: opt < %s -loop-vectorize -mattr=+sve -force-vector-width=4 -pass-remarks-analysis=loop-vectorize -S 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARKS target triple = "aarch64-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll @@ -1,5 +1,5 @@ ; REQUIRES: asserts -; RUN: opt -scalable-vectorization=on -loop-vectorize -S < %s -debug 2>%t | FileCheck %s +; RUN: opt -loop-vectorize -S < %s -debug 2>%t | FileCheck %s ; RUN: cat %t | FileCheck %s --check-prefix=DEBUG target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -force-target-instruction-cost=1 -dce -instcombine < %s -S | FileCheck %s +; RUN: opt -loop-vectorize -force-target-instruction-cost=1 -dce -instcombine < %s -S | FileCheck %s target triple = "aarch64-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-loads.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-loads.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-loads.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -loop-vectorize -scalable-vectorization=on -mattr=+sve -mtriple aarch64-linux-gnu < %s | FileCheck %s +; RUN: opt -S -loop-vectorize -mattr=+sve -mtriple aarch64-linux-gnu < %s | FileCheck %s define void @invariant_load(i64 %n, i32* noalias nocapture %a, i32* nocapture readonly %b) { ; CHECK-LABEL: @invariant_load diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -S < %s | FileCheck %s +; RUN: opt -loop-vectorize -S < %s | FileCheck %s target triple = "aarch64-unknown-linux-gnu" diff --git 
a/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-large-strides.ll @@ -1,4 +1,4 @@ -; RUN: opt -mtriple aarch64-linux-gnu -mattr=+sve -loop-vectorize -scalable-vectorization=on -dce -instcombine -S <%s | FileCheck %s +; RUN: opt -mtriple aarch64-linux-gnu -mattr=+sve -loop-vectorize -dce -instcombine -S <%s | FileCheck %s define void @stride7_i32(i32* noalias nocapture %dst, i64 %n) #0 { ; CHECK-LABEL: @stride7_i32( diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-masked-loadstore.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -scalable-vectorization=on -o - | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -mattr=+sve -S %s -o - | FileCheck %s define void @mloadstore_f32(float* noalias nocapture %a, float* noalias nocapture readonly %b, i64 %n) { ; CHECK-LABEL: @mloadstore_f32 diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll @@ -1,5 +1,5 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=preferred -force-vector-interleave=1 -force-vector-width=4 -S < %s | FileCheck %s --check-prefix=CHECK-VF4IC1 -; RUN: opt -loop-vectorize -scalable-vectorization=preferred -force-vector-interleave=4 -force-vector-width=4 -S < %s | FileCheck %s --check-prefix=CHECK-VF4IC4 +; RUN: opt -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -S < %s | FileCheck %s --check-prefix=CHECK-VF4IC1 +; RUN: opt -loop-vectorize -force-vector-interleave=4 -force-vector-width=4 -S < %s | FileCheck %s --check-prefix=CHECK-VF4IC4 target triple = "aarch64-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-strict-fadd-cost.ll @@ -1,10 +1,10 @@ ; REQUIRES: asserts ; RUN: opt < %s -loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \ -; RUN: -scalable-vectorization=on -force-vector-width=4 -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF4 +; RUN: -force-vector-width=4 -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF4 ; RUN: opt < %s -loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \ -; RUN: -scalable-vectorization=on -force-vector-width=8 -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF8 +; RUN: -force-vector-width=8 -force-vector-interleave=1 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF8 ; RUN: opt < %s -loop-vectorize -debug -disable-output -force-ordered-reductions=true -hints-allow-reordering=false \ -; RUN: -scalable-vectorization=on -force-vector-width=4 -force-vector-interleave=1 -mcpu=neoverse-n2 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF4-CPU-NEOVERSE-N2 +; 
RUN: -force-vector-width=4 -force-vector-interleave=1 -mcpu=neoverse-n2 -S 2>&1 | FileCheck %s --check-prefix=CHECK-VF4-CPU-NEOVERSE-N2 target triple="aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -loop-vectorize -scalable-vectorization=preferred -prefer-predicate-over-epilogue=predicate-dont-vectorize < %s | FileCheck %s +; RUN: opt -S -loop-vectorize -prefer-predicate-over-epilogue=predicate-dont-vectorize < %s | FileCheck %s ; CHECK-NOT: vector.body: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-type-conv.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-type-conv.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-type-conv.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-type-conv.ll @@ -1,4 +1,4 @@ -; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine < %s -S | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine < %s -S | FileCheck %s target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse-mask4.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse-mask4.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse-mask4.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse-mask4.ll @@ -10,7 +10,7 @@ ; The test checks if the mask is being correctly created, reverted and used -; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll @@ -5,7 +5,7 @@ ; for (int i = N-1; i >= 0; --i) ; a[i] = b[i] + 1.0; -; RUN: opt -loop-vectorize -scalable-vectorization=on -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s +; RUN: opt -loop-vectorize -dce -instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s define void @vector_reverse_f64(i64 %N, double* %a, double* %b) #0{ ; CHECK-LABEL: @vector_reverse_f64( diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -loop-vectorize -scalable-vectorization=on < %s | FileCheck %s +; RUN: opt -S -loop-vectorize < %s | FileCheck %s target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; REQUIRES: asserts -; RUN: opt 
-loop-vectorize -scalable-vectorization=on -S -mtriple=aarch64 -mattr=+sve -debug-only=loop-vectorize < %s 2>&1 | FileCheck %s +; RUN: opt -loop-vectorize -S -mtriple=aarch64 -mattr=+sve -debug-only=loop-vectorize < %s 2>&1 | FileCheck %s target triple = "aarch64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll @@ -1,4 +1,4 @@ -; RUN: opt -mtriple aarch64-linux-gnu -mattr=+sve -loop-vectorize -scalable-vectorization=on -dce -instcombine -S < %s | FileCheck %s +; RUN: opt -mtriple aarch64-linux-gnu -mattr=+sve -loop-vectorize -dce -instcombine -S < %s | FileCheck %s ; Ensure that we can vectorize loops such as: ; int *ptr = c;
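End note, for context on the loop hint this patch consults (not part of the patch): the vectorize.scalable.enable metadata read by LoopVectorizeHints is typically produced from source code via Clang's loop pragma, as in the hypothetical C++ example below; the function and build flags are illustrative only.

// Hypothetical example. With Clang, vectorize_width(4, scalable) is assumed to
// emit llvm.loop.vectorize.width = 4 together with
// llvm.loop.vectorize.scalable.enable = true on this loop, which is the
// metadata the LoopVectorizeHints code in this patch reads. Building with
// -mllvm -scalable-vectorization=off would still override the hint and force
// fixed-width vectors.
void saxpy(float *__restrict dst, const float *__restrict src, float a, long n) {
#pragma clang loop vectorize_width(4, scalable)
  for (long i = 0; i < n; ++i)
    dst[i] = a * src[i] + dst[i];
}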