diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -171,7 +171,8 @@
   void addRuntimePointerChecks(unsigned Num) { NumRuntimePointerChecks = Num; }
 
-  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints);
+  bool doesNotMeet(Function *F, Loop *L, const LoopVectorizeHints &Hints,
+                   bool CanIgnoreRTThreshold);
 
 private:
   unsigned NumRuntimePointerChecks = 0;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -238,8 +238,9 @@
   }
 }
 
-bool LoopVectorizationRequirements::doesNotMeet(
-    Function *F, Loop *L, const LoopVectorizeHints &Hints) {
+bool LoopVectorizationRequirements::doesNotMeet(Function *F, Loop *L,
+                                                const LoopVectorizeHints &Hints,
+                                                bool IgnoreRTThreshold) {
   const char *PassName = Hints.vectorizeAnalysisPassName();
   bool Failed = false;
   if (UnsafeAlgebraInst && !Hints.allowReordering()) {
@@ -258,8 +259,12 @@
       NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
   bool ThresholdReached =
       NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
-  if ((ThresholdReached && !Hints.allowReordering()) ||
-      PragmaThresholdReached) {
+  bool DoubleThresholdReached =
+      NumRuntimePointerChecks >
+      2 * VectorizerParams::RuntimeMemoryCheckThreshold;
+  if ((!IgnoreRTThreshold && ((ThresholdReached && !Hints.allowReordering()) ||
+                              PragmaThresholdReached)) ||
+      (DoubleThresholdReached && !Hints.allowReordering())) {
     ORE.emit([&]() {
       return OptimizationRemarkAnalysisAliasing(PassName, "CantReorderMemOps",
                                                 L->getStartLoc(),
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -370,7 +370,9 @@
   return None;
 }
 
+struct GeneratedRTChecks;
+
 namespace llvm {
 
 /// InnerLoopVectorizer vectorizes loops which contain only one basic
 /// block to a specified vectorization factor (VF).
@@ -1302,13 +1304,6 @@
   /// i.e. either vector version isn't available, or is too expensive.
   unsigned getVectorCallCost(CallInst *CI, unsigned VF, bool &NeedToScalarize);
 
-private:
-  unsigned NumPredStores = 0;
-
-  /// \return An upper bound for the vectorization factor, larger than zero.
-  /// One is returned if vectorization should best be avoided due to cost.
-  unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
-
   /// The vectorization cost is a combination of the cost itself and a boolean
   /// indicating whether any of the contributing operations will actually
   /// operate on
@@ -1318,16 +1313,25 @@
   /// actually taken place).
   using VectorizationCostTy = std::pair<unsigned, bool>;
 
+  /// Returns the execution time cost of an instruction for a given vector
+  /// width. Vector width of one means scalar.
+  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
+
+  float ScalarCost;
+
+private:
+  unsigned NumPredStores = 0;
+
+  /// \return An upper bound for the vectorization factor, larger than zero.
+  /// One is returned if vectorization should best be avoided due to cost.
+  unsigned computeFeasibleMaxVF(unsigned ConstTripCount);
+
   /// Returns the expected execution cost. The unit of the cost does
   /// not matter because we use the 'cost' units to compare different
   /// vector widths. The cost that is returned is *not* normalized by
   /// the factor width.
   VectorizationCostTy expectedCost(unsigned VF);
 
-  /// Returns the execution time cost of an instruction for a given vector
-  /// width. Vector width of one means scalar.
-  VectorizationCostTy getInstructionCost(Instruction *I, unsigned VF);
-
   /// The cost-computation logic from getInstructionCost which provides
   /// the vector type as an output parameter.
   unsigned getInstructionCost(Instruction *I, unsigned VF, Type *&VectorTy);
@@ -1562,6 +1566,13 @@
     return Checks;
   }
 
+  unsigned getCost(LoopVectorizationCostModel &CM) {
+    unsigned RTCheckCost = 0;
+    for (Instruction &I : *TmpBlock)
+      RTCheckCost += CM.getInstructionCost(&I, 1).first;
+    return RTCheckCost;
+  }
+
   ~GeneratedRTChecks() {
     if (!TmpBlock)
       return;
@@ -5176,7 +5187,7 @@
 VectorizationFactor
 LoopVectorizationCostModel::selectVectorizationFactor(unsigned MaxVF) {
   float Cost = expectedCost(1).first;
-  const float ScalarCost = Cost;
+  ScalarCost = Cost;
   unsigned Width = 1;
   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << (int)ScalarCost << ".\n");
@@ -7852,11 +7863,21 @@
   GeneratedRTChecks Checks = GeneratedRTChecks::Create(
       L->getLoopPreheader(), *LVL.getLAI(), PSE, DT, LI);
 
+  bool CanIgnoreRTThreshold = false;
+  unsigned RTCost = Checks.getCost(CM);
+  if (ExpectedTC) {
+    // If the expected cost of the runtime checks is a small fraction of the
+    // expected cost of the scalar loop, we can be more aggressive with using
+    // runtime checks.
+    CanIgnoreRTThreshold = RTCost < (*ExpectedTC * CM.ScalarCost * 0.005);
+    LLVM_DEBUG(dbgs() << "LV: Cost of runtime check: " << RTCost << " "
+                      << *ExpectedTC * CM.ScalarCost << "\n");
+  }
   // Identify the diagnostic messages that should be produced.
   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
   bool VectorizeLoop = true, InterleaveLoop = true;
-  if (Requirements.doesNotMeet(F, L, Hints)) {
+  if (Requirements.doesNotMeet(F, L, Hints, CanIgnoreRTThreshold)) {
     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: loop did not meet vectorization "
                       "requirements.\n");
     Hints.emitRemarkWithHints();
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-size-based-threshold.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-size-based-threshold.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-size-based-threshold.ll
@@ -0,0 +1,159 @@
+; RUN: opt -loop-vectorize -S %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+%struct.snork = type <{ i32, i32, i16, [6 x i8], %struct.snork.0, i32, [4 x i8] }>
+%struct.snork.0 = type { [4 x %struct.zot] }
+%struct.zot = type { %struct.baz }
+%struct.baz = type { %struct.pluto }
+%struct.pluto = type { %struct.quux }
+%struct.quux = type { %struct.widget }
+%struct.widget = type { %struct.baz.1* }
+%struct.baz.1 = type { i32 (...)**, %struct.zot.2 }
+%struct.zot.2 = type { %struct.pluto.3 }
+%struct.pluto.3 = type { %struct.bar }
+%struct.bar = type { %struct.barney, %struct.blam.4 }
+%struct.barney = type { %struct.blam }
+%struct.blam = type { i8 }
+%struct.blam.4 = type { i16*, i16*, i16* }
+%struct.foo = type { i32, i16*, i32, i32 }
+%struct.blam.5 = type { i32, i16*, i32, i32 }
+
+; The trip count of the loop in this function is too small to warrant large runtime checks.
+; CHECK-LABEL: define {{.*}} @test_tc_too_small
+; CHECK-NOT: vector.memcheck
+; CHECK-NOT: vector.body
+define void @test_tc_too_small(%struct.snork* nocapture readonly %arg, %struct.foo* nocapture readonly byval(%struct.foo) align 8 %arg1, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg2, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg3) {
+entry:
+  %tmp11 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 0
+  %tmp12 = load i32, i32* %tmp11, align 8
+  %tmp13 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 1
+  %tmp14 = load i16*, i16** %tmp13, align 8
+  %tmp17 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg2, i64 0, i32 1
+  %tmp18 = load i16*, i16** %tmp17, align 8
+  %tmp19 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 0
+  %tmp20 = load i32, i32* %tmp19, align 8
+  %tmp21 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 1
+  %tmp22 = load i16*, i16** %tmp21, align 8
+  %tmp23 = getelementptr inbounds %struct.snork, %struct.snork* %arg, i64 0, i32 1
+  %tmp24 = load i32, i32* %tmp23, align 4
+  %tmp26 = icmp sgt i32 %tmp24, 0
+  %tmp39 = sext i32 %tmp12 to i64
+  %tmp40 = shl nsw i64 %tmp39, 1
+  %tmp41 = sext i32 %tmp20 to i64
+  %tmp42 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp41
+  br label %bb54
+
+bb54:                                             ; preds = %bb54, %entry
+  %tmp55 = phi i64 [ 0, %entry ], [ %tmp88, %bb54 ]
+  %tmp56 = getelementptr inbounds i16, i16* %tmp18, i64 %tmp55
+  %tmp57 = load i16, i16* %tmp56, align 2
+  %tmp58 = sext i16 %tmp57 to i32
+  %tmp59 = getelementptr inbounds i16, i16* %tmp14, i64 %tmp55
+  %tmp60 = load i16, i16* %tmp59, align 2
+  %tmp61 = sext i16 %tmp60 to i32
+  %tmp62 = mul nsw i32 %tmp61, 11
+  %tmp63 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp39
+  %tmp64 = load i16, i16* %tmp63, align 2
+  %tmp65 = sext i16 %tmp64 to i32
+  %tmp66 = mul nsw i32 %tmp65, -4
+  %tmp67 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp40
+  %tmp68 = load i16, i16* %tmp67, align 2
+  %tmp69 = sext i16 %tmp68 to i32
+  %tmp70 = add nsw i32 %tmp62, 4
+  %tmp71 = add nsw i32 %tmp70, %tmp66
+  %tmp72 = add nsw i32 %tmp71, %tmp69
+  %tmp73 = lshr i32 %tmp72, 3
+  %tmp74 = add nsw i32 %tmp73, %tmp58
+  %tmp75 = lshr i32 %tmp74, 1
+  %tmp76 = mul nsw i32 %tmp61, 5
+  %tmp77 = shl nsw i32 %tmp65, 2
+  %tmp78 = add nsw i32 %tmp76, 4
+  %tmp79 = add nsw i32 %tmp78, %tmp77
+  %tmp80 = sub nsw i32 %tmp79, %tmp69
+  %tmp81 = lshr i32 %tmp80, 3
+  %tmp82 = sub nsw i32 %tmp81, %tmp58
+  %tmp83 = lshr i32 %tmp82, 1
+  %tmp84 = trunc i32 %tmp75 to i16
+  %tmp85 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp55
+  store i16 %tmp84, i16* %tmp85, align 2
+  %tmp86 = trunc i32 %tmp83 to i16
+  %tmp87 = getelementptr inbounds i16, i16* %tmp42, i64 %tmp55
+  store i16 %tmp86, i16* %tmp87, align 2
+  %tmp88 = add nuw nsw i64 %tmp55, 1
+  %tmp89 = icmp ult i64 %tmp55, 50
+  br i1 %tmp89, label %bb54, label %bb90
+
+bb90:                                             ; preds = %bb54
+  ret void
+}
+
+; The trip count of the loop in this function is high enough to warrant large runtime checks.
+; CHECK-LABEL: define {{.*}} @test_tc_big_enough
+; CHECK: vector.memcheck
+; CHECK: vector.body
+define void @test_tc_big_enough(%struct.snork* nocapture readonly %arg, %struct.foo* nocapture readonly byval(%struct.foo) align 8 %arg1, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg2, %struct.blam.5* nocapture readonly byval(%struct.blam.5) align 8 %arg3) {
+entry:
+  %tmp11 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 0
+  %tmp12 = load i32, i32* %tmp11, align 8
+  %tmp13 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg3, i64 0, i32 1
+  %tmp14 = load i16*, i16** %tmp13, align 8
+  %tmp17 = getelementptr inbounds %struct.blam.5, %struct.blam.5* %arg2, i64 0, i32 1
+  %tmp18 = load i16*, i16** %tmp17, align 8
+  %tmp19 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 0
+  %tmp20 = load i32, i32* %tmp19, align 8
+  %tmp21 = getelementptr inbounds %struct.foo, %struct.foo* %arg1, i64 0, i32 1
+  %tmp22 = load i16*, i16** %tmp21, align 8
+  %tmp23 = getelementptr inbounds %struct.snork, %struct.snork* %arg, i64 0, i32 1
+  %tmp24 = load i32, i32* %tmp23, align 4
+  %tmp26 = icmp sgt i32 %tmp24, 0
+  %tmp39 = sext i32 %tmp12 to i64
+  %tmp40 = shl nsw i64 %tmp39, 1
+  %tmp41 = sext i32 %tmp20 to i64
+  %tmp42 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp41
+  br label %bb54
+
+bb54:                                             ; preds = %bb54, %entry
+  %tmp55 = phi i64 [ 0, %entry ], [ %tmp88, %bb54 ]
+  %tmp56 = getelementptr inbounds i16, i16* %tmp18, i64 %tmp55
+  %tmp57 = load i16, i16* %tmp56, align 2
+  %tmp58 = sext i16 %tmp57 to i32
+  %tmp59 = getelementptr inbounds i16, i16* %tmp14, i64 %tmp55
+  %tmp60 = load i16, i16* %tmp59, align 2
+  %tmp61 = sext i16 %tmp60 to i32
+  %tmp62 = mul nsw i32 %tmp61, 11
+  %tmp63 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp39
+  %tmp64 = load i16, i16* %tmp63, align 2
+  %tmp65 = sext i16 %tmp64 to i32
+  %tmp66 = mul nsw i32 %tmp65, -4
+  %tmp67 = getelementptr inbounds i16, i16* %tmp59, i64 %tmp40
+  %tmp68 = load i16, i16* %tmp67, align 2
+  %tmp69 = sext i16 %tmp68 to i32
+  %tmp70 = add nsw i32 %tmp62, 4
+  %tmp71 = add nsw i32 %tmp70, %tmp66
+  %tmp72 = add nsw i32 %tmp71, %tmp69
+  %tmp73 = lshr i32 %tmp72, 3
+  %tmp74 = add nsw i32 %tmp73, %tmp58
+  %tmp75 = lshr i32 %tmp74, 1
+  %tmp76 = mul nsw i32 %tmp61, 5
+  %tmp77 = shl nsw i32 %tmp65, 2
+  %tmp78 = add nsw i32 %tmp76, 4
+  %tmp79 = add nsw i32 %tmp78, %tmp77
+  %tmp80 = sub nsw i32 %tmp79, %tmp69
+  %tmp81 = lshr i32 %tmp80, 3
+  %tmp82 = sub nsw i32 %tmp81, %tmp58
+  %tmp83 = lshr i32 %tmp82, 1
+  %tmp84 = trunc i32 %tmp75 to i16
+  %tmp85 = getelementptr inbounds i16, i16* %tmp22, i64 %tmp55
+  store i16 %tmp84, i16* %tmp85, align 2
+  %tmp86 = trunc i32 %tmp83 to i16
+  %tmp87 = getelementptr inbounds i16, i16* %tmp42, i64 %tmp55
+  store i16 %tmp86, i16* %tmp87, align 2
+  %tmp88 = add nuw nsw i64 %tmp55, 1
+  %tmp89 = icmp ult i64 %tmp55, 500
+  br i1 %tmp89, label %bb54, label %bb90
+
+bb90:                                             ; preds = %bb54
+  ret void
+}
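
Note (not part of the patch): a minimal standalone sketch of the size-based heuristic the patch adds, for illustration only. The free function, its parameter names, and the example cost numbers below are assumptions; the 0.005 factor and the RTCheckCost/ExpectedTC/ScalarCost quantities mirror the LoopVectorize.cpp change above.

#include <cstdint>

// Runtime checks are treated as negligible, and the static
// NumRuntimePointerChecks threshold may be ignored, when their one-off cost
// stays below 0.5% of the expected scalar execution cost of the whole loop
// (per-iteration scalar cost times expected trip count).
bool canIgnoreRTCheckThreshold(unsigned RTCheckCost, uint64_t ExpectedTC,
                               float ScalarIterationCost) {
  return RTCheckCost < ExpectedTC * ScalarIterationCost * 0.005f;
}

// Worked example with assumed numbers: for @test_tc_big_enough, a trip count
// of roughly 500 and an assumed per-iteration scalar cost of 40 give a budget
// of 500 * 40 * 0.005 = 100 cost units, so a multi-pointer memcheck block
// fits and vectorization proceeds. For @test_tc_too_small (trip count ~50),
// the budget is only 10 units and the checks remain subject to the threshold.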