Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2695,8 +2695,11 @@
   if (C->isZero())
     return;
 
+  // FIXME: FoldTailByMasking no longer implies OptForSize, this should be
+  // checking CM_ScalarEpilogueNotAllowedOptSize
   assert(!Cost->foldTailByMasking() &&
          "Cannot SCEV check stride or overflow when folding tail");
+
   // Create a new block containing the stride check.
   BB->setName("vector.scevcheck");
   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
@@ -2729,7 +2732,6 @@
   if (!MemRuntimeCheck)
     return;
 
-  assert(!Cost->foldTailByMasking() && "Cannot check memory when folding tail");
   // Create a new block containing the memory check.
   BB->setName("vector.memcheck");
   auto *NewBB = BB->splitBasicBlock(BB->getTerminator(), "vector.ph");
Index: llvm/test/Transforms/LoopVectorize/X86/tail-folding-memcheck.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/LoopVectorize/X86/tail-folding-memcheck.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -loop-vectorize -mcpu=core-avx2 -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define dso_local void @_Z1GPiS_S_(i32* nocapture %A, i32* nocapture readonly %B, i32* nocapture readonly %C) local_unnamed_addr #0 {
+; CHECK-LABEL: @_Z1GPiS_S_
+; CHECK: vector.memcheck:
+; CHECK: vector.body:
+; CHECK: @llvm.masked.load.v8i32.p0v8i32
+; CHECK: @llvm.masked.load.v8i32.p0v8i32
+; CHECK: @llvm.masked.store.v8i32.p0v8i32
+; CHECK: br i1 %12, label %middle.block, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
+  %1 = load i32, i32* %arrayidx2, align 4
+  %add = add nsw i32 %1, %0
+  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+  store i32 %add, i32* %arrayidx4, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 430
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !6
+}
+
+attributes #0 = { nofree norecurse nounwind uwtable }
+
+!6 = distinct !{!6, !7, !8}
+!7 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
+!8 = !{!"llvm.loop.vectorize.enable", i1 true}