Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9065,6 +9065,17 @@
     VPBB->appendRecipe(Recipe);
     return VPBB;
   }
+
+  if (IsUniform && isa<LoadInst>(I) &&
+      !Legal->blockNeedsPredication(I->getParent()) && Range.Start.isVector()) {
+    assert(CM.foldTailByMasking() &&
+           "Uniform load is unexpectedly marked as predicated!");
+    LLVM_DEBUG(dbgs() << "LV: Replicating uniform load in predicated loop:"
+                      << *I << "\n");
+    VPBB->appendRecipe(Recipe);
+    return VPBB;
+  }
+
   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
   assert(VPBB->getSuccessors().empty() &&
          "VPBB has successors when handling predicated replication.");
Index: llvm/test/Transforms/LoopVectorize/AArch64/tail-fold-uniform-memops.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/tail-fold-uniform-memops.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/tail-fold-uniform-memops.ll
@@ -5,6 +5,46 @@
 
 target triple = "aarch64-linux-gnu"
 
+; The original loop had an unconditional uniform load. Let's make sure
+; we don't artificially create new predicated blocks for the load.
+define void @uniform_load(i32* noalias %dst, i32* noalias readonly %src, i64 %n) #0 {
+; CHECK-LABEL: @uniform_load(
+; CHECK: vector.body:
+; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[IDX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i64> poison, i64 [[IDX]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i64> [[TMP2]], <i64 0, i64 1, i64 2, i64 3>
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[IDX]], 0
+; CHECK-NEXT: [[LOOP_PRED:%.*]] = icmp ule <4 x i64> [[INDUCTION]]
+; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, i32* %src, align 4
+; CHECK-NOT: load i32, i32* %src, align 4
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[LOAD_VAL]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* %dst, i64 [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0
+; CHECK-NEXT: [[STORE_PTR:%.*]] = bitcast i32* [[TMP7]] to <4 x i32>*
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP5]], <4 x i32>* [[STORE_PTR]], i32 4, <4 x i1> [[LOOP_PRED]])
+; CHECK-NEXT: [[IDX_NEXT]] = add i64 [[IDX]], 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[IDX_NEXT]], %n.vec
+; CHECK-NEXT: br i1 [[CMP]], label %middle.block, label %vector.body
+
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %val = load i32, i32* %src, align 4
+  %arrayidx = getelementptr inbounds i32, i32* %dst, i64 %indvars.iv
+  store i32 %val, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+
 ; The original loop had a conditional uniform load. In this case we actually
 ; do need to perform conditional loads and so we end up using a gather instead.
 ; However, we at least ensure the mask is the overlap of the loop predicate
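
Note (not part of the diff): the observable effect of the new early-out is that a
loop-invariant load in a tail-folded loop is emitted once and unconditionally in the
vector body and then broadcast, instead of being scalarized into per-lane predicated
blocks; only the store keeps the loop predicate. Below is a minimal sketch of the
vector body this is expected to produce for @uniform_load at VF=4, reconstructed from
the CHECK lines above; all value names are illustrative, and %btc.splat stands in for
the splatted backedge trip count, which the vectorizer computes elsewhere:

vector.body:
  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  ; Build the tail-folding predicate: lane index <= backedge trip count.
  %idx.ins = insertelement <4 x i64> poison, i64 %index, i32 0
  %idx.splat = shufflevector <4 x i64> %idx.ins, <4 x i64> poison, <4 x i32> zeroinitializer
  %induction = add <4 x i64> %idx.splat, <i64 0, i64 1, i64 2, i64 3>
  %mask = icmp ule <4 x i64> %induction, %btc.splat
  ; The uniform load executes unconditionally and is broadcast to all lanes.
  %val = load i32, i32* %src, align 4
  %val.ins = insertelement <4 x i32> poison, i32 %val, i32 0
  %val.splat = shufflevector <4 x i32> %val.ins, <4 x i32> poison, <4 x i32> zeroinitializer
  ; Only the store is masked by the loop predicate.
  %gep = getelementptr inbounds i32, i32* %dst, i64 %index
  %vec.ptr = bitcast i32* %gep to <4 x i32>*
  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val.splat, <4 x i32>* %vec.ptr, i32 4, <4 x i1> %mask)
  %index.next = add i64 %index, 4
  %cmp = icmp eq i64 %index.next, %n.vec
  br i1 %cmp, label %middle.block, label %vector.body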