Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -566,6 +566,8 @@
   /// element.
   virtual Value *getBroadcastInstrs(Value *V);
 
+  LoopVectorizationLegality *getLegal() const { return Legal; }
+
 protected:
   friend class LoopVectorizationPlanner;
 
@@ -7465,7 +7467,8 @@
           Cost = getGatherScatterCost(&I, VF);
           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
         } else {
-          assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
+          assert((isa<LoadInst>(&I) || !VF.isScalable() ||
+                  Legal->isUniformMemOp(I)) &&
                  "Cannot yet scalarize uniform stores");
           Cost = getUniformMemOpCost(&I, VF);
           setWideningDecision(&I, VF, CM_Scalarize, Cost);
@@ -9811,6 +9814,15 @@
     return;
   }
 
+  Instruction *I = getUnderlyingInstr();
+  if (!IsUniform && State.VF.isScalable() && isa<StoreInst>(I) &&
+      State.ILV->getLegal()->isUniformMemOp(*I)) {
+    VPLane Lane = VPLane::getLastLaneForVF(State.VF);
+    State.ILV->scalarizeInstruction(
+        I, this, *this, VPIteration(State.UF - 1, Lane), IsPredicated, State);
+    return;
+  }
+
   // Generate scalar instances for all VF lanes of all UF parts, unless the
   // instruction is uniform in which case generate only the first lane for each
   // of the UF parts.
@@ -9819,9 +9831,8 @@
          "Can't scalarize a scalable vector");
   for (unsigned Part = 0; Part < State.UF; ++Part)
     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
-      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
-                                      VPIteration(Part, Lane), IsPredicated,
-                                      State);
+      State.ILV->scalarizeInstruction(I, this, *this, VPIteration(Part, Lane),
+                                      IsPredicated, State);
 }
 
 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
Index: llvm/test/Transforms/LoopVectorize/AArch64/sve-uniform-store.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/LoopVectorize/AArch64/sve-uniform-store.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-vectorize -scalable-vectorization=preferred -mtriple aarch64-linux-gnu -mattr=+sve -S < %s | FileCheck %s
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @uniform_store_i1(i1* noalias %dst, i64* noalias %start, i64 %N) {
+; CHECK-LABEL: @uniform_store_i1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 4
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP3]], 4
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i64, i64* [[START:%.*]], i64 [[N_VEC]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64*> poison, i64* [[START]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64*> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64*> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT10:%.*]] = insertelement <vscale x 2 x i64*> poison, i64* [[START]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT11:%.*]] = shufflevector <vscale x 2 x i64*> [[BROADCAST_SPLATINSERT10]], <vscale x 2 x i64*> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = add <vscale x 2 x i64> shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 0, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP6]]
+; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i64, i64* [[START]], <vscale x 2 x i64> [[TMP7]]
+; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr i64, i64* [[START]], i64 [[TMP8]]
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr i64, i64* [[START]], i64 [[TMP9]]
+; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP10]], 2
+; CHECK-NEXT:    [[DOTSPLATINSERT4:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT5:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT4]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP12:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT5]], [[TMP5]]
+; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP12]]
+; CHECK-NEXT:    [[NEXT_GEP6:%.*]] = getelementptr i64, i64* [[START]], <vscale x 2 x i64> [[TMP13]]
+; CHECK-NEXT:    [[TMP14:%.*]] = add i64 [[TMP11]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[INDEX]], [[TMP14]]
+; CHECK-NEXT:    [[NEXT_GEP7:%.*]] = getelementptr i64, i64* [[START]], i64 [[TMP15]]
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP11]], 1
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[INDEX]], [[TMP16]]
+; CHECK-NEXT:    [[NEXT_GEP8:%.*]] = getelementptr i64, i64* [[START]], i64 [[TMP17]]
+; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP19]], 2
+; CHECK-NEXT:    [[TMP21:%.*]] = add i64 [[TMP20]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP21]], 1
+; CHECK-NEXT:    [[TMP23:%.*]] = add i64 [[INDEX]], [[TMP22]]
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i64, i64* [[NEXT_GEP2]], i32 0
+; CHECK-NEXT:    [[TMP25:%.*]] = bitcast i64* [[TMP24]] to <vscale x 2 x i64>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP25]], align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP27:%.*]] = mul i32 [[TMP26]], 2
+; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr i64, i64* [[NEXT_GEP2]], i32 [[TMP27]]
+; CHECK-NEXT:    [[TMP29:%.*]] = bitcast i64* [[TMP28]] to <vscale x 2 x i64>*
+; CHECK-NEXT:    [[WIDE_LOAD9:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP29]], align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i64, <vscale x 2 x i64*> [[NEXT_GEP]], i64 1
+; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds i64, <vscale x 2 x i64*> [[NEXT_GEP6]], i64 1
+; CHECK-NEXT:    [[TMP32:%.*]] = icmp eq <vscale x 2 x i64*> [[TMP30]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP33:%.*]] = icmp eq <vscale x 2 x i64*> [[TMP31]], [[BROADCAST_SPLAT11]]
+; CHECK-NEXT:    [[TMP34:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP35:%.*]] = mul i32 [[TMP34]], 2
+; CHECK-NEXT:    [[TMP36:%.*]] = sub i32 [[TMP35]], 1
+; CHECK-NEXT:    [[TMP37:%.*]] = extractelement <vscale x 2 x i1> [[TMP33]], i32 [[TMP36]]
+; CHECK-NEXT:    store i1 [[TMP37]], i1* [[DST:%.*]], align 1
+; CHECK-NEXT:    [[TMP38:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP39:%.*]] = mul i64 [[TMP38]], 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP39]]
+; CHECK-NEXT:    [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %first.sroa = phi i64* [ %incdec.ptr, %for.body ], [ %start, %entry ]
+  %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+  %iv.next = add i64 %iv, 1
+  %0 = load i64, i64* %first.sroa
+  %incdec.ptr = getelementptr inbounds i64, i64* %first.sroa, i64 1
+  %cmp.not = icmp eq i64* %incdec.ptr, %start
+  store i1 %cmp.not, i1* %dst
+  %cmp = icmp ult i64 %iv, %N
+  br i1 %cmp, label %for.body, label %end
+
+end:
+  ret void
+}
+
+; Ensure conditional i1 stores do not vectorize
+define void @cond_store_i1(i1* noalias %dst, i8* noalias %start, i32 %cond, i64 %N) {
+; CHECK-LABEL: @cond_store_i1(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[FIRST_SROA:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[FIRST_SROA]], i64 1
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP0]], 10
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[IF_END]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[CMP_STORE:%.*]] = icmp eq i8* [[START:%.*]], [[INCDEC_PTR]]
+; CHECK-NEXT:    store i1 [[CMP_STORE]], i1* [[DST:%.*]], align 1
+; CHECK-NEXT:    br label [[IF_END]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp eq i8* [[INCDEC_PTR]], [[START]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:
+  %first.sroa = phi i8* [ %incdec.ptr, %if.end ], [ null, %entry ]
+  %incdec.ptr = getelementptr inbounds i8, i8* %first.sroa, i64 1
+  %0 = load i8, i8* %incdec.ptr
+  %tobool.not = icmp eq i8 %0, 10
+  br i1 %tobool.not, label %if.end, label %if.then
+
+if.then:
+  %cmp.store = icmp eq i8* %start, %incdec.ptr
+  store i1 %cmp.store, i1* %dst
+  br label %if.end
+
+if.end:
+  %cmp.not = icmp eq i8* %incdec.ptr, %start
+  br i1 %cmp.not, label %for.end, label %for.body
+
+for.end:
+  ret void
+}