diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4636,6 +4636,14 @@
   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
          "This function should not be visited twice for the same VF");
 
+  // This avoids any chances of creating a REPLICATE recipe during planning
+  // since that would result in generation of scalarized code during execution,
+  // which is not supported for scalable vectors.
+  if (VF.isScalable()) {
+    Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
+    return;
+  }
+
   SmallSetVector<Instruction *, 8> Worklist;
 
   // These sets are used to seed the analysis with pointers used by memory
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=aarch64 -loop-vectorize -S | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; The test checks that scalarized code is not generated for SVE.
+; It creates a scenario where the gep instruction is used outside
+; the loop, preventing it (and consequently the loop induction
+; update variable) from being classified as 'uniform'.
+
+define void @test_no_scalarization(i64* %a) #0 {
+; CHECK-LABEL: @test_no_scalarization(
+; CHECK-NEXT: L.entry:
+; CHECK-NEXT: [[IDX:%.*]] = alloca i32, align 4
+; CHECK-NEXT: store i32 100, i32* [[IDX]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[IDX]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], -1
+; CHECK-NEXT: [[SMIN:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP1]], i32 1)
+; CHECK-NEXT: [[TMP2:%.*]] = sub i32 [[TMP0]], [[SMIN]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[TMP3]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP2]], [[TMP4]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP0]], i32 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
+; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 2 x i32> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = mul <vscale x 2 x i32> [[TMP8]], shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 -1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i32> [[DOTSPLAT]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[TMP10]], 2
+; CHECK-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
+; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP12]], i32 0
+; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i32> [[VEC_IND]], [[DOTSPLAT2]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i64, i64* [[A:%.*]], <vscale x 2 x i32> [[VEC_IND]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i64, i64* [[A]], <vscale x 2 x i32> [[STEP_ADD]]
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <vscale x 2 x i64*> [[TMP13]], i32 0
+; CHECK-NEXT: [[TMP16:%.*]] = bitcast i64* [[TMP15]] to double*
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 2 x i64*> [[TMP14]], i32 0
+; CHECK-NEXT: [[TMP18:%.*]] = bitcast i64* [[TMP17]] to double*
+; CHECK-NEXT: [[TMP19:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP20:%.*]] = mul i32 [[TMP19]], 2
+; CHECK-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = sub i32 1, [[TMP20]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr double, double* [[TMP16]], i32 [[TMP21]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr double, double* [[TMP23]], i32 [[TMP22]]
+; CHECK-NEXT: [[TMP25:%.*]] = bitcast double* [[TMP24]] to <vscale x 2 x double>*
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP25]], align 8
+; CHECK-NEXT: [[REVERSE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]])
+; CHECK-NEXT: [[TMP26:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP27:%.*]] = mul i32 [[TMP26]], 2
+; CHECK-NEXT: [[TMP28:%.*]] = mul i32 -1, [[TMP27]]
+; CHECK-NEXT: [[TMP29:%.*]] = sub i32 1, [[TMP27]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr double, double* [[TMP16]], i32 [[TMP28]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr double, double* [[TMP30]], i32 [[TMP29]]
+; CHECK-NEXT: [[TMP32:%.*]] = bitcast double* [[TMP31]] to <vscale x 2 x double>*
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP32]], align 8
+; CHECK-NEXT: [[REVERSE5:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD4]])
+; CHECK-NEXT: [[TMP33:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP34:%.*]] = mul i32 [[TMP33]], 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP34]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[STEP_ADD]], [[DOTSPLAT2]]
+; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[TMP2]], [[N_VEC]]
+; CHECK-NEXT: [[TMP36:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT: [[TMP37:%.*]] = mul i32 [[TMP36]], 2
+; CHECK-NEXT: [[TMP38:%.*]] = sub i32 [[TMP37]], 1
+; CHECK-NEXT: [[TMP39:%.*]] = extractelement <vscale x 2 x i64*> [[TMP14]], i32 [[TMP38]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[L_LB19_337:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[TMP0]], [[L_ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[L_LB19_336:%.*]]
+; CHECK: L.LB19_336:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[INDVARS_IV_NEXT:%.*]], [[L_LB19_336]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i32 [[INDVARS_IV]], -1
+; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i64, i64* [[A]], i32 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP41:%.*]] = bitcast i64* [[TMP40]] to double*
+; CHECK-NEXT: [[TMP42:%.*]] = load double, double* [[TMP41]], align 8
+; CHECK-NEXT: [[TMP43:%.*]] = icmp sgt i32 [[INDVARS_IV_NEXT]], 1
+; CHECK-NEXT: br i1 [[TMP43]], label [[L_LB19_336]], label [[L_LB19_337]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK: L.LB19_337:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i64* [ [[TMP40]], [[L_LB19_336]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: store i64 1, i64* [[DOTLCSSA]], align 8
+; CHECK-NEXT: ret void
+;
+L.entry:
+  %idx = alloca i32, align 4
+  store i32 100, i32* %idx, align 4
+  %0 = load i32, i32* %idx, align 4
+  br label %L.LB19_336
+
+L.LB19_336:                                       ; preds = %L.LB19_336, %L.entry
+  %indvars.iv = phi i32 [ %indvars.iv.next, %L.LB19_336 ], [ %0, %L.entry ]
+  %indvars.iv.next = add nsw i32 %indvars.iv, -1
+  %1 = getelementptr i64, i64* %a, i32 %indvars.iv
+  %2 = bitcast i64* %1 to double*
+  %3 = load double, double* %2, align 8
+  %4 = icmp sgt i32 %indvars.iv.next, 1
+  br i1 %4, label %L.LB19_336, label %L.LB19_337
+
+L.LB19_337:                                       ; preds = %L.LB19_336
+  store i64 1, i64* %1, align 8
+  ret void
+}
+
+attributes #0 = { nofree norecurse noreturn nosync nounwind "target-features"="+neon,+v8a,+sve" }
+