diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4392,7 +4392,6 @@
   // entire expression in the smaller type.
   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
     assert(!IsInLoopReductionPhi && "Unexpected truncated inloop reduction!");
-    assert(!VF.isScalable() && "scalable vectors not yet supported.");
     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
     Builder.SetInsertPoint(
         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -force-target-supports-scalable-vectors=true -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+define i8 @reduction_add_trunc(i8* noalias nocapture %A) {
+; CHECK-LABEL: @reduction_add_trunc(
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ insertelement (<vscale x 8 x i32> zeroinitializer, i32 255, i32 0), %vector.ph ], [ [[TMP34:%.*]], %vector.body ]
+; CHECK-NEXT:    [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, %vector.ph ], [ [[TMP36:%.*]], %vector.body ]
+; CHECK:         [[TMP14:%.*]] = and <vscale x 8 x i32> [[VEC_PHI]], shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 255, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP15:%.*]] = and <vscale x 8 x i32> [[VEC_PHI1]], shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 255, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK:         [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>*
+; CHECK:         [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, <vscale x 8 x i8>*
+; CHECK-NEXT:    [[TMP26:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
+; CHECK-NEXT:    [[TMP27:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NEXT:    [[TMP28:%.*]] = add <vscale x 8 x i32> [[TMP14]], [[TMP26]]
+; CHECK-NEXT:    [[TMP29:%.*]] = add <vscale x 8 x i32> [[TMP15]], [[TMP27]]
+; CHECK-NEXT:    [[TMP30:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP31:%.*]] = mul i32 [[TMP30]], 16
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP31]]
+; CHECK-NEXT:    [[TMP32:%.*]] = icmp eq i32 [[INDEX_NEXT]], {{%.*}}
+; CHECK-NEXT:    [[TMP33:%.*]] = trunc <vscale x 8 x i32> [[TMP28]] to <vscale x 8 x i8>
+; CHECK-NEXT:    [[TMP34]] = zext <vscale x 8 x i8> [[TMP33]] to <vscale x 8 x i32>
+; CHECK-NEXT:    [[TMP35:%.*]] = trunc <vscale x 8 x i32> [[TMP29]] to <vscale x 8 x i8>
+; CHECK-NEXT:    [[TMP36]] = zext <vscale x 8 x i8> [[TMP35]] to <vscale x 8 x i32>
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[TMP37:%.*]] = trunc <vscale x 8 x i32> [[TMP34]] to <vscale x 8 x i8>
+; CHECK-NEXT:    [[TMP38:%.*]] = trunc <vscale x 8 x i32> [[TMP36]] to <vscale x 8 x i8>
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <vscale x 8 x i8> [[TMP38]], [[TMP37]]
+; CHECK-NEXT:    [[TMP39:%.*]] = call i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8> [[BIN_RDX]])
+; CHECK-NEXT:    [[TMP40:%.*]] = zext i8 [[TMP39]] to i32
+;
+entry:
+  br label %loop
+
+loop:                                             ; preds = %entry, %loop
+  %indvars.iv = phi i32 [ %indvars.iv.next, %loop ], [ 0, %entry ]
+  %sum.02p = phi i32 [ %l9, %loop ], [ 255, %entry ]
+  %sum.02 = and i32 %sum.02p, 255
+  %l2 = getelementptr inbounds i8, i8* %A, i32 %indvars.iv
+  %l3 = load i8, i8* %l2, align 4
+  %l3e = zext i8 %l3 to i32
+  %l9 = add i32 %sum.02, %l3e
+  %indvars.iv.next = add i32 %indvars.iv, 1
+  %exitcond = icmp eq i32 %indvars.iv.next, 256
+  br i1 %exitcond, label %exit, label %loop, !llvm.loop !0
+
+exit:                                             ; preds = %loop
+  %sum.0.lcssa = phi i32 [ %l9, %loop ]
+  %ret = trunc i32 %sum.0.lcssa to i8
+  ret i8 %ret
+}
+
+!0 = distinct !{!0, !1, !2, !3, !4}
+!1 = !{!"llvm.loop.vectorize.width", i32 8}
+!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+!3 = !{!"llvm.loop.interleave.count", i32 2}
+!4 = !{!"llvm.loop.vectorize.enable", i1 true}