diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3995,16 +3995,14 @@
           break;
         }
       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
-        auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
-                             ->getNumElements();
+        auto Elements0 =
+            cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
         auto *O0 = B.CreateZExtOrTrunc(
-            SI->getOperand(0),
-            FixedVectorType::get(ScalarTruncatedTy, Elements0));
-        auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
-                             ->getNumElements();
+            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
+        auto Elements1 =
+            cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
         auto *O1 = B.CreateZExtOrTrunc(
-            SI->getOperand(1),
-            FixedVectorType::get(ScalarTruncatedTy, Elements1));
+            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
 
         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S < %s -loop-vectorize -instsimplify -scalable-vectorization=on -force-target-supports-scalable-vectors | FileCheck %s
+
+define void @trunc_minimal_bitwidths_shufflevector (i8* %p, i8 %arg1, i64 %len) {
+; CHECK-LABEL: @trunc_minimal_bitwidths_shufflevector
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CONV13:%.*]] = zext i8 [[ARG1:%.*]] to i32
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[LEN:%.*]], [[TMP1]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[LEN]], [[TMP3]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[LEN]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[CONV13]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]] to <vscale x 4 x i8>
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[TMP5]] to <vscale x 4 x i8>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[TMP6]], align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = shl <vscale x 4 x i8> [[TMP7]], trunc (<vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 4, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i8>)
+; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 4 x i8> [[TMP8]], trunc (<vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 32, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i8>)
+; CHECK-NEXT:    [[TMP10:%.*]] = or <vscale x 4 x i8> [[TMP7]], trunc (<vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 51, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i8>)
+; CHECK-NEXT:    [[TMP11:%.*]] = mul <vscale x 4 x i8> [[TMP10]], trunc (<vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 60, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i8>)
+; CHECK-NEXT:    [[TMP12:%.*]] = and <vscale x 4 x i8> [[TMP9]], [[TMP7]]
+; CHECK-NEXT:    [[TMP13:%.*]] = and <vscale x 4 x i8> [[TMP11]], trunc (<vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 252, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) to <vscale x 4 x i8>)
+; CHECK-NEXT:    [[TMP14:%.*]] = xor <vscale x 4 x i8> [[TMP13]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP15:%.*]] = mul <vscale x 4 x i8> [[TMP14]], [[TMP12]]
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP5]] to <vscale x 4 x i8>*
+; CHECK-NEXT:    store <vscale x 4 x i8> [[TMP15]], <vscale x 4 x i8>* [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP17]], 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP18]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+entry:
+  %conv13 = zext i8 %arg1 to i32
+  br label %for.body
+
+for.body:                                         ; preds = %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %p, i64 %indvars.iv
+  %0 = load i8, i8* %arrayidx
+  %conv = zext i8 %0 to i32
+  %add = shl i32 %conv, 4
+  %conv2 = add nuw nsw i32 %add, 32
+  %or = or i32 %conv, 51
+  %mul = mul nuw nsw i32 %or, 60
+  %and = and i32 %conv2, %conv
+  %mul.masked = and i32 %mul, 252
+  %conv17 = xor i32 %mul.masked, %conv13
+  %mul18 = mul nuw nsw i32 %conv17, %and
+  %conv19 = trunc i32 %mul18 to i8
+  %arrayidx21 = getelementptr inbounds i8, i8* %p, i64 %indvars.iv
+  store i8 %conv19, i8* %arrayidx21
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %len
+  br i1 %exitcond, label %for.exit, label %for.body, !llvm.loop !0
+
+for.exit:                                         ; preds = %for.body
+  ret void
+}
+
+!0 = !{!0, !1, !2}
+!1 = !{!"llvm.loop.vectorize.width", i32 4}
+!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}