diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4003,16 +4003,14 @@
           break;
         }
       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
-        auto Elements0 = cast<FixedVectorType>(SI->getOperand(0)->getType())
-                             ->getNumElements();
+        auto Elements0 =
+            cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
         auto *O0 = B.CreateZExtOrTrunc(
-            SI->getOperand(0),
-            FixedVectorType::get(ScalarTruncatedTy, Elements0));
-        auto Elements1 = cast<FixedVectorType>(SI->getOperand(1)->getType())
-                             ->getNumElements();
+            SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
+        auto Elements1 =
+            cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
         auto *O1 = B.CreateZExtOrTrunc(
-            SI->getOperand(1),
-            FixedVectorType::get(ScalarTruncatedTy, Elements1));
+            SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
 
         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
--- a/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
@@ -62,6 +62,55 @@
   ret void
 }
 
+define void @trunc_minimal_bitwidths_shufflevector (i8* %p, i32 %arg1, i64 %len) {
+; CHECK-LABEL: @trunc_minimal_bitwidths_shufflevector(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[LEN:%.*]], [[TMP1]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[LEN]], [[TMP3]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[LEN]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[ARG1:%.*]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to <vscale x 4 x i8>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[TMP5]], align 1
+; CHECK-NEXT:    [[TMP6:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i8>
+; CHECK-NEXT:    [[TMP7:%.*]] = xor <vscale x 4 x i8> [[WIDE_LOAD]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = mul <vscale x 4 x i8> [[TMP7]], [[WIDE_LOAD]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP4]] to <vscale x 4 x i8>*
+; CHECK-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP10]], 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %p, i64 %indvars.iv
+  %0 = load i8, i8* %arrayidx
+  %conv = zext i8 %0 to i32
+  %conv17 = xor i32 %conv, %arg1
+  %mul18 = mul nuw nsw i32 %conv17, %conv
+  %conv19 = trunc i32 %mul18 to i8
+  store i8 %conv19, i8* %arrayidx
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %len
+  br i1 %exitcond, label %for.exit, label %for.body, !llvm.loop !0
+
+for.exit:                                         ; preds = %for.body
+  ret void
+}
 !0 = !{!0, !1, !2}
 !1 = !{!"llvm.loop.vectorize.width", i32 4}
 !2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}