Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3951,9 +3951,8 @@
       Type *OriginalTy = I->getType();
       Type *ScalarTruncatedTy =
           IntegerType::get(OriginalTy->getContext(), KV.second);
-      auto *TruncatedTy = FixedVectorType::get(
-          ScalarTruncatedTy,
-          cast<FixedVectorType>(OriginalTy)->getNumElements());
+      auto *TruncatedTy = VectorType::get(
+          ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
       if (TruncatedTy == OriginalTy)
         continue;

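The hunk above sits in the vectorizer's minimal-bitwidth truncation step, where `KV.second` is the narrowest bit width computed for the vectorized instruction `I` and the code rebuilds `I`'s vector type with that narrower element type. `cast<FixedVectorType>` asserts when `OriginalTy` is a scalable type such as `<vscale x 2 x i32>`, whereas `getElementCount()` carries the fixed/scalable distinction through, so `VectorType::get` reproduces it in the truncated type. A minimal standalone sketch of the difference (the `main` driver is hypothetical; the LLVM APIs are real):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      // A scalable operand type, e.g. <vscale x 2 x i32>.
      auto *Orig =
          VectorType::get(Type::getInt32Ty(Ctx), ElementCount::getScalable(2));

      // Old path: cast<FixedVectorType>(Orig) would assert here, since a
      // ScalableVectorType is not a FixedVectorType.
      // New path: the ElementCount (2 elements, scalable) survives, so the
      // truncated type below is <vscale x 2 x i16> rather than <2 x i16>.
      auto *Trunc = VectorType::get(Type::getInt16Ty(Ctx),
                                    cast<VectorType>(Orig)->getElementCount());
      Trunc->print(outs());
      outs() << "\n"; // prints: <vscale x 2 x i16>
    }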
Index: llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -scalable-vectorization=preferred -force-target-supports-scalable-vectors -S | FileCheck %s
+
+define void @trunc_minimal_bitwidth(i8* %bptr, i16* %hptr, i32 %sptr, i64 %dptr) {
+; CHECK-LABEL: @trunc_minimal_bitwidth(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[HPTR1:%.*]] = bitcast i16* [[HPTR:%.*]] to i8*
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[HPTR]], i64 [[DPTR:%.*]]
+; CHECK-NEXT:    [[SCEVGEP2:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP3:%.*]] = getelementptr i8, i8* [[BPTR:%.*]], i64 1
+; CHECK-NEXT:    br label [[FOR_BODY_PRE:%.*]]
+; CHECK:       for.body.pre.loopexit:
+; CHECK-NEXT:    br label [[FOR_BODY_PRE]]
+; CHECK:       for.body.pre:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[DPTR]], [[TMP1]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK:       vector.memcheck:
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[HPTR1]], [[SCEVGEP3]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[BPTR]], [[SCEVGEP2]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    [[MEMCHECK_CONFLICT:%.*]] = and i1 [[FOUND_CONFLICT]], true
+; CHECK-NEXT:    br i1 [[MEMCHECK_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[DPTR]], [[TMP3]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[DPTR]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[SPTR:%.*]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT5:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT4]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, i8* [[BPTR]], align 1, !alias.scope !0
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i8> poison, i8 [[TMP5]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = zext <vscale x 2 x i8> [[BROADCAST_SPLAT]] to <vscale x 2 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = trunc <vscale x 2 x i32> [[BROADCAST_SPLAT5]] to <vscale x 2 x i16>
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i16, i16* [[HPTR]], i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i16, i16* [[TMP8]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[TMP9]] to <vscale x 2 x i16>*
+; CHECK-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[TMP10]], align 2, !alias.scope !3, !noalias !0
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP12:%.*]] = mul i64 [[TMP11]], 2
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[DPTR]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_BODY_PRE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PRE]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[TMP14:%.*]] = load i8, i8* [[BPTR]], align 1
+; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP14]] to i32
+; CHECK-NEXT:    [[CONV21:%.*]] = trunc i32 [[SPTR]] to i16
+; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[HPTR]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[DPTR]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_BODY_PRE_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+;
+entry:
+  br label %for.body.pre
+
+for.body.pre:                                     ; preds = %entry, %for.body
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.pre, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.pre ], [ %indvars.iv.next, %for.body ]
+  %0 = load i8, i8* %bptr, align 1
+  %conv = zext i8 %0 to i32
+  %conv21 = trunc i32 %sptr to i16
+  %arrayidx23 = getelementptr inbounds i16, i16* %hptr, i64 %indvars.iv
+  store i16 %conv21, i16* %arrayidx23, align 2
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %dptr
+  br i1 %exitcond.not, label %for.body.pre, label %for.body
+}
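A note on the RUN line above: `-force-target-supports-scalable-vectors` lets `opt` use scalable vector types without a target that actually provides them, and `-scalable-vectorization=preferred` steers the vectorizer toward a scalable VF, which is where the `<vscale x 2 x ...>` types in the CHECK lines come from. If the vectorizer's output changes, the assertions are meant to be regenerated with `utils/update_test_checks.py`, per the NOTE at the top of the test.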