Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp =================================================================== --- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -3951,9 +3951,8 @@ Type *OriginalTy = I->getType(); Type *ScalarTruncatedTy = IntegerType::get(OriginalTy->getContext(), KV.second); - auto *TruncatedTy = FixedVectorType::get( - ScalarTruncatedTy, - cast<FixedVectorType>(OriginalTy)->getNumElements()); + auto *TruncatedTy = VectorType::get( + ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); if (TruncatedTy == OriginalTy) continue; Index: llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll =================================================================== --- /dev/null +++ llvm/test/Transforms/LoopVectorize/scalable-trunc-min-bitwidth.ll @@ -0,0 +1,75 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -loop-vectorize -scalable-vectorization=on -force-target-supports-scalable-vectors -S | FileCheck %s + +define void @trunc_minimal_bitwidth(i8* %bptr, i16* %hptr, i32 %sptr, i64 %dptr) { +; CHECK-LABEL: @trunc_minimal_bitwidth( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[HPTR1:%.*]] = bitcast i16* [[HPTR:%.*]] to i8* +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[DPTR:%.*]], 2 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i16, i16* [[HPTR]], i64 [[DPTR]] +; CHECK-NEXT: [[SCEVGEP2:%.*]] = bitcast i16* [[SCEVGEP]] to i8* +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, i8* [[BPTR:%.*]], i64 1 +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult i8* [[HPTR1]], [[SCEVGEP3]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult i8* [[BPTR]], [[SCEVGEP2]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: [[MEMCHECK_CONFLICT:%.*]] = and i1 [[FOUND_CONFLICT]], true +; CHECK-NEXT: br i1 [[MEMCHECK_CONFLICT]], 
label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[DPTR]], 2 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[DPTR]], [[N_MOD_VF]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <2 x i32> poison, i32 [[SPTR:%.*]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT4]], <2 x i32> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* [[BPTR]], align 1, !alias.scope !0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i8> poison, i8 [[TMP1]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i8> [[BROADCAST_SPLATINSERT]], <2 x i8> poison, <2 x i32> zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = zext <2 x i8> [[BROADCAST_SPLAT]] to <2 x i32> +; CHECK-NEXT: [[TMP3:%.*]] = trunc <2 x i32> [[BROADCAST_SPLAT5]] to <2 x i16> +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i16, i16* [[HPTR]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[TMP4]], i32 0 +; CHECK-NEXT: [[TMP6:%.*]] = bitcast i16* [[TMP5]] to <2 x i16>* +; CHECK-NEXT: store <2 x i16> [[TMP3]], <2 x i16>* [[TMP6]], align 2, !alias.scope !3, !noalias !0 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[DPTR]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; 
CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[TMP8:%.*]] = load i8, i8* [[BPTR]], align 1 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP8]] to i32 +; CHECK-NEXT: [[CONV21:%.*]] = trunc i32 [[SPTR]] to i16 +; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[HPTR]], i64 [[INDVARS_IV]] +; CHECK-NEXT: store i16 [[CONV21]], i16* [[ARRAYIDX23]], align 2 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[DPTR]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK: for.exit: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %0 = load i8, i8* %bptr, align 1 + %conv = zext i8 %0 to i32 + %conv21 = trunc i32 %sptr to i16 + %arrayidx23 = getelementptr inbounds i16, i16* %hptr, i64 %indvars.iv + store i16 %conv21, i16* %arrayidx23, align 2 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond.not = icmp eq i64 %indvars.iv.next, %dptr + br i1 %exitcond.not, label %for.exit, label %for.body + +for.exit: + ret void +}