Index: llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -153,7 +153,8 @@
 bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
     TargetTransformInfo::RegisterKind K) const {
   assert(K != TargetTransformInfo::RGK_Scalar);
-  return K == TargetTransformInfo::RGK_FixedWidthVector;
+  return (K == TargetTransformInfo::RGK_FixedWidthVector &&
+          !ST->forceStreamingCompatibleSVE());
 }
 
 /// Calculate the cost of materializing a 64-bit value. This helper
Index: llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/LoopVectorize/AArch64/streaming-compatible-sve-no-maximize-bandwidth.ll
@@ -0,0 +1,261 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=loop-vectorize -force-streaming-compatible-sve -mattr=+sve -force-target-instruction-cost=1 -scalable-vectorization=off -S 2>&1 | FileCheck %s --check-prefix=SC_SVE
+; RUN: opt < %s -passes=loop-vectorize -mattr=+sve -force-target-instruction-cost=1 -scalable-vectorization=off -S 2>&1 | FileCheck %s --check-prefix=NO_SC_SVE
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+@a = global [32 x i16] zeroinitializer, align 2
+@b = global [32 x i16] zeroinitializer, align 2
+@c = global [32 x i16] zeroinitializer, align 2
+
+define i32 @foo(i32 noundef %n, i32 noundef %lag, i32 noundef %shift) vscale_range(1,16) {
+; SC_SVE-LABEL: @foo(
+; SC_SVE-NEXT:  entry:
+; SC_SVE-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; SC_SVE-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; SC_SVE:       for.body.preheader:
+; SC_SVE-NEXT:    [[TMP0:%.*]] = sext i32 [[LAG:%.*]] to i64
+; SC_SVE-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
+; SC_SVE-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 8
+; SC_SVE-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; SC_SVE:       vector.ph:
+; SC_SVE-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; SC_SVE-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; SC_SVE-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[SHIFT:%.*]], i64 0
+; SC_SVE-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; SC_SVE-NEXT:    [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <4 x i32> poison, i32 [[SHIFT]], i64 0
+; SC_SVE-NEXT:    [[BROADCAST_SPLAT9:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT8]], <4 x i32> poison, <4 x i32> zeroinitializer
+; SC_SVE-NEXT:    br label [[VECTOR_BODY:%.*]]
+; SC_SVE:       vector.body:
+; SC_SVE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SC_SVE-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
+; SC_SVE-NEXT:    [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP34:%.*]], [[VECTOR_BODY]] ]
+; SC_SVE-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; SC_SVE-NEXT:    [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; SC_SVE-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 0
+; SC_SVE-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 4
+; SC_SVE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 [[TMP1]]
+; SC_SVE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 [[TMP2]]
+; SC_SVE-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 0
+; SC_SVE-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP5]], align 2
+; SC_SVE-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 4
+; SC_SVE-NEXT:    [[WIDE_LOAD3:%.*]] = load <4 x i16>, ptr [[TMP6]], align 2
+; SC_SVE-NEXT:    [[TMP7:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i32>
+; SC_SVE-NEXT:    [[TMP8:%.*]] = sext <4 x i16> [[WIDE_LOAD3]] to <4 x i32>
+; SC_SVE-NEXT:    [[TMP9:%.*]] = ashr <4 x i32> [[TMP7]], [[VEC_IND]]
+; SC_SVE-NEXT:    [[TMP10:%.*]] = ashr <4 x i32> [[TMP8]], [[STEP_ADD]]
+; SC_SVE-NEXT:    [[TMP11:%.*]] = add nsw i64 [[TMP1]], [[TMP0]]
+; SC_SVE-NEXT:    [[TMP12:%.*]] = add nsw i64 [[TMP2]], [[TMP0]]
+; SC_SVE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 [[TMP11]]
+; SC_SVE-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 [[TMP12]]
+; SC_SVE-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i16, ptr [[TMP13]], i32 0
+; SC_SVE-NEXT:    [[WIDE_LOAD4:%.*]] = load <4 x i16>, ptr [[TMP15]], align 2
+; SC_SVE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i16, ptr [[TMP13]], i32 4
+; SC_SVE-NEXT:    [[WIDE_LOAD5:%.*]] = load <4 x i16>, ptr [[TMP16]], align 2
+; SC_SVE-NEXT:    [[TMP17:%.*]] = sext <4 x i16> [[WIDE_LOAD4]] to <4 x i32>
+; SC_SVE-NEXT:    [[TMP18:%.*]] = sext <4 x i16> [[WIDE_LOAD5]] to <4 x i32>
+; SC_SVE-NEXT:    [[TMP19:%.*]] = shl <4 x i32> [[TMP17]], [[VEC_IND]]
+; SC_SVE-NEXT:    [[TMP20:%.*]] = shl <4 x i32> [[TMP18]], [[STEP_ADD]]
+; SC_SVE-NEXT:    [[TMP21:%.*]] = mul nsw <4 x i32> [[TMP19]], [[TMP9]]
+; SC_SVE-NEXT:    [[TMP22:%.*]] = mul nsw <4 x i32> [[TMP20]], [[TMP10]]
+; SC_SVE-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 [[TMP1]]
+; SC_SVE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 [[TMP2]]
+; SC_SVE-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i16, ptr [[TMP23]], i32 0
+; SC_SVE-NEXT:    [[WIDE_LOAD6:%.*]] = load <4 x i16>, ptr [[TMP25]], align 2
+; SC_SVE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i16, ptr [[TMP23]], i32 4
+; SC_SVE-NEXT:    [[WIDE_LOAD7:%.*]] = load <4 x i16>, ptr [[TMP26]], align 2
+; SC_SVE-NEXT:    [[TMP27:%.*]] = sext <4 x i16> [[WIDE_LOAD6]] to <4 x i32>
+; SC_SVE-NEXT:    [[TMP28:%.*]] = sext <4 x i16> [[WIDE_LOAD7]] to <4 x i32>
+; SC_SVE-NEXT:    [[TMP29:%.*]] = add nsw <4 x i32> [[TMP21]], [[TMP27]]
+; SC_SVE-NEXT:    [[TMP30:%.*]] = add nsw <4 x i32> [[TMP22]], [[TMP28]]
+; SC_SVE-NEXT:    [[TMP31:%.*]] = shl <4 x i32> [[TMP29]], [[BROADCAST_SPLAT]]
+; SC_SVE-NEXT:    [[TMP32:%.*]] = shl <4 x i32> [[TMP30]], [[BROADCAST_SPLAT9]]
+; SC_SVE-NEXT:    [[TMP33]] = add <4 x i32> [[TMP31]], [[VEC_PHI]]
+; SC_SVE-NEXT:    [[TMP34]] = add <4 x i32> [[TMP32]], [[VEC_PHI1]]
+; SC_SVE-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; SC_SVE-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[STEP_ADD]], <i32 4, i32 4, i32 4, i32 4>
+; SC_SVE-NEXT:    [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; SC_SVE-NEXT:    br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; SC_SVE:       middle.block:
+; SC_SVE-NEXT:    [[BIN_RDX:%.*]] = add <4 x i32> [[TMP34]], [[TMP33]]
+; SC_SVE-NEXT:    [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; SC_SVE-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; SC_SVE-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; SC_SVE:       scalar.ph:
+; SC_SVE-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; SC_SVE-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[TMP36]], [[MIDDLE_BLOCK]] ]
+; SC_SVE-NEXT:    br label [[FOR_BODY:%.*]]
+; SC_SVE:       for.cond.cleanup.loopexit:
+; SC_SVE-NEXT:    [[ADD9_LCSSA:%.*]] = phi i32 [ [[ADD9:%.*]], [[FOR_BODY]] ], [ [[TMP36]], [[MIDDLE_BLOCK]] ]
+; SC_SVE-NEXT:    br label [[FOR_COND_CLEANUP]]
+; SC_SVE:       for.cond.cleanup:
+; SC_SVE-NEXT:    [[RET_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD9_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; SC_SVE-NEXT:    ret i32 [[RET_0_LCSSA]]
+; SC_SVE:       for.body:
+; SC_SVE-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; SC_SVE-NEXT:    [[RET_018:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD9]], [[FOR_BODY]] ]
+; SC_SVE-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 [[INDVARS_IV]]
+; SC_SVE-NEXT:    [[TMP37:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
+; SC_SVE-NEXT:    [[CONV:%.*]] = sext i16 [[TMP37]] to i32
+; SC_SVE-NEXT:    [[TMP38:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SC_SVE-NEXT:    [[SHR:%.*]] = ashr i32 [[CONV]], [[TMP38]]
+; SC_SVE-NEXT:    [[TMP39:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
+; SC_SVE-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 [[TMP39]]
+; SC_SVE-NEXT:    [[TMP40:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2
+; SC_SVE-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP40]] to i32
+; SC_SVE-NEXT:    [[SHL:%.*]] = shl i32 [[CONV3]], [[TMP38]]
+; SC_SVE-NEXT:    [[MUL:%.*]] = mul nsw i32 [[SHL]], [[SHR]]
+; SC_SVE-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 [[INDVARS_IV]]
+; SC_SVE-NEXT:    [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
+; SC_SVE-NEXT:    [[CONV6:%.*]] = sext i16 [[TMP41]] to i32
+; SC_SVE-NEXT:    [[ADD7:%.*]] = add nsw i32 [[MUL]], [[CONV6]]
+; SC_SVE-NEXT:    [[SHL8:%.*]] = shl i32 [[ADD7]], [[SHIFT]]
+; SC_SVE-NEXT:    [[ADD9]] = add nsw i32 [[SHL8]], [[RET_018]]
+; SC_SVE-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; SC_SVE-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; SC_SVE-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; NO_SC_SVE-LABEL: @foo(
+; NO_SC_SVE-NEXT:  entry:
+; NO_SC_SVE-NEXT:    [[CMP17:%.*]] = icmp sgt i32 [[N:%.*]], 0
+; NO_SC_SVE-NEXT:    br i1 [[CMP17]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; NO_SC_SVE:       for.body.preheader:
+; NO_SC_SVE-NEXT:    [[TMP0:%.*]] = sext i32 [[LAG:%.*]] to i64
+; NO_SC_SVE-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64
+; NO_SC_SVE-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
+; NO_SC_SVE-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO_SC_SVE:       vector.ph:
+; NO_SC_SVE-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
+; NO_SC_SVE-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; NO_SC_SVE-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[SHIFT:%.*]], i64 0
+; NO_SC_SVE-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i32> [[BROADCAST_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
+; NO_SC_SVE-NEXT:    [[BROADCAST_SPLATINSERT8:%.*]] = insertelement <8 x i32> poison, i32 [[SHIFT]], i64 0
+; NO_SC_SVE-NEXT:    [[BROADCAST_SPLAT9:%.*]] = shufflevector <8 x i32> [[BROADCAST_SPLATINSERT8]], <8 x i32> poison, <8 x i32> zeroinitializer
+; NO_SC_SVE-NEXT:    br label [[VECTOR_BODY:%.*]]
+; NO_SC_SVE:       vector.body:
+; NO_SC_SVE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO_SC_SVE-NEXT:    [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
+; NO_SC_SVE-NEXT:    [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP34:%.*]], [[VECTOR_BODY]] ]
+; NO_SC_SVE-NEXT:    [[VEC_IND:%.*]] = phi <8 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO_SC_SVE-NEXT:    [[STEP_ADD:%.*]] = add <8 x i32> [[VEC_IND]], <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+; NO_SC_SVE-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 0
+; NO_SC_SVE-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; NO_SC_SVE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 [[TMP1]]
+; NO_SC_SVE-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 [[TMP2]]
+; NO_SC_SVE-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 0
+; NO_SC_SVE-NEXT:    [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP5]], align 2
+; NO_SC_SVE-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 8
+; NO_SC_SVE-NEXT:    [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[TMP6]], align 2
+; NO_SC_SVE-NEXT:    [[TMP7:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
+; NO_SC_SVE-NEXT:    [[TMP8:%.*]] = sext <8 x i16> [[WIDE_LOAD3]] to <8 x i32>
+; NO_SC_SVE-NEXT:    [[TMP9:%.*]] = ashr <8 x i32> [[TMP7]], [[VEC_IND]]
+; NO_SC_SVE-NEXT:    [[TMP10:%.*]] = ashr <8 x i32> [[TMP8]], [[STEP_ADD]]
+; NO_SC_SVE-NEXT:    [[TMP11:%.*]] = add nsw i64 [[TMP1]], [[TMP0]]
+; NO_SC_SVE-NEXT:    [[TMP12:%.*]] = add nsw i64 [[TMP2]], [[TMP0]]
+; NO_SC_SVE-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 [[TMP11]]
+; NO_SC_SVE-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 [[TMP12]]
+; NO_SC_SVE-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i16, ptr [[TMP13]], i32 0
+; NO_SC_SVE-NEXT:    [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP15]], align 2
+; NO_SC_SVE-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i16, ptr [[TMP13]], i32 8
+; NO_SC_SVE-NEXT:    [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[TMP16]], align 2
+; NO_SC_SVE-NEXT:    [[TMP17:%.*]] = sext <8 x i16> [[WIDE_LOAD4]] to <8 x i32>
+; NO_SC_SVE-NEXT:    [[TMP18:%.*]] = sext <8 x i16> [[WIDE_LOAD5]] to <8 x i32>
+; NO_SC_SVE-NEXT:    [[TMP19:%.*]] = shl <8 x i32> [[TMP17]], [[VEC_IND]]
+; NO_SC_SVE-NEXT:    [[TMP20:%.*]] = shl <8 x i32> [[TMP18]], [[STEP_ADD]]
+; NO_SC_SVE-NEXT:    [[TMP21:%.*]] = mul nsw <8 x i32> [[TMP19]], [[TMP9]]
+; NO_SC_SVE-NEXT:    [[TMP22:%.*]] = mul nsw <8 x i32> [[TMP20]], [[TMP10]]
+; NO_SC_SVE-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 [[TMP1]]
+; NO_SC_SVE-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 [[TMP2]]
+; NO_SC_SVE-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i16, ptr [[TMP23]], i32 0
+; NO_SC_SVE-NEXT:    [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP25]], align 2
+; NO_SC_SVE-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i16, ptr [[TMP23]], i32 8
+; NO_SC_SVE-NEXT:    [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP26]], align 2
+; NO_SC_SVE-NEXT:    [[TMP27:%.*]] = sext <8 x i16> [[WIDE_LOAD6]] to <8 x i32>
+; NO_SC_SVE-NEXT:    [[TMP28:%.*]] = sext <8 x i16> [[WIDE_LOAD7]] to <8 x i32>
+; NO_SC_SVE-NEXT:    [[TMP29:%.*]] = add nsw <8 x i32> [[TMP21]], [[TMP27]]
+; NO_SC_SVE-NEXT:    [[TMP30:%.*]] = add nsw <8 x i32> [[TMP22]], [[TMP28]]
+; NO_SC_SVE-NEXT:    [[TMP31:%.*]] = shl <8 x i32> [[TMP29]], [[BROADCAST_SPLAT]]
+; NO_SC_SVE-NEXT:    [[TMP32:%.*]] = shl <8 x i32> [[TMP30]], [[BROADCAST_SPLAT9]]
+; NO_SC_SVE-NEXT:    [[TMP33]] = add <8 x i32> [[TMP31]], [[VEC_PHI]]
+; NO_SC_SVE-NEXT:    [[TMP34]] = add <8 x i32> [[TMP32]], [[VEC_PHI1]]
+; NO_SC_SVE-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; NO_SC_SVE-NEXT:    [[VEC_IND_NEXT]] = add <8 x i32> [[STEP_ADD]], <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
+; NO_SC_SVE-NEXT:    [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO_SC_SVE-NEXT:    br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO_SC_SVE:       middle.block:
+; NO_SC_SVE-NEXT:    [[BIN_RDX:%.*]] = add <8 x i32> [[TMP34]], [[TMP33]]
+; NO_SC_SVE-NEXT:    [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX]])
+; NO_SC_SVE-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; NO_SC_SVE-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; NO_SC_SVE:       scalar.ph:
+; NO_SC_SVE-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; NO_SC_SVE-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[TMP36]], [[MIDDLE_BLOCK]] ]
+; NO_SC_SVE-NEXT:    br label [[FOR_BODY:%.*]]
+; NO_SC_SVE:       for.cond.cleanup.loopexit:
+; NO_SC_SVE-NEXT:    [[ADD9_LCSSA:%.*]] = phi i32 [ [[ADD9:%.*]], [[FOR_BODY]] ], [ [[TMP36]], [[MIDDLE_BLOCK]] ]
+; NO_SC_SVE-NEXT:    br label [[FOR_COND_CLEANUP]]
+; NO_SC_SVE:       for.cond.cleanup:
+; NO_SC_SVE-NEXT:    [[RET_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD9_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ]
+; NO_SC_SVE-NEXT:    ret i32 [[RET_0_LCSSA]]
+; NO_SC_SVE:       for.body:
+; NO_SC_SVE-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO_SC_SVE-NEXT:    [[RET_018:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD9]], [[FOR_BODY]] ]
+; NO_SC_SVE-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 [[INDVARS_IV]]
+; NO_SC_SVE-NEXT:    [[TMP37:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
+; NO_SC_SVE-NEXT:    [[CONV:%.*]] = sext i16 [[TMP37]] to i32
+; NO_SC_SVE-NEXT:    [[TMP38:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; NO_SC_SVE-NEXT:    [[SHR:%.*]] = ashr i32 [[CONV]], [[TMP38]]
+; NO_SC_SVE-NEXT:    [[TMP39:%.*]] = add nsw i64 [[INDVARS_IV]], [[TMP0]]
+; NO_SC_SVE-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 [[TMP39]]
+; NO_SC_SVE-NEXT:    [[TMP40:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2
+; NO_SC_SVE-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP40]] to i32
+; NO_SC_SVE-NEXT:    [[SHL:%.*]] = shl i32 [[CONV3]], [[TMP38]]
+; NO_SC_SVE-NEXT:    [[MUL:%.*]] = mul nsw i32 [[SHL]], [[SHR]]
+; NO_SC_SVE-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 [[INDVARS_IV]]
+; NO_SC_SVE-NEXT:    [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2
+; NO_SC_SVE-NEXT:    [[CONV6:%.*]] = sext i16 [[TMP41]] to i32
+; NO_SC_SVE-NEXT:    [[ADD7:%.*]] = add nsw i32 [[MUL]], [[CONV6]]
+; NO_SC_SVE-NEXT:    [[SHL8:%.*]] = shl i32 [[ADD7]], [[SHIFT]]
+; NO_SC_SVE-NEXT:    [[ADD9]] = add nsw i32 [[SHL8]], [[RET_018]]
+; NO_SC_SVE-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO_SC_SVE-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; NO_SC_SVE-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+entry:
+  %cmp17 = icmp sgt i32 %n, 0
+  br i1 %cmp17, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+  %0 = sext i32 %lag to i64
+  %wide.trip.count = zext i32 %n to i64
+  br label %for.body
+
+for.cond.cleanup:
+  %ret.0.lcssa = phi i32 [ 0, %entry ], [ %add9, %for.body ]
+  ret i32 %ret.0.lcssa
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %ret.018 = phi i32 [ 0, %for.body.preheader ], [ %add9, %for.body ]
+  %arrayidx = getelementptr inbounds [32 x i16], ptr @a, i64 0, i64 %indvars.iv
+  %1 = load i16, ptr %arrayidx, align 2
+  %conv = sext i16 %1 to i32
+  %2 = trunc i64 %indvars.iv to i32
+  %shr = ashr i32 %conv, %2
+  %3 = add nsw i64 %indvars.iv, %0
+  %arrayidx2 = getelementptr inbounds [32 x i16], ptr @b, i64 0, i64 %3
+  %4 = load i16, ptr %arrayidx2, align 2
+  %conv3 = sext i16 %4 to i32
+  %shl = shl i32 %conv3, %2
+  %mul = mul nsw i32 %shl, %shr
+  %arrayidx5 = getelementptr inbounds [32 x i16], ptr @c, i64 0, i64 %indvars.iv
+  %5 = load i16, ptr %arrayidx5, align 2
+  %conv6 = sext i16 %5 to i32
+  %add7 = add nsw i32 %mul, %conv6
+  %shl8 = shl i32 %add7, %shift
+  %add9 = add nsw i32 %shl8, %ret.018
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}