diff --git a/llvm/test/Transforms/GlobalOpt/masked-load-global.ll b/llvm/test/Transforms/GlobalOpt/masked-load-global.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/GlobalOpt/masked-load-global.ll
@@ -0,0 +1,158 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -globalopt -S < %s -o - | FileCheck %s
+
+@lPtr = internal unnamed_addr constant [16 x i32] [i32 7, i32 15, i32 10, i32 10, i32 6, i32 10, i32 8, i32 11, i32 11, i32 14, i32 12, i32 2, i32 9, i32 14, i32 2, i32 11], align 4
+@uPtr = internal unnamed_addr constant [16 x i32] [i32 0, i32 7, i32 10, i32 9, i32 7, i32 4, i32 11, i32 10, i32 9, i32 11, i32 4, i32 6, i32 15, i32 2, i32 13, i32 7], align 4
+@upperPtr = internal unnamed_addr constant [16 x double] [double 5.000000e-01, double 1.250000e-01, double 5.000000e-01, double 1.250000e-01, double 5.000000e-01, double 9.091000e-02, double 2.500000e-01, double 7.692000e-02, double 1.111100e-01, double 6.250000e-02, double 0x3FB11148FD9FD36F, double 5.000000e-01, double 0x3FB2493C89F40A28, double 1.000000e+00, double 7.692000e-02, double 0x3FB11148FD9FD36F], align 8
+@lowerPtr = internal unnamed_addr constant [16 x double] [double 1.111100e-01, double 1.250000e-01, double 1.250000e-01, double 6.250000e-02, double 8.333000e-02, double 3.333300e-01, double 1.000000e-01, double 2.000000e-01, double 0x3FB2493C89F40A28, double 0x3FB2493C89F40A28, double 9.091000e-02, double 7.692000e-02, double 6.250000e-02, double 1.250000e-01, double 2.500000e-01, double 5.000000e-01], align 8
+@ApsiPtr = internal global [16 x double] [double 0x3FB11148FD9FD36F, double 1.666700e-01, double 1.111100e-01, double 6.250000e-02, double 6.250000e-02, double 8.333000e-02, double 8.333000e-02, double 1.250000e-01, double 9.091000e-02, double 9.091000e-02, double 1.111100e-01, double 1.111100e-01, double 9.091000e-02, double 1.666700e-01, double 1.428600e-01, double 3.333300e-01], align 8
+@psiPtr = internal global [16 x double] zeroinitializer, align 8
+@diagPtr = internal global [16 x double] [double 7.692000e-02, double 0x3FB2493C89F40A28, double 5.000000e-01, double 1.250000e-01, double 6.250000e-02, double 8.333000e-02, double 8.333000e-02, double 7.692000e-02, double 1.111100e-01, double 1.666700e-01, double 1.111100e-01, double 1.111100e-01, double 0x3FB2493C89F40A28, double 7.692000e-02, double 1.000000e-01, double 8.333000e-02], align 8
+
+; Function Attrs: nofree norecurse nosync nounwind uwtable vscale_range(1,16)
+define dso_local i32 @main(i32 %argc, i8** nocapture readnone %argv) local_unnamed_addr #0 {
+; CHECK-LABEL: @main(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[PREDICATE_ENTRY:%.*]] = icmp ult <vscale x 2 x i64> [[TMP0]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 16, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT:    br label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[CMP:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ false, [[AMUL_EXIT:%.*]] ]
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[PREDICATE:%.*]] = phi <vscale x 2 x i1> [ [[PREDICATE_ENTRY]], [[VECTOR_PH]] ], [ [[PREDICATE_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @diagPtr, i64 0, i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast double* [[TMP3]] to <vscale x 2 x double>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* nonnull [[TMP4]], i32 8, <vscale x 2 x i1> [[PREDICATE]], <vscale x 2 x double> undef)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @psiPtr, i64 0, i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[TMP5]] to <vscale x 2 x double>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD9:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* nonnull [[TMP6]], i32 8, <vscale x 2 x i1> [[PREDICATE]], <vscale x 2 x double> undef)
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul contract <vscale x 2 x double> [[WIDE_MASKED_LOAD]], [[WIDE_MASKED_LOAD9]]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @ApsiPtr, i64 0, i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast double* [[TMP8]] to <vscale x 2 x double>*
+; CHECK-NEXT:    tail call void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double> [[TMP7]], <vscale x 2 x double>* [[TMP9]], i32 8, <vscale x 2 x i1> [[PREDICATE]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; CHECK-NEXT:    [[DOTSPLATINSERT7:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX_NEXT]], i64 0
+; CHECK-NEXT:    [[DOTSPLAT8:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT7]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP10:%.*]] = add nuw <vscale x 2 x i64> [[DOTSPLAT8]], [[TMP0]]
+; CHECK-NEXT:    [[PREDICATE_NEXT]] = icmp ult <vscale x 2 x i64> [[TMP10]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 16, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <vscale x 2 x i1> [[PREDICATE_NEXT]], i64 0
+; CHECK-NEXT:    br i1 [[TMP11]], label [[VECTOR_BODY]], label [[FOR_BODY8_I:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret i32 0
+; CHECK:       for.body8.i:
+; CHECK-NEXT:    [[INDVARS_IV_I:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_I:%.*]], [[FOR_BODY8_I]] ], [ 0, [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX10_I:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @lowerPtr, i64 0, i64 [[INDVARS_IV_I]]
+; CHECK-NEXT:    [[TMP12:%.*]] = load double, double* [[ARRAYIDX10_I]], align 8
+; CHECK-NEXT:    [[ARRAYIDX12_I:%.*]] = getelementptr inbounds [16 x i32], [16 x i32]* @lPtr, i64 0, i64 [[INDVARS_IV_I]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX12_I]], align 4
+; CHECK-NEXT:    [[IDXPROM13_I:%.*]] = sext i32 [[TMP13]] to i64
+; CHECK-NEXT:    [[ARRAYIDX14_I:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @psiPtr, i64 0, i64 [[IDXPROM13_I]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load double, double* [[ARRAYIDX14_I]], align 8
+; CHECK-NEXT:    [[MUL15_I:%.*]] = fmul contract double [[TMP12]], [[TMP14]]
+; CHECK-NEXT:    [[ARRAYIDX17_I:%.*]] = getelementptr inbounds [16 x i32], [16 x i32]* @uPtr, i64 0, i64 [[INDVARS_IV_I]]
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX17_I]], align 4
+; CHECK-NEXT:    [[IDXPROM18_I:%.*]] = sext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[ARRAYIDX19_I:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @ApsiPtr, i64 0, i64 [[IDXPROM18_I]]
+; CHECK-NEXT:    [[TMP16:%.*]] = load double, double* [[ARRAYIDX19_I]], align 8
+; CHECK-NEXT:    [[ADD_I:%.*]] = fadd contract double [[MUL15_I]], [[TMP16]]
+; CHECK-NEXT:    store double [[ADD_I]], double* [[ARRAYIDX19_I]], align 8
+; CHECK-NEXT:    [[ARRAYIDX21_I:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @upperPtr, i64 0, i64 [[INDVARS_IV_I]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load double, double* [[ARRAYIDX21_I]], align 8
+; CHECK-NEXT:    [[ARRAYIDX25_I:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @psiPtr, i64 0, i64 [[IDXPROM18_I]]
+; CHECK-NEXT:    [[TMP18:%.*]] = load double, double* [[ARRAYIDX25_I]], align 8
+; CHECK-NEXT:    [[MUL26_I:%.*]] = fmul contract double [[TMP17]], [[TMP18]]
+; CHECK-NEXT:    [[ARRAYIDX30_I:%.*]] = getelementptr inbounds [16 x double], [16 x double]* @ApsiPtr, i64 0, i64 [[IDXPROM13_I]]
+; CHECK-NEXT:    [[TMP19:%.*]] = load double, double* [[ARRAYIDX30_I]], align 8
+; CHECK-NEXT:    [[ADD31_I:%.*]] = fadd contract double [[TMP19]], [[MUL26_I]]
+; CHECK-NEXT:    store double [[ADD31_I]], double* [[ARRAYIDX30_I]], align 8
+; CHECK-NEXT:    [[INDVARS_IV_NEXT_I]] = add nuw nsw i64 [[INDVARS_IV_I]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT_I:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_I]], 16
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT_I]], label [[AMUL_EXIT]], label [[FOR_BODY8_I]]
+; CHECK:       AMUL.exit:
+; CHECK-NEXT:    br i1 [[CMP]], label [[VECTOR_PH]], label [[FOR_COND_CLEANUP:%.*]]
+;
+entry:
+  %0 = tail call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %predicate.entry = icmp ult <vscale x 2 x i64> %0, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 16, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %1 = tail call i64 @llvm.vscale.i64()
+  %2 = shl nuw nsw i64 %1, 1
+  br label %vector.ph
+
+vector.ph:                                        ; preds = %AMUL.exit, %entry
+  %cmp = phi i1 [ true, %entry ], [ false, %AMUL.exit ]
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %predicate = phi <vscale x 2 x i1> [ %predicate.entry, %vector.ph ], [ %predicate.next, %vector.body ]
+  %3 = getelementptr inbounds [16 x double], [16 x double]* @diagPtr, i64 0, i64 %index
+  %4 = bitcast double* %3 to <vscale x 2 x double>*
+  %wide.masked.load = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* nonnull %4, i32 8, <vscale x 2 x i1> %predicate, <vscale x 2 x double> undef)
+  %5 = getelementptr inbounds [16 x double], [16 x double]* @psiPtr, i64 0, i64 %index
+  %6 = bitcast double* %5 to <vscale x 2 x double>*
+  %wide.masked.load9 = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* nonnull %6, i32 8, <vscale x 2 x i1> %predicate, <vscale x 2 x double> undef)
+  %7 = fmul contract <vscale x 2 x double> %wide.masked.load, %wide.masked.load9
+  %8 = getelementptr inbounds [16 x double], [16 x double]* @ApsiPtr, i64 0, i64 %index
+  %9 = bitcast double* %8 to <vscale x 2 x double>*
+  tail call void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double> %7, <vscale x 2 x double>* %9, i32 8, <vscale x 2 x i1> %predicate)
+  %index.next = add nuw i64 %index, %2
+  %.splatinsert7 = insertelement <vscale x 2 x i64> poison, i64 %index.next, i64 0
+  %.splat8 = shufflevector <vscale x 2 x i64> %.splatinsert7, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %10 = add nuw <vscale x 2 x i64> %.splat8, %0
+  %predicate.next = icmp ult <vscale x 2 x i64> %10, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 16, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  %11 = extractelement <vscale x 2 x i1> %predicate.next, i64 0
+  br i1 %11, label %vector.body, label %for.body8.i
+
+for.cond.cleanup:                                 ; preds = %AMUL.exit
+  ret i32 0
+
+for.body8.i:                                      ; preds = %for.body8.i, %vector.body
+  %indvars.iv.i = phi i64 [ %indvars.iv.next.i, %for.body8.i ], [ 0, %vector.body ]
+  %arrayidx10.i = getelementptr inbounds [16 x double], [16 x double]* @lowerPtr, i64 0, i64 %indvars.iv.i
+  %12 = load double, double* %arrayidx10.i, align 8
+  %arrayidx12.i = getelementptr inbounds [16 x i32], [16 x i32]* @lPtr, i64 0, i64 %indvars.iv.i
+  %13 = load i32, i32* %arrayidx12.i, align 4
+  %idxprom13.i = sext i32 %13 to i64
+  %arrayidx14.i = getelementptr inbounds [16 x double], [16 x double]* @psiPtr, i64 0, i64 %idxprom13.i
+  %14 = load double, double* %arrayidx14.i, align 8
+  %mul15.i = fmul contract double %12, %14
+  %arrayidx17.i = getelementptr inbounds [16 x i32], [16 x i32]* @uPtr, i64 0, i64 %indvars.iv.i
+  %15 = load i32, i32* %arrayidx17.i, align 4
+  %idxprom18.i = sext i32 %15 to i64
+  %arrayidx19.i = getelementptr inbounds [16 x double], [16 x double]* @ApsiPtr, i64 0, i64 %idxprom18.i
+  %16 = load double, double* %arrayidx19.i, align 8
+  %add.i = fadd contract double %mul15.i, %16
+  store double %add.i, double* %arrayidx19.i, align 8
+  %arrayidx21.i = getelementptr inbounds [16 x double], [16 x double]* @upperPtr, i64 0, i64 %indvars.iv.i
+  %17 = load double, double* %arrayidx21.i, align 8
+  %arrayidx25.i = getelementptr inbounds [16 x double], [16 x double]* @psiPtr, i64 0, i64 %idxprom18.i
+  %18 = load double, double* %arrayidx25.i, align 8
+  %mul26.i = fmul contract double %17, %18
+  %arrayidx30.i = getelementptr inbounds [16 x double], [16 x double]* @ApsiPtr, i64 0, i64 %idxprom13.i
+  %19 = load double, double* %arrayidx30.i, align 8
+  %add31.i = fadd contract double %19, %mul26.i
+  store double %add31.i, double* %arrayidx30.i, align 8
+  %indvars.iv.next.i = add nuw nsw i64 %indvars.iv.i, 1
+  %exitcond.not.i = icmp eq i64 %indvars.iv.next.i, 16
+  br i1 %exitcond.not.i, label %AMUL.exit, label %for.body8.i
+
+AMUL.exit:                                        ; preds = %for.body8.i
+  br i1 %cmp, label %vector.ph, label %for.cond.cleanup
+}
+
+; Function Attrs: nofree nosync nounwind readnone willreturn
+declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+
+; Function Attrs: argmemonly nofree nosync nounwind readonly willreturn
+declare <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>*, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x double>)
+
+; Function Attrs: argmemonly nofree nosync nounwind willreturn writeonly
+declare void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, i32 immarg, <vscale x 2 x i1>)
+
+; Function Attrs: nofree nosync nounwind readnone willreturn
+declare i64 @llvm.vscale.i64()
+