diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -6796,8 +6796,14 @@ I->hasMetadata(LLVMContext::MD_dereferenceable_or_null)) return true; - if (programUndefinedIfUndefOrPoison(V, PoisonOnly)) - return true; + auto *I = dyn_cast<Instruction>(V); + // programUndefinedIfUndefOrPoison currently only scans V's parent basic block + // if V is an instruction, or the entry block if V is an argument. If we have CtxI and V + // is an instruction, we should make sure that V's parent basic block is the same + // as CtxI's parent basic block. + if (!(CtxI && I && CtxI->getParent() != I->getParent())) + if (programUndefinedIfUndefOrPoison(V, PoisonOnly)) + return true; // CxtI may be null or a cloned instruction. if (!CtxI || !CtxI->getParent() || !DT) diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll --- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll @@ -12,19 +12,19 @@ ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV]] ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 225 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]]) -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds <225 x double>, ptr [[A:%.*]], i64 0, i64 [[TMP1]] -; CHECK-NEXT: [[MATRIXEXT:%.*]] = load double, ptr [[TMP4]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds <225 x double>, ptr [[A:%.*]], i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[MATRIXEXT:%.*]] = load double, ptr [[TMP3]], align 8 ; CHECK-NEXT: [[CONV2:%.*]] = zext i32 [[I:%.*]] to i64 -; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV2]] -; CHECK-NEXT: [[TMP6:%.*]] = icmp ult i64 [[TMP5]], 225 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP6]]) -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds <225 x double>, ptr 
[[B:%.*]], i64 0, i64 [[TMP5]] -; CHECK-NEXT: [[MATRIXEXT4:%.*]] = load double, ptr [[TMP8]], align 8 +; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV2]] +; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i64 [[TMP4]], 225 +; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP5]]) +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds <225 x double>, ptr [[B:%.*]], i64 0, i64 [[TMP4]] +; CHECK-NEXT: [[MATRIXEXT4:%.*]] = load double, ptr [[TMP6]], align 8 ; CHECK-NEXT: [[MUL:%.*]] = fmul double [[MATRIXEXT]], [[MATRIXEXT4]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP1]] -; CHECK-NEXT: [[MATRIXEXT7:%.*]] = load double, ptr [[TMP9]], align 8 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[MATRIXEXT7:%.*]] = load double, ptr [[TMP7]], align 8 ; CHECK-NEXT: [[SUB:%.*]] = fsub double [[MATRIXEXT7]], [[MUL]] -; CHECK-NEXT: store double [[SUB]], ptr [[TMP9]], align 8 +; CHECK-NEXT: store double [[SUB]], ptr [[TMP7]], align 8 ; CHECK-NEXT: ret void ; entry: @@ -83,99 +83,39 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferenceable(1800) %A, ptr nonnull align 8 dereferenceable(1800) %B) { ; CHECK-LABEL: @matrix_extract_insert_loop( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[CMP212_NOT:%.*]] = icmp eq i32 [[I:%.*]], 0 +; CHECK-NEXT: [[CMP210_NOT:%.*]] = icmp eq i32 [[I:%.*]], 0 ; CHECK-NEXT: [[CONV6:%.*]] = zext i32 [[I]] to i64 -; CHECK-NEXT: br i1 [[CMP212_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US:%.*]] +; CHECK-NEXT: br i1 [[CMP210_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US:%.*]] ; CHECK: for.cond1.preheader.us: -; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[I]], 225 +; CHECK-NEXT: [[J_012_US:%.*]] = phi i32 [ [[INC13_US:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[CONV5_US:%.*]] = zext i32 [[J_012_US]] to i64 +; CHECK-NEXT: 
[[TMP0:%.*]] = mul nuw nsw i64 [[CONV5_US]], 15 +; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV6]] +; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 225 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]]) -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds <225 x double>, ptr [[B:%.*]], i64 0, i64 [[CONV6]] ; CHECK-NEXT: br label [[FOR_BODY4_US:%.*]] ; CHECK: for.body4.us: -; CHECK-NEXT: [[K_013_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ] -; CHECK-NEXT: [[CONV_US:%.*]] = zext i32 [[K_013_US]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = icmp ult i32 [[K_013_US]], 225 +; CHECK-NEXT: [[K_011_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ] +; CHECK-NEXT: [[CONV_US:%.*]] = zext i32 [[K_011_US]] to i64 +; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV_US]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 225 ; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP4]]) -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds <225 x double>, ptr [[A:%.*]], i64 0, i64 [[CONV_US]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds <225 x double>, ptr [[A:%.*]], i64 0, i64 [[TMP3]] ; CHECK-NEXT: [[MATRIXEXT_US:%.*]] = load double, ptr [[TMP5]], align 8 -; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = load double, ptr [[TMP3]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = load <225 x double>, ptr [[B:%.*]], align 8 +; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = extractelement <225 x double> [[TMP6]], i64 [[TMP1]] ; CHECK-NEXT: [[MUL_US:%.*]] = fmul double [[MATRIXEXT_US]], [[MATRIXEXT8_US]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[CONV_US]] -; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = load double, ptr [[TMP6]], align 8 +; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = extractelement <225 x double> [[TMP6]], i64 [[TMP3]] ; CHECK-NEXT: [[SUB_US:%.*]] = fsub double [[MATRIXEXT11_US]], [[MUL_US]] -; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP6]], align 8 
-; CHECK-NEXT: [[INC_US]] = add nuw nsw i32 [[K_013_US]], 1 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP7]], align 8 +; CHECK-NEXT: [[INC_US]] = add nuw i32 [[K_011_US]], 1 ; CHECK-NEXT: [[CMP2_US:%.*]] = icmp ult i32 [[INC_US]], [[I]] -; CHECK-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]] +; CHECK-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us: -; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[CONV6]], 15 -; CHECK-NEXT: [[TMP8:%.*]] = icmp ult i32 [[I]], 210 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP8]]) -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP7]] -; CHECK-NEXT: br label [[FOR_BODY4_US_1:%.*]] -; CHECK: for.body4.us.1: -; CHECK-NEXT: [[K_013_US_1:%.*]] = phi i32 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ], [ [[INC_US_1:%.*]], [[FOR_BODY4_US_1]] ] -; CHECK-NEXT: [[NARROW:%.*]] = add nuw nsw i32 [[K_013_US_1]], 15 -; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[NARROW]] to i64 -; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i32 [[K_013_US_1]], 210 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP11]]) -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP10]] -; CHECK-NEXT: [[MATRIXEXT_US_1:%.*]] = load double, ptr [[TMP12]], align 8 -; CHECK-NEXT: [[MATRIXEXT8_US_1:%.*]] = load double, ptr [[TMP9]], align 8 -; CHECK-NEXT: [[MUL_US_1:%.*]] = fmul double [[MATRIXEXT_US_1]], [[MATRIXEXT8_US_1]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP10]] -; CHECK-NEXT: [[MATRIXEXT11_US_1:%.*]] = load double, ptr [[TMP13]], align 8 -; CHECK-NEXT: [[SUB_US_1:%.*]] = fsub double [[MATRIXEXT11_US_1]], [[MUL_US_1]] -; CHECK-NEXT: store double [[SUB_US_1]], ptr [[TMP13]], 
align 8 -; CHECK-NEXT: [[INC_US_1]] = add nuw nsw i32 [[K_013_US_1]], 1 -; CHECK-NEXT: [[CMP2_US_1:%.*]] = icmp ult i32 [[INC_US_1]], [[I]] -; CHECK-NEXT: br i1 [[CMP2_US_1]], label [[FOR_BODY4_US_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1:%.*]] -; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.1: -; CHECK-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[CONV6]], 30 -; CHECK-NEXT: [[TMP15:%.*]] = icmp ult i32 [[I]], 195 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]]) -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP14]] -; CHECK-NEXT: br label [[FOR_BODY4_US_2:%.*]] -; CHECK: for.body4.us.2: -; CHECK-NEXT: [[K_013_US_2:%.*]] = phi i32 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]] ], [ [[INC_US_2:%.*]], [[FOR_BODY4_US_2]] ] -; CHECK-NEXT: [[NARROW17:%.*]] = add nuw nsw i32 [[K_013_US_2]], 30 -; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[NARROW17]] to i64 -; CHECK-NEXT: [[TMP18:%.*]] = icmp ult i32 [[K_013_US_2]], 195 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP18]]) -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP17]] -; CHECK-NEXT: [[MATRIXEXT_US_2:%.*]] = load double, ptr [[TMP19]], align 8 -; CHECK-NEXT: [[MATRIXEXT8_US_2:%.*]] = load double, ptr [[TMP16]], align 8 -; CHECK-NEXT: [[MUL_US_2:%.*]] = fmul double [[MATRIXEXT_US_2]], [[MATRIXEXT8_US_2]] -; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP17]] -; CHECK-NEXT: [[MATRIXEXT11_US_2:%.*]] = load double, ptr [[TMP20]], align 8 -; CHECK-NEXT: [[SUB_US_2:%.*]] = fsub double [[MATRIXEXT11_US_2]], [[MUL_US_2]] -; CHECK-NEXT: store double [[SUB_US_2]], ptr [[TMP20]], align 8 -; CHECK-NEXT: [[INC_US_2]] = add nuw nsw i32 [[K_013_US_2]], 1 -; CHECK-NEXT: [[CMP2_US_2:%.*]] = icmp ult i32 [[INC_US_2]], [[I]] -; CHECK-NEXT: br i1 [[CMP2_US_2]], label [[FOR_BODY4_US_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2:%.*]] -; CHECK: 
for.cond1.for.cond.cleanup3_crit_edge.us.2: -; CHECK-NEXT: [[TMP21:%.*]] = add nuw nsw i64 [[CONV6]], 45 -; CHECK-NEXT: [[TMP22:%.*]] = icmp ult i32 [[I]], 180 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP22]]) -; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP21]] -; CHECK-NEXT: br label [[FOR_BODY4_US_3:%.*]] -; CHECK: for.body4.us.3: -; CHECK-NEXT: [[K_013_US_3:%.*]] = phi i32 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]] ], [ [[INC_US_3:%.*]], [[FOR_BODY4_US_3]] ] -; CHECK-NEXT: [[NARROW18:%.*]] = add nuw nsw i32 [[K_013_US_3]], 45 -; CHECK-NEXT: [[TMP24:%.*]] = zext i32 [[NARROW18]] to i64 -; CHECK-NEXT: [[TMP25:%.*]] = icmp ult i32 [[K_013_US_3]], 180 -; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP25]]) -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP24]] -; CHECK-NEXT: [[MATRIXEXT_US_3:%.*]] = load double, ptr [[TMP26]], align 8 -; CHECK-NEXT: [[MATRIXEXT8_US_3:%.*]] = load double, ptr [[TMP23]], align 8 -; CHECK-NEXT: [[MUL_US_3:%.*]] = fmul double [[MATRIXEXT_US_3]], [[MATRIXEXT8_US_3]] -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP24]] -; CHECK-NEXT: [[MATRIXEXT11_US_3:%.*]] = load double, ptr [[TMP27]], align 8 -; CHECK-NEXT: [[SUB_US_3:%.*]] = fsub double [[MATRIXEXT11_US_3]], [[MUL_US_3]] -; CHECK-NEXT: store double [[SUB_US_3]], ptr [[TMP27]], align 8 -; CHECK-NEXT: [[INC_US_3]] = add nuw nsw i32 [[K_013_US_3]], 1 -; CHECK-NEXT: [[CMP2_US_3:%.*]] = icmp ult i32 [[INC_US_3]], [[I]] -; CHECK-NEXT: br i1 [[CMP2_US_3]], label [[FOR_BODY4_US_3]], label [[FOR_COND_CLEANUP]] +; CHECK-NEXT: [[INC13_US]] = add nuw nsw i32 [[J_012_US]], 1 +; CHECK-NEXT: [[CMP_US:%.*]] = icmp ult i32 [[J_012_US]], 3 +; CHECK-NEXT: br i1 [[CMP_US]], label [[FOR_COND1_PREHEADER_US]], label [[FOR_COND_CLEANUP]] ; CHECK: for.cond.cleanup: ; CHECK-NEXT: ret void ;