Index: lib/Transforms/Scalar/LoopStrengthReduce.cpp
===================================================================
--- lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -4993,7 +4993,7 @@
       // Unless the addressing mode will not be folded.
       if (!Ops.empty() && LU.Kind == LSRUse::Address &&
           isAMCompletelyFolded(TTI, LU, F)) {
-        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty);
+        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), nullptr);
         Ops.clear();
         Ops.push_back(SE.getUnknown(FullV));
       }
Index: test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
===================================================================
--- test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
+++ test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
@@ -29,12 +29,11 @@
 ; CHECK-NEXT: [[LSRAR:%[^,]+]] = phi i8* [ %scevgep, %for.body2 ], [ %maxarray, %for.body2.preheader ]
 ; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ %lsr.iv.next, %for.body2 ], [ %0, %for.body2.preheader ]
 ; CHECK-NOT: = phi i64 [ {{.*}}, %for.body2 ], [ {{.*}}, %for.body2.preheader ]
-; CHECK: [[LSRINT:%[^,]+]] = ptrtoint i8* [[LSRAR]] to i64
 ; CHECK: [[SCEVGEP1:%[^,]+]] = getelementptr i8, i8* [[LSRAR]], i64 1
 ; CHECK: {{.*}} = load i8, i8* [[SCEVGEP1]], align 1
-; CHECK: [[SCEVGEP2:%[^,]+]] = getelementptr i8, i8* %1, i64 [[LSRINT]]
+; CHECK: [[SCEVGEP2:%[^,]+]] = getelementptr i8, i8* [[LSRAR]], i64 %0
 ; CHECK: {{.*}} = load i8, i8* [[SCEVGEP2]], align 1
-; CHECK: [[SCEVGEP3:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSRINT]]
+; CHECK: [[SCEVGEP3:%[^,]+]] = getelementptr i8, i8* [[LSRAR]], i64 {{.*}}
 ; CHECK: store i8 {{.*}}, i8* [[SCEVGEP3]], align 1
 ; CHECK: [[LSRNEXT:%[^,]+]] = add i64 [[LSR]], -1
 ; CHECK: %exitcond = icmp ne i64 [[LSRNEXT]], 0