diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -1003,6 +1003,15 @@
   // one. If LowerGEP is enabled, a structure index is accumulated in the
   // constant offset. LowerToSingleIndexGEPs or lowerToArithmetics will later
   // handle the constant offset and won't need a new structure index.
+
+  // If the GEP has only one index and that index is also used by non-GEP
+  // instructions, it is not beneficial to do the split.
+  if (GEP->getNumOperands() == 2) {
+    Value *Idx = GEP->getOperand(1);
+    if (!llvm::all_of(Idx->users(),
+                      [](const User *U) { return isa<GetElementPtrInst>(U); }))
+      return false;
+  }
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
     if (GTI.isSequential()) {
diff --git a/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
--- a/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
@@ -48,3 +48,30 @@
   br label %do.body.i.backedge
 }
 
+
+define i64 @test_loop2(i64* %arg, i64* %save) {
+bb:
+; CHECK-LABEL: bb:
+; CHECK:     %tmp1 = load i64, i64* null, align 8
+; CHECK:     %tmp22 = add nsw i64 %tmp1, -1
+; CHECK-NOT: %uglygep = getelementptr i8, i8* %0, i64 %1
+; CHECK-NOT: %uglygep2 = getelementptr i8, i8* %uglygep, i64 -8
+
+  %tmp1 = load i64, i64* null, align 8
+  %tmp22 = add nsw i64 %tmp1, -1
+  %tmp23 = getelementptr inbounds i64, i64* %arg, i64 %tmp22
+  %tmp24 = load i64, i64* %tmp23, align 2
+  br label %bb25
+
+bb25:                                             ; preds = %bb25, %bb
+  %tmp26 = phi i64 [ 1, %bb ], [ 0, %bb25 ]
+  %tmp29 = icmp eq i64 0, %tmp24
+  %tmp30 = select i1 %tmp29, i64 0, i64 %tmp26
+  store i64 %tmp30, i64* %save
+  %tmp31 = icmp eq i64 0, %tmp22
+  br i1 %tmp31, label %bb32, label %bb25
+
+bb32:                                             ; preds = %bb25
+  ret i64 0
+
+}
\ No newline at end of file
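
To illustrate what the new early-out prevents, here is a sketch of the lowering that LowerToSingleIndexGEPs would otherwise apply to %tmp23 in the test above. The post-split output is hypothetical (the %0/%1 names are assumed from the CHECK-NOT lines); the point is that %tmp22 is still needed by the icmp in the loop, so the add cannot be removed and the split only grows the instruction count:

  ; Original: one GEP; %tmp22 is shared with the loop-exit compare.
  %tmp22 = add nsw i64 %tmp1, -1
  %tmp23 = getelementptr inbounds i64, i64* %arg, i64 %tmp22

  ; After splitting (hypothetical output): the constant part of the index
  ; (-1 element = -8 bytes for i64) is peeled into a trailing byte-offset
  ; GEP, while the variable part %tmp1 is scaled by the element size and
  ; applied first.
  %0 = bitcast i64* %arg to i8*
  %1 = shl i64 %tmp1, 3
  %uglygep = getelementptr i8, i8* %0, i64 %1
  %uglygep2 = getelementptr i8, i8* %uglygep, i64 -8
  %2 = bitcast i8* %uglygep2 to i64*
  %tmp24 = load i64, i64* %2, align 2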