Index: lib/Analysis/InlineCost.cpp
===================================================================
--- lib/Analysis/InlineCost.cpp
+++ lib/Analysis/InlineCost.cpp
@@ -134,6 +134,7 @@
   void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
   bool isGEPOffsetConstant(GetElementPtrInst &GEP);
+  bool isGEPFree(GetElementPtrInst &GEP);
   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
   bool simplifyCallSite(Function *F, CallSite CS);
   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
@@ -331,6 +332,21 @@
   return true;
 }
 
+/// \brief Use TTI to check whether a GEP is free.
+///
+/// Respects any simplified values known during the analysis of this callsite.
+bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
+  SmallVector<Value *, 4> Indices;
+  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
+    if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
+      Indices.push_back(SimpleOp);
+    else
+      Indices.push_back(*I);
+  return TargetTransformInfo::TCC_Free ==
+         TTI.getGEPCost(GEP.getSourceElementType(), GEP.getPointerOperand(),
+                        Indices);
+}
+
 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
   // Check whether inlining will turn a dynamic alloca into a static
   // alloca and handle that case.
@@ -396,7 +412,7 @@
       // Non-constant GEPs aren't folded, and disable SROA.
      if (SROACandidate)
        disableSROA(CostIt);
-      return false;
+      return isGEPFree(I);
    }
 
    // Add the result as a new mapping to Base + Offset.
@@ -422,7 +438,7 @@
   // Variable GEPs will require math and will disable SROA.
   if (SROACandidate)
     disableSROA(CostIt);
-  return false;
+  return isGEPFree(I);
 }
 
 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
Index: test/Transforms/Inline/gep-cost.ll
===================================================================
--- /dev/null
+++ test/Transforms/Inline/gep-cost.ll
@@ -0,0 +1,26 @@
+; REQUIRES: asserts
+; RUN: opt -inline < %s -S -debug-only=inline-cost 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+define void @outer1([4 x i32]* %ptr1, [4 x [4 x i32]]* %ptr2, i32 %i) {
+  call void @inner1([4 x i32]* %ptr1, i32 %i)
+  call void @inner2([4 x [4 x i32]]* %ptr2, i32 %i)
+  ret void
+}
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 2
+define void @inner1([4 x i32]* %ptr, i32 %i) {
+  %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 %i
+  ret void
+}
+
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 1
+; CHECK: NumInstructions: 2
+define void @inner2([4 x [4 x i32]]* %ptr, i32 %i) {
+  %G = getelementptr inbounds [4 x [4 x i32]], [4 x [4 x i32]]* %ptr, i32 1, i32 %i
+  ret void
+}
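
Note on the test expectations (not part of the patch): inner1's single-index GEP
computes base + 16*%i, which fits a reg+reg addressing mode on AArch64, so the
GEP is reported free and counted as simplified along with the ret (2 of 2).
inner2's GEP folds to base + 64 + 16*%i, which needs both a constant offset and
a scaled register, so it is not free and only the ret is simplified (1 of 2).
The standalone C++ sketch below mirrors the shape of that decision; AddrMode
and isLegalAArch64Like are hypothetical names for illustration, not LLVM's
actual isLegalAddressingMode API.

// Standalone sketch (hypothetical names, not LLVM's API): the default TTI
// getGEPCost implementation asks the target whether the folded GEP offset
// forms a legal addressing mode; only then is the GEP considered TCC_Free.
struct AddrMode {
  long long BaseOffs; // constant byte offset folded from constant indices
  long long Scale;    // byte stride applied to a variable index (0 if none)
};

// AArch64-flavored legality: reg, reg+imm, and reg+reg<<n are representable,
// but a constant offset combined with a scaled register is not a single mode.
static bool isLegalAArch64Like(AddrMode AM) {
  return AM.BaseOffs == 0 || AM.Scale == 0;
}

int main() {
  // inner1: gep [4 x i32], %ptr, i32 %i      -> base + 16*%i       (free)
  AddrMode Inner1 = {0, 16};
  // inner2: gep [4 x [4 x i32]], %ptr, 1, %i -> base + 64 + 16*%i  (not free)
  AddrMode Inner2 = {64, 16};
  return (isLegalAArch64Like(Inner1) && !isLegalAArch64Like(Inner2)) ? 0 : 1;
}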