Index: llvm/trunk/lib/Analysis/InlineCost.cpp
===================================================================
--- llvm/trunk/lib/Analysis/InlineCost.cpp
+++ llvm/trunk/lib/Analysis/InlineCost.cpp
@@ -134,6 +134,7 @@
   void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
   bool isGEPOffsetConstant(GetElementPtrInst &GEP);
+  bool isGEPFree(GetElementPtrInst &GEP);
   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
   bool simplifyCallSite(Function *F, CallSite CS);
   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
@@ -331,6 +332,21 @@
   return true;
 }
 
+/// \brief Use TTI to check whether a GEP is free.
+///
+/// Respects any simplified values known during the analysis of this callsite.
+bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
+  SmallVector<Value *, 4> Indices;
+  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
+    if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
+      Indices.push_back(SimpleOp);
+    else
+      Indices.push_back(*I);
+  return TargetTransformInfo::TCC_Free ==
+         TTI.getGEPCost(GEP.getSourceElementType(), GEP.getPointerOperand(),
+                        Indices);
+}
+
 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
   // Check whether inlining will turn a dynamic alloca into a static
   // alloca and handle that case.
@@ -396,7 +412,7 @@
     // Non-constant GEPs aren't folded, and disable SROA.
     if (SROACandidate)
       disableSROA(CostIt);
-    return false;
+    return isGEPFree(I);
   }
 
   // Add the result as a new mapping to Base + Offset.
@@ -422,7 +438,7 @@
   // Variable GEPs will require math and will disable SROA.
   if (SROACandidate)
     disableSROA(CostIt);
-  return false;
+  return isGEPFree(I);
 }
 
 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
Index: llvm/trunk/test/Transforms/Inline/gep-cost.ll
===================================================================
--- llvm/trunk/test/Transforms/Inline/gep-cost.ll
+++ llvm/trunk/test/Transforms/Inline/gep-cost.ll
@@ -0,0 +1,29 @@
+; RUN: opt -inline < %s -S -debug-only=inline-cost 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnu"
+
+define void @outer([4 x i32]* %ptr, i32 %i) {
+  call void @inner1([4 x i32]* %ptr, i32 %i)
+  call void @inner2([4 x i32]* %ptr, i32 %i)
+  ret void
+}
+; The gep in inner1() is reg+reg, which is a legal addressing mode for AArch64.
+; Thus, both the gep and ret can be simplified.
+; CHECK: Analyzing call of inner1
+; CHECK: NumInstructionsSimplified: 2
+; CHECK: NumInstructions: 2
+define void @inner1([4 x i32]* %ptr, i32 %i) {
+  %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 0, i32 %i
+  ret void
+}
+
+; The gep in inner2() is reg+imm+reg, which is not a legal addressing mode for
+; AArch64. Thus, only the ret can be simplified and not the gep.
+; CHECK: Analyzing call of inner2
+; CHECK: NumInstructionsSimplified: 1
+; CHECK: NumInstructions: 2
+define void @inner2([4 x i32]* %ptr, i32 %i) {
+  %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 1, i32 %i
+  ret void
+}
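For reference, a rough sketch of the address arithmetic behind the two test cases, assuming only the 16-byte [4 x i32] layout implied by the datalayout above; this is why TTI.getGEPCost reports different costs for the two geps on AArch64:

; inner1: %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 0, i32 %i
;   effective address = %ptr + 4*%i
;   base register plus scaled index register (reg+reg) is a legal AArch64
;   addressing mode, so the gep is reported as TCC_Free.
;
; inner2: %G = getelementptr inbounds [4 x i32], [4 x i32]* %ptr, i32 1, i32 %i
;   effective address = %ptr + 16 + 4*%i
;   base plus immediate offset plus scaled index (reg+imm+reg) is not a single
;   AArch64 addressing mode, so the gep is not free.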