diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -387,6 +387,7 @@
   bool simplifyCallSite(Function *F, CallBase &Call);
   template <typename Callable>
   bool simplifyInstruction(Instruction &I, Callable Evaluate);
+  bool simplifyIntrinsicCallIsConstant(CallBase &CB);
   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
 
   /// Return true if the given argument to the function being considered for
@@ -1531,6 +1532,27 @@
   return true;
 }
 
+/// Try to simplify a call to llvm.is.constant.
+///
+/// Duplicate the argument checking from CallAnalyzer::simplifyCallSite since
+/// we expect calls of this specific intrinsic to be infrequent.
+///
+/// FIXME: Given that we know CB's parent (F) caller
+/// (CandidateCall->getParent()->getParent()), we might be able to determine
+/// whether inlining F into F's caller would change how the call to
+/// llvm.is.constant would evaluate.
+bool CallAnalyzer::simplifyIntrinsicCallIsConstant(CallBase &CB) {
+  Value *Arg = CB.getArgOperand(0);
+  auto *C = dyn_cast<Constant>(Arg);
+
+  if (!C)
+    C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(Arg));
+
+  Type *RT = CB.getFunctionType()->getReturnType();
+  SimplifiedValues[&CB] = ConstantInt::get(RT, C ? 1 : 0);
+  return true;
+}
+
 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
   // Propagate constants through bitcasts.
if (simplifyInstruction(I, [&](SmallVectorImpl &COps) { @@ -2154,6 +2176,8 @@ if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0))) SROAArgValues[II] = SROAArg; return true; + case Intrinsic::is_constant: + return simplifyIntrinsicCallIsConstant(Call); } } diff --git a/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll @@ -0,0 +1,39 @@ +; RUN: opt %s -passes=inline -inline-threshold=20 -S | FileCheck %s + +declare i1 @llvm.is.constant.i64(i64) +declare void @foo() + +define void @callee(i64 %val) { + %cond = call i1 @llvm.is.constant.i64(i64 %val) + br i1 %cond, label %cond.true, label %cond.false + +cond.true: +; Rack up costs with a couple of function calls so that this function +; gets inlined only when @llvm.is.constant.i64 is folded. In reality, +; the "then" clause of __builtin_constant_p tends to have statements +; that fold very well, so the cost of the "then" clause is not a huge +; concern. + call void @foo() + call void @foo() + ret void + +cond.false: + ret void +} + +define void @caller(i64 %val) { +; CHECK-LABEL: @caller( +; CHECK-NEXT: [[COND_I:%.*]] = call i1 @llvm.is.constant.i64(i64 [[VAL:%.*]]) +; CHECK-NEXT: br i1 [[COND_I]], label %[[COND_TRUE_I:.*]], label %[[COND_FALSE_I:.*]] +; CHECK: [[COND_TRUE_I]]: +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: call void @foo() +; CHECK-NEXT: br label %[[CALLEE_EXIT:.*]] +; CHECK: [[COND_FALSE_I]]: +; CHECK-NEXT: br label %[[CALLEE_EXIT]] +; CHECK: [[CALLEE_EXIT]]: +; CHECK-NEXT: ret void +; + call void @callee(i64 %val) + ret void +}