Index: include/llvm-c/Transforms/IPO.h
===================================================================
--- include/llvm-c/Transforms/IPO.h
+++ include/llvm-c/Transforms/IPO.h
@@ -34,6 +34,9 @@
 /** See llvm::createConstantMergePass function. */
 void LLVMAddConstantMergePass(LLVMPassManagerRef PM);
 
+/** See llvm::createCalledValuePropagationPass function. */
+void LLVMAddCalledValuePropagationPass(LLVMPassManagerRef PM);
+
 /** See llvm::createDeadArgEliminationPass function. */
 void LLVMAddDeadArgEliminationPass(LLVMPassManagerRef PM);
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -94,6 +94,7 @@
 void initializeCallGraphWrapperPassPass(PassRegistry&);
 void initializeCodeGenPreparePass(PassRegistry&);
 void initializeConstantHoistingLegacyPassPass(PassRegistry&);
+void initializeCalledValuePropagationLegacyPassPass(PassRegistry&);
 void initializeConstantMergeLegacyPassPass(PassRegistry&);
 void initializeConstantPropagationPass(PassRegistry&);
 void initializeCorrelatedValuePropagationPass(PassRegistry&);
Index: include/llvm/LinkAllPasses.h
===================================================================
--- include/llvm/LinkAllPasses.h
+++ include/llvm/LinkAllPasses.h
@@ -80,6 +80,7 @@
       (void) llvm::createCFLSteensAAWrapperPass();
       (void) llvm::createStructurizeCFGPass();
       (void) llvm::createLibCallsShrinkWrapPass();
+      (void) llvm::createCalledValuePropagationPass();
       (void) llvm::createConstantMergePass();
       (void) llvm::createConstantPropagationPass();
       (void) llvm::createCostModelAnalysisPass();
Index: include/llvm/Transforms/IPO.h
===================================================================
--- include/llvm/Transforms/IPO.h
+++ include/llvm/Transforms/IPO.h
@@ -216,6 +216,10 @@
 /// manager.
 ModulePass *createBarrierNoopPass();
 
+/// createCalledValuePropagationPass - Attach metadata to indirect call sites
+/// indicating the set of functions they may target at run-time.
+ModulePass *createCalledValuePropagationPass();
+
 /// What to do with the summary when running passes that operate on it.
 enum class PassSummaryAction {
   None,     ///< Do nothing.
Index: include/llvm/Transforms/IPO/CalledValuePropagation.h
===================================================================
--- /dev/null
+++ include/llvm/Transforms/IPO/CalledValuePropagation.h
@@ -0,0 +1,35 @@
+//===- CalledValuePropagation.h - Propagate called values -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a transformation that attaches !targets metadata to
+// indirect call sites. For a given call site, the metadata, if present,
+// indicates the set of functions the call site could possibly target at
+// run-time. This metadata is added to indirect call sites when the set of
+// possible targets can be determined by analysis and is known to be small. The
+// analysis driving the transformation is similar to constant propagation and
+// makes use of the generic sparse propagation solver.
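+//
+// For illustration only (a sketch, not produced by this header; @foo and @bar
+// are hypothetical functions), the annotated IR has the following shape:
+//
+//   call void %fptr(), !targets !0
+//   ...
+//   !0 = !{void ()* @foo, void ()* @bar}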
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
+#define LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class CalledValuePropagationPass
+    : public PassInfoMixin<CalledValuePropagationPass> {
+public:
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
+};
+}
+
+#endif // LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
Index: lib/Passes/PassBuilder.cpp
===================================================================
--- lib/Passes/PassBuilder.cpp
+++ lib/Passes/PassBuilder.cpp
@@ -63,6 +63,7 @@
 #include "llvm/Transforms/GCOVProfiler.h"
 #include "llvm/Transforms/IPO/AlwaysInliner.h"
 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
+#include "llvm/Transforms/IPO/CalledValuePropagation.h"
 #include "llvm/Transforms/IPO/ConstantMerge.h"
 #include "llvm/Transforms/IPO/CrossDSOCFI.h"
 #include "llvm/Transforms/IPO/DeadArgumentElimination.h"
@@ -572,6 +573,10 @@
   // years, it should be re-analyzed.
   MPM.addPass(IPSCCPPass());
 
+  // Attach metadata to indirect call sites indicating the set of functions
+  // they may target at run-time. This should follow IPSCCP.
+  MPM.addPass(CalledValuePropagationPass());
+
   // Optimize globals to try and fold them into constants.
   MPM.addPass(GlobalOptPass());
@@ -907,6 +912,10 @@
   // opens opportunities for globalopt (and inlining) by substituting function
   // pointers passed as arguments to direct uses of functions.
   MPM.addPass(IPSCCPPass());
+
+  // Attach metadata to indirect call sites indicating the set of functions
+  // they may target at run-time. This should follow IPSCCP.
+  MPM.addPass(CalledValuePropagationPass());
 }
 
 // Now deduce any function attributes based in the current code.
Index: lib/Passes/PassRegistry.def
===================================================================
--- lib/Passes/PassRegistry.def
+++ lib/Passes/PassRegistry.def
@@ -39,6 +39,7 @@
 #define MODULE_PASS(NAME, CREATE_PASS)
 #endif
 MODULE_PASS("always-inline", AlwaysInlinerPass())
+MODULE_PASS("called-value-propagation", CalledValuePropagationPass())
 MODULE_PASS("constmerge", ConstantMergePass())
 MODULE_PASS("cross-dso-cfi", CrossDSOCFIPass())
 MODULE_PASS("deadargelim", DeadArgumentEliminationPass())
Index: lib/Transforms/IPO/CMakeLists.txt
===================================================================
--- lib/Transforms/IPO/CMakeLists.txt
+++ lib/Transforms/IPO/CMakeLists.txt
@@ -2,6 +2,7 @@
   AlwaysInliner.cpp
   ArgumentPromotion.cpp
   BarrierNoopPass.cpp
+  CalledValuePropagation.cpp
   ConstantMerge.cpp
   CrossDSOCFI.cpp
   DeadArgumentElimination.cpp
Index: lib/Transforms/IPO/CalledValuePropagation.cpp
===================================================================
--- /dev/null
+++ lib/Transforms/IPO/CalledValuePropagation.cpp
@@ -0,0 +1,409 @@
+//===- CalledValuePropagation.cpp - Propagate called values -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a transformation that attaches !targets metadata to
+// indirect call sites. For a given call site, the metadata, if present,
+// indicates the set of functions the call site could possibly target at
+// run-time.
+// This metadata is added to indirect call sites when the set of possible
+// targets can be determined by analysis and is known to be small. The
+// analysis driving the transformation is similar to constant propagation and
+// makes use of the generic sparse propagation solver.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/IPO/CalledValuePropagation.h"
+#include "llvm/Analysis/SparsePropagation.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/Transforms/IPO.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "called-value-propagation"
+
+/// The maximum number of functions to track per lattice value. Once the number
+/// of functions a call site can possibly target exceeds this threshold, its
+/// lattice value becomes overdefined. The number of possible lattice values is
+/// bounded by Ch(F, M), where F is the number of functions in the module and M
+/// is MaxFunctionsPerValue. As such, this value should be kept very small. We
+/// likely can't do anything useful for call sites with a large number of
+/// possible targets, anyway.
+static cl::opt<unsigned> MaxFunctionsPerValue(
+    "cvp-max-functions-per-value", cl::Hidden, cl::init(4),
+    cl::desc("The maximum number of functions to track per lattice value"));
+
+/// Determine if the given type is a pointer to a function. Function pointers
+/// are the only values we track. Although relying on a value's type may cause
+/// us to miss some values that are later called, we are conservative about
+/// this. It's likely better to not waste space tracking irrelevant values.
+static bool isPointerToFunction(Type *Ty) {
+  if (auto *PtrTy = dyn_cast<PointerType>(Ty))
+    return PtrTy->getElementType()->isFunctionTy();
+  return false;
+}
+
+/// Determine if we can track the arguments and return values of the given
+/// function.
+static bool canTrackFunctionInterprocedurally(Function *F) {
+  return F->hasExactDefinition() && F->hasLocalLinkage() &&
+         !F->hasFnAttribute(Attribute::Naked) && !F->hasAddressTaken();
+}
+
+/// Determine if we can track the values stored in the given global variable.
+static bool canTrackGlobalVariableInterprocedurally(GlobalVariable *GV) {
+  if (GV->isConstant() || !GV->hasLocalLinkage() ||
+      !GV->hasDefinitiveInitializer())
+    return false;
+  for (User *U : GV->users()) {
+    if (auto *Store = dyn_cast<StoreInst>(U)) {
+      if (Store->getValueOperand() == GV || Store->isVolatile())
+        return false;
+    } else if (auto *Load = dyn_cast<LoadInst>(U)) {
+      if (Load->isVolatile())
+        return false;
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
+
+namespace {
+
+/// The lattice value type used by our custom lattice function. It holds the
+/// lattice state, and a vector of functions.
+class CVPLatticeVal {
+public:
+  /// The states of the lattice values. Only the FunctionSet state is
+  /// interesting. It indicates the set of functions to which an LLVM value may
+  /// refer.
+  enum CVPLatticeValTy { Undefined, FunctionSet, Overdefined, Untracked };
+
+  /// Construct a lattice value in the given state. This is useful for creating
+  /// values in the undefined, overdefined, and untracked states.
+  CVPLatticeVal(CVPLatticeValTy LV) : LV(LV) {}
+
+  /// Construct a lattice value in the FunctionSet state holding the given
+  /// functions.
+  CVPLatticeVal(ArrayRef<Function *> Funcs) : LV(FunctionSet) {
+    assert(Funcs.size() <= MaxFunctionsPerValue &&
+           "Function set initialized with too many values");
+    Functions.append(Funcs.begin(), Funcs.end());
+  }
+
+  /// Get a reference to the functions held by this lattice value. The number
+  /// of functions will be zero for states other than FunctionSet.
+  ArrayRef<Function *> getFunctions() const { return Functions; }
+
+  /// Returns true if the lattice value is in the FunctionSet state.
+  bool isFunctionSet() const { return LV == FunctionSet; }
+
+  /// An implementation of the less than operator to enable uniquing.
+  bool operator<(const CVPLatticeVal &O) const {
+    return LV != O.LV ? LV < O.LV : Functions < O.Functions;
+  }
+
+private:
+  /// Holds the state this lattice value is in.
+  CVPLatticeValTy LV;
+
+  /// Holds the actual lattice value. This is a set of functions indicating the
+  /// possible targets of call sites. This set is empty for lattice values in
+  /// the undefined, overdefined, and untracked states. The maximum size of the
+  /// set is controlled by MaxFunctionsPerValue.
+  SmallVector<Function *, 4> Functions;
+};
+
+/// The custom lattice function used by the generic sparse propagation solver.
+/// It handles merging lattice values and computing new lattice values for
+/// constants, arguments, values returned from trackable functions, and values
+/// located in trackable global variables. It also computes the lattice values
+/// that change as a result of executing instructions.
+class CVPLatticeFunc : public AbstractLatticeFunction {
+public:
+  typedef DenseMap<Value *, LatticeVal> StateTy;
+
+  CVPLatticeFunc(LatticeVal UndefinedVal, LatticeVal OverdefinedVal,
+                 LatticeVal UntrackedVal, std::set<CVPLatticeVal> &LatticeVals)
+      : AbstractLatticeFunction(UndefinedVal, OverdefinedVal, UntrackedVal),
+        LatticeVals(LatticeVals) {}
+
+  /// Determine if we should care about the given value in our analysis. Since
+  /// we're only interested in called values, we limit the interesting values
+  /// to pointers to functions. Although relying on a value's type may cause us
+  /// to miss some values that are later called, we are conservative about
+  /// this. It's likely better to not waste space tracking irrelevant values.
+  /// @{
+  bool IsUntrackedValue(Value *V) {
+    return !isPointerToFunction(V->getType());
+  }
+  bool IsUntrackedGlobalVariable(GlobalVariable *GV) {
+    return !isPointerToFunction(GV->getValueType());
+  }
+  bool IsUntrackedFunction(Function *F) {
+    return !isPointerToFunction(F->getReturnType());
+  }
+  /// @}
+
+  /// Compute a new lattice value for the given constant. The constant, after
+  /// stripping any pointer casts, should be a Function. We ignore null and
+  /// undef values as an optimization, since calling these values is undefined
+  /// behavior.
+  LatticeVal ComputeConstant(Constant *C) {
+    if (C->isNullValue() || isa<UndefValue>(C))
+      return &*LatticeVals.emplace(CVPLatticeVal::FunctionSet).first;
+    if (auto *F = dyn_cast<Function>(C->stripPointerCasts()))
+      return &*LatticeVals.emplace(F).first;
+    return getOverdefinedVal();
+  }
+
+  /// Compute a new lattice value for the given argument. If we can't track the
+  /// argument's parent function, we mark it overdefined.
+  LatticeVal ComputeArgument(Argument *A) {
+    if (canTrackFunctionInterprocedurally(A->getParent()))
+      return getUndefVal();
+    return getOverdefinedVal();
+  }
+
+  /// Compute a new lattice value to track the return values of the given
+  /// function. If we can't track the function, we mark the value overdefined.
+  LatticeVal ComputeFunction(Function *F) {
+    if (canTrackFunctionInterprocedurally(F))
+      return getUndefVal();
+    return getOverdefinedVal();
+  }
+
+  /// Compute a new lattice value to track the values stored in the given
+  /// global variable. If we can't track the global variable, we mark the value
+  /// overdefined.
+  LatticeVal ComputeGlobalVariable(GlobalVariable *GV) {
+    if (canTrackGlobalVariableInterprocedurally(GV))
+      return ComputeConstant(GV->getInitializer());
+    return getOverdefinedVal();
+  }
+
+  /// Merge the two given lattice values. The interesting cases are merging two
+  /// FunctionSet values and a FunctionSet value with an Undefined value. For
+  /// these cases, we simply union the function sets. If the size of the union
+  /// is greater than the maximum functions we track, the merged value is
+  /// overdefined.
+  LatticeVal MergeValues(LatticeVal LV1, LatticeVal LV2) {
+    auto *X = static_cast<CVPLatticeVal *>(LV1);
+    auto *Y = static_cast<CVPLatticeVal *>(LV2);
+
+    if (X == getUntrackedVal() || Y == getUntrackedVal())
+      return getUntrackedVal();
+    if (X == getOverdefinedVal() || Y == getOverdefinedVal())
+      return getOverdefinedVal();
+    if (X == getUndefVal() && Y == getUndefVal())
+      return getUndefVal();
+
+    SmallVector<Function *, 8> Union;
+    Union.append(X->getFunctions().begin(), X->getFunctions().end());
+    Union.append(Y->getFunctions().begin(), Y->getFunctions().end());
+    std::sort(Union.begin(), Union.end());
+    Union.erase(std::unique(Union.begin(), Union.end()), Union.end());
+
+    if (Union.size() > MaxFunctionsPerValue)
+      return getOverdefinedVal();
+
+    return &*LatticeVals.emplace(Union).first;
+  }
+
+  /// Compute the lattice values that change as a result of executing the given
+  /// instruction. The changed values are stored in \p CV. We handle just a few
+  /// kinds of instructions since we're only propagating values that can be
+  /// called.
+  void ComputeInstructionState(Instruction &I, StateTy &CV, SparseSolver &S) {
+    switch (I.getOpcode()) {
+    case Instruction::Call: return visitCallSite(cast<CallInst>(&I), CV, S);
+    case Instruction::Invoke: return visitCallSite(cast<InvokeInst>(&I), CV, S);
+    case Instruction::Load: return visitLoad(*cast<LoadInst>(&I), CV, S);
+    case Instruction::Ret: return visitReturn(*cast<ReturnInst>(&I), CV, S);
+    case Instruction::Select: return visitSelect(*cast<SelectInst>(&I), CV, S);
+    case Instruction::Store: return visitStore(*cast<StoreInst>(&I), CV, S);
+    default: return visitInst(I, CV, S);
+    }
+  }
+
+  /// Print the state of the given lattice value.
+  void PrintValue(LatticeVal LV, raw_ostream &OS) {
+    if (LV == getUndefVal())
+      OS << "Undefined  ";
+    else if (LV == getOverdefinedVal())
+      OS << "Overdefined";
+    else if (LV == getUntrackedVal())
+      OS << "Untracked  ";
+    else
+      OS << "FunctionSet";
+  }
+
+  /// We collect a set of indirect calls when visiting call sites. This method
+  /// returns a reference to that set.
+  SmallPtrSetImpl<Instruction *> &getIndirectCalls() { return IndirectCalls; }
+
+private:
+  /// Holds the indirect calls we encounter during the analysis. We will attach
+  /// metadata to these calls after the analysis indicating the functions the
+  /// calls can possibly target.
+  SmallPtrSet<Instruction *, 16> IndirectCalls;
+
+  /// The generic sparse propagation solver typedefs lattice values to void
+  /// pointers. Since our lattice values can't fit in the space of a pointer,
+  /// we have to unique them in a std::set and let the generic solver index
+  /// this set.
+  std::set<CVPLatticeVal> &LatticeVals;
+
+  /// Handle return instructions. The function's return state is the merge of
+  /// the returned value state and the function's return state.
+  void visitReturn(ReturnInst &I, StateTy &CV, SparseSolver &S) {
+    Function *F = I.getParent()->getParent();
+    if (!F->getReturnType()->isVoidTy())
+      CV[F] = MergeValues(S.getOrInitValueState(I.getReturnValue()),
+                          S.getOrInitFunctionState(F));
+  }
+
+  /// Handle call sites. The state of a called function's formal arguments is
+  /// the merge of the argument state with the call site's corresponding actual
+  /// argument state. The call site state is the merge of the call site state
+  /// with the returned value state of the called function.
+  void visitCallSite(CallSite CS, StateTy &CV, SparseSolver &S) {
+    Function *F = CS.getCalledFunction();
+    Instruction *I = CS.getInstruction();
+
+    // If this is an indirect call, save it so we can quickly revisit it when
+    // attaching metadata.
+    if (!F)
+      IndirectCalls.insert(CS.getInstruction());
+
+    // If we can't track the function, we can't track the value it returns.
+    if (!F || !canTrackFunctionInterprocedurally(F)) {
+      CV[I] = getUntrackedVal();
+      return;
+    }
+
+    // Inform the solver that the called function is executable, and perform
+    // the merges for the arguments and return value.
+    S.MarkBlockExecutable(&F->front());
+    for (Argument &A : F->args())
+      CV[&A] = MergeValues(S.getOrInitValueState(&A),
+                           S.getOrInitValueState(CS.getArgument(A.getArgNo())));
+    CV[I] = MergeValues(S.getOrInitValueState(I), S.getOrInitFunctionState(F));
+  }
+
+  /// Handle select instructions. The select instruction state is the merge of
+  /// the true and false value states.
+  void visitSelect(SelectInst &I, StateTy &CV, SparseSolver &S) {
+    CV[&I] = MergeValues(S.getOrInitValueState(I.getTrueValue()),
+                         S.getOrInitValueState(I.getFalseValue()));
+  }
+
+  /// Handle load instructions. If the pointer operand of the load is a global
+  /// variable, we attempt to track the value. The loaded value state is the
+  /// merge of the loaded value state with the global variable state.
+  void visitLoad(LoadInst &I, StateTy &CV, SparseSolver &S) {
+    auto *GV = dyn_cast<GlobalVariable>(I.getPointerOperand());
+    CV[&I] = !GV ? getUntrackedVal()
+                 : MergeValues(S.getOrInitValueState(&I),
+                               S.getOrInitGlobalVariableState(GV));
+  }
+
+  /// Handle store instructions. If the pointer operand of the store is a
+  /// global variable, we attempt to track the value. The global variable state
+  /// is the merge of the stored value state with the global variable state.
+  void visitStore(StoreInst &I, StateTy &CV, SparseSolver &S) {
+    auto *GV = dyn_cast<GlobalVariable>(I.getPointerOperand());
+    if (!GV)
+      return;
+    CV[GV] = MergeValues(S.getOrInitValueState(I.getValueOperand()),
+                         S.getOrInitGlobalVariableState(GV));
+  }
+
+  /// Handle all other instructions. All other instructions are marked
+  /// untracked.
+  void visitInst(Instruction &I, StateTy &CV, SparseSolver &S) {
+    CV[&I] = getUntrackedVal();
+  }
+};
+}
+
+static bool runCVP(Module &M) {
+  // The generic sparse propagation solver typedefs lattice values to void
+  // pointers. Since our lattice values can't fit in the space of a pointer, we
+  // have to unique them in a std::set and let the generic solver index this
+  // set.
+  std::set<CVPLatticeVal> LatticeVals;
+
+  // The generic sparse propagation solver requires that we define values for
+  // the undefined, overdefined, and untracked states. Construct those now.
+  auto *Undefined = &*LatticeVals.emplace(CVPLatticeVal::Undefined).first;
+  auto *Overdefined = &*LatticeVals.emplace(CVPLatticeVal::Overdefined).first;
+  auto *Untracked = &*LatticeVals.emplace(CVPLatticeVal::Untracked).first;
+
+  // Our custom lattice function and generic sparse propagation solver.
+  CVPLatticeFunc Lattice(Undefined, Overdefined, Untracked, LatticeVals);
+  SparseSolver Solver(&Lattice);
+
+  // For each function in the module, if we can't track its arguments and
+  // return values, let the generic solver assume it is executable.
+  for (Function &F : M)
+    if (!F.isDeclaration() && !canTrackFunctionInterprocedurally(&F))
+      Solver.MarkBlockExecutable(&F.front());
+
+  // Solve our custom lattice. In doing so, we will also build a set of
+  // indirect call sites.
+  Solver.Solve();
+
+  // Attach metadata to the indirect call sites that were collected indicating
+  // the set of functions they can possibly target.
+  bool Changed = false;
+  MDBuilder MDB(M.getContext());
+  for (Instruction *C : Lattice.getIndirectCalls()) {
+    CallSite CS(C);
+    auto *LV = static_cast<CVPLatticeVal *>(
+        Solver.getValueState(CS.getCalledValue()));
+    if (!LV->isFunctionSet() || LV->getFunctions().size() < 2)
+      continue;
+    MDNode *Targets = MDB.createTargets(LV->getFunctions());
+    C->setMetadata(LLVMContext::MD_targets, Targets);
+    Changed = true;
+  }
+
+  return Changed;
+}
+
+PreservedAnalyses CalledValuePropagationPass::run(Module &M,
+                                                  ModuleAnalysisManager &) {
+  if (!runCVP(M))
+    return PreservedAnalyses::all();
+  return PreservedAnalyses::none();
+}
+
+namespace {
+class CalledValuePropagationLegacyPass : public ModulePass {
+public:
+  static char ID;
+
+  CalledValuePropagationLegacyPass() : ModulePass(ID) {
+    initializeCalledValuePropagationLegacyPassPass(
+        *PassRegistry::getPassRegistry());
+  }
+
+  bool runOnModule(Module &M) override {
+    if (skipModule(M))
+      return false;
+    return runCVP(M);
+  }
+};
+}
+
+char CalledValuePropagationLegacyPass::ID = 0;
+INITIALIZE_PASS(CalledValuePropagationLegacyPass, "called-value-propagation",
+                "Called Value Propagation", false, false)
+
+ModulePass *llvm::createCalledValuePropagationPass() {
+  return new CalledValuePropagationLegacyPass();
+}
Index: lib/Transforms/IPO/IPO.cpp
===================================================================
--- lib/Transforms/IPO/IPO.cpp
+++ lib/Transforms/IPO/IPO.cpp
@@ -25,6 +25,7 @@
 
 void llvm::initializeIPO(PassRegistry &Registry) {
   initializeArgPromotionPass(Registry);
+  initializeCalledValuePropagationLegacyPassPass(Registry);
   initializeConstantMergeLegacyPassPass(Registry);
   initializeCrossDSOCFIPass(Registry);
   initializeDAEPass(Registry);
@@ -67,6 +68,10 @@
   unwrap(PM)->add(createArgumentPromotionPass());
 }
 
+void LLVMAddCalledValuePropagationPass(LLVMPassManagerRef PM) {
+  unwrap(PM)->add(createCalledValuePropagationPass());
+}
+
 void LLVMAddConstantMergePass(LLVMPassManagerRef PM) {
   unwrap(PM)->add(createConstantMergePass());
 }
Index: lib/Transforms/IPO/PassManagerBuilder.cpp
===================================================================
--- lib/Transforms/IPO/PassManagerBuilder.cpp
+++ lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -471,6 +471,7 @@
   addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
 
   MPM.add(createIPSCCPPass());          // IP SCCP
+  MPM.add(createCalledValuePropagationPass());
   MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
   // Promote any localized global vars.
   MPM.add(createPromoteMemoryToRegisterPass());
@@ -706,6 +707,10 @@
   // opens opportunities for globalopt (and inlining) by substituting function
   // pointers passed as arguments to direct uses of functions.
   PM.add(createIPSCCPPass());
+
+  // Attach metadata to indirect call sites indicating the set of functions
+  // they may target at run-time. This should follow IPSCCP.
+  PM.add(createCalledValuePropagationPass());
 }
 
 // Infer attributes about definitions. The readnone attribute in particular is
Index: test/Other/new-pm-defaults.ll
===================================================================
--- test/Other/new-pm-defaults.ll
+++ test/Other/new-pm-defaults.ll
@@ -78,6 +78,7 @@
 ; CHECK-O-NEXT: Running pass: LowerExpectIntrinsicPass
 ; CHECK-O-NEXT: Finished llvm::Function pass manager run.
 ; CHECK-O-NEXT: Running pass: IPSCCPPass
+; CHECK-O-NEXT: Running pass: CalledValuePropagationPass
 ; CHECK-O-NEXT: Running pass: GlobalOptPass
 ; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PromotePass>
 ; CHECK-O-NEXT: Running pass: DeadArgumentEliminationPass
Index: test/Other/new-pm-lto-defaults.ll
===================================================================
--- test/Other/new-pm-lto-defaults.ll
+++ test/Other/new-pm-lto-defaults.ll
@@ -34,6 +34,7 @@
 ; CHECK-O2-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Function
 ; CHECK-O2-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
 ; CHECK-O2-NEXT: Running pass: IPSCCPPass
+; CHECK-O2-NEXT: Running pass: CalledValuePropagationPass
 ; CHECK-O-NEXT: Running pass: ModuleToPostOrderCGSCCPassAdaptor<{{.*}}PostOrderFunctionAttrsPass>
 ; CHECK-O-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}SCC
 ; CHECK-O1-NEXT: Running analysis: InnerAnalysisManagerProxy<{{.*}}Function
Index: test/Other/new-pm-thinlto-defaults.ll
===================================================================
--- test/Other/new-pm-thinlto-defaults.ll
+++ test/Other/new-pm-thinlto-defaults.ll
@@ -74,6 +74,7 @@
 ; CHECK-O-NEXT: Running pass: LowerExpectIntrinsicPass
 ; CHECK-O-NEXT: Finished llvm::Function pass manager run.
 ; CHECK-O-NEXT: Running pass: IPSCCPPass
+; CHECK-O-NEXT: Running pass: CalledValuePropagationPass
 ; CHECK-O-NEXT: Running pass: GlobalOptPass
 ; CHECK-O-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PromotePass>
 ; CHECK-O-NEXT: Running pass: DeadArgumentEliminationPass
Index: test/Transforms/CalledValuePropagation/simple-arguments.ll
===================================================================
--- /dev/null
+++ test/Transforms/CalledValuePropagation/simple-arguments.ll
@@ -0,0 +1,83 @@
+; RUN: opt -called-value-propagation -S < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnueabi"
+
+
+; This test checks that we propagate the functions through arguments and attach
+; !targets metadata to the call. Such metadata can enable optimizations of this
+; code sequence.
+;
+; For example, the code below illustrates a contrived sort-like algorithm that
+; accepts a pointer to a comparison function. Since the indirect call to the
+; comparison function has only two targets, the call can be promoted to two
+; direct calls using an if-then-else. The loop can then be unswitched and the
+; called functions inlined. This essentially produces two loops, one
+; specialized for each comparison.
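+;
+; As a rough sketch only (not produced or checked by this test; the value and
+; block names are made up), a promotion based on the two recorded targets
+; could rewrite the indirect call into a compare-and-branch over direct calls:
+;
+;   %is.ugt = icmp eq i1 (i64*, i64*)* %cmp, @ugt
+;   br i1 %is.ugt, label %call.ugt, label %call.ule
+; call.ugt:
+;   %r1 = call i1 @ugt(i64* %tmp1, i64* %tmp2)
+;   br label %cont
+; call.ule:
+;   %r2 = call i1 @ule(i64* %tmp1, i64* %tmp2)
+;   br label %cont
+; cont:
+;   %tmp3 = phi i1 [ %r1, %call.ugt ], [ %r2, %call.ule ]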
+;
+; CHECK: %tmp3 = call i1 %cmp(i64* %tmp1, i64* %tmp2), !targets ![[MD:[0-9]+]]
+; CHECK: ![[MD]] = !{i1 (i64*, i64*)* @ugt, i1 (i64*, i64*)* @ule}
+;
+define void @test_argument(i64* %x, i64 %n, i1 %flag) {
+entry:
+  %tmp0 = sub i64 %n, 1
+  br i1 %flag, label %then, label %else
+
+then:
+  call void @arrange_data(i64* %x, i64 %tmp0, i1 (i64*, i64*)* @ugt)
+  br label %merge
+
+else:
+  call void @arrange_data(i64* %x, i64 %tmp0, i1 (i64*, i64*)* @ule)
+  br label %merge
+
+merge:
+  ret void
+}
+
+define internal void @arrange_data(i64* %x, i64 %n, i1 (i64*, i64*)* %cmp) {
+entry:
+  %tmp0 = icmp eq i64 %n, 1
+  br i1 %tmp0, label %merge, label %for.body
+
+for.body:
+  %i = phi i64 [ 0, %entry ], [ %i.next, %cmp.false ]
+  %i.next = add nuw nsw i64 %i, 1
+  %tmp1 = getelementptr inbounds i64, i64* %x, i64 %i
+  %tmp2 = getelementptr inbounds i64, i64* %x, i64 %i.next
+  %tmp3 = call i1 %cmp(i64* %tmp1, i64* %tmp2)
+  br i1 %tmp3, label %cmp.true, label %cmp.false
+
+cmp.true:
+  call void @swap(i64* %tmp1, i64* %tmp2)
+  br label %cmp.false
+
+cmp.false:
+  %cond = icmp slt i64 %i.next, %n
+  br i1 %cond, label %for.body, label %for.end
+
+for.end:
+  %tmp4 = sub i64 %n, 1
+  call void @arrange_data(i64* %x, i64 %tmp4, i1 (i64*, i64*)* %cmp)
+  br label %merge
+
+merge:
+  ret void
+}
+
+define internal i1 @ugt(i64* %a, i64* %b) {
+entry:
+  %tmp0 = load i64, i64* %a
+  %tmp1 = load i64, i64* %b
+  %tmp2 = icmp ugt i64 %tmp0, %tmp1
+  ret i1 %tmp2
+}
+
+define internal i1 @ule(i64* %a, i64* %b) {
+entry:
+  %tmp0 = load i64, i64* %a
+  %tmp1 = load i64, i64* %b
+  %tmp2 = icmp ule i64 %tmp0, %tmp1
+  ret i1 %tmp2
+}
+
+declare void @swap(i64*, i64*)
Index: test/Transforms/CalledValuePropagation/simple-memory.ll
===================================================================
--- /dev/null
+++ test/Transforms/CalledValuePropagation/simple-memory.ll
@@ -0,0 +1,62 @@
+; RUN: opt -called-value-propagation -S < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnueabi"
+
+@global_function = internal unnamed_addr global void ()* null, align 8
+@global_array = common unnamed_addr global i64* null, align 8
+
+; This test checks that we propagate the functions through an internal global
+; variable, and attach !targets metadata to the call. Such metadata can enable
+; optimizations of this code sequence.
+;
+; For example, since both of the targeted functions have the "nounwind" and
+; "readnone" function attributes, LICM can be made to move the call and the
+; function pointer load outside the loop. This would then enable the loop
+; vectorizer to vectorize the sum reduction.
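+;
+; As a rough sketch only (not produced or checked by this test), a hoisted
+; form might place the function pointer load and the annotated call in the
+; loop preheader, leaving only the sum reduction in the loop body:
+;
+;   preheader:
+;     %fp = load void ()*, void ()** @global_function
+;     call void %fp(), !targets !0
+;     br label %for.body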
+;
+; CHECK: call void %tmp0(), !targets ![[MD:[0-9]+]]
+; CHECK: ![[MD]] = !{void ()* @invariant_1, void ()* @invariant_2}
+;
+define i64 @test_memory_entry(i64 %n, i1 %flag) {
+entry:
+  br i1 %flag, label %then, label %else
+
+then:
+  store void ()* @invariant_1, void ()** @global_function
+  br label %merge
+
+else:
+  store void ()* @invariant_2, void ()** @global_function
+  br label %merge
+
+merge:
+  %tmp1 = call i64 @test_memory(i64 %n)
+  ret i64 %tmp1
+}
+
+define internal i64 @test_memory(i64 %n) {
+entry:
+  %array = load i64*, i64** @global_array
+  br label %for.body
+
+for.body:
+  %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
+  %r = phi i64 [ 0, %entry ], [ %tmp3, %for.body ]
+  %tmp0 = load void ()*, void ()** @global_function
+  call void %tmp0()
+  %tmp1 = getelementptr inbounds i64, i64* %array, i64 %i
+  %tmp2 = load i64, i64* %tmp1
+  %tmp3 = add i64 %tmp2, %r
+  %i.next = add nuw nsw i64 %i, 1
+  %cond = icmp slt i64 %i.next, %n
+  br i1 %cond, label %for.body, label %for.end
+
+for.end:
+  %tmp4 = phi i64 [ %tmp3, %for.body ]
+  ret i64 %tmp4
+}
+
+declare void @invariant_1() #0
+declare void @invariant_2() #0
+
+attributes #0 = { nounwind readnone }
Index: test/Transforms/CalledValuePropagation/simple-select.ll
===================================================================
--- /dev/null
+++ test/Transforms/CalledValuePropagation/simple-select.ll
@@ -0,0 +1,39 @@
+; RUN: opt -called-value-propagation -S < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnueabi"
+
+@global_function = internal unnamed_addr global void ()* null, align 8
+@global_scalar = internal unnamed_addr global i64 zeroinitializer
+
+; This test checks that we propagate the functions through a select
+; instruction, and attach !targets metadata to the call. Such metadata can
+; enable optimizations of this code sequence.
+;
+; For example, since both of the targeted functions have the "norecurse"
+; attribute, the function attributes pass can be made to infer that
+; "@test_select" is also norecurse. This would allow the globals optimizer to
+; localize "@global_scalar". The function could then be further simplified to
+; always return the constant "1", eliminating the load and store instructions.
+;
+; CHECK: call void %tmp0(), !targets ![[MD:[0-9]+]]
+; CHECK: ![[MD]] = !{void ()* @norecurse_1, void ()* @norecurse_2}
+;
+define i64 @test_select_entry(i1 %flag) {
+entry:
+  %tmp0 = call i64 @test_select(i1 %flag)
+  ret i64 %tmp0
+}
+
+define internal i64 @test_select(i1 %flag) {
+entry:
+  %tmp0 = select i1 %flag, void ()* @norecurse_1, void ()* @norecurse_2
+  store i64 1, i64* @global_scalar
+  call void %tmp0()
+  %tmp1 = load i64, i64* @global_scalar
+  ret i64 %tmp1
+}
+
+declare void @norecurse_1() #0
+declare void @norecurse_2() #0
+
+attributes #0 = { norecurse }
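+;
+; As a rough sketch only (not produced or checked by this test), the chain of
+; optimizations described above could in principle reduce @test_select to:
+;
+;   define internal i64 @test_select(i1 %flag) {
+;     %tmp0 = select i1 %flag, void ()* @norecurse_1, void ()* @norecurse_2
+;     call void %tmp0(), !targets !0
+;     ret i64 1
+;   }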