diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -97,6 +97,7 @@
 #define LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
 
 #include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/PassManager.h"
@@ -536,6 +537,14 @@
     return FuncRWInstsMap[&F];
   }
 
+  /// Return the TargetLibraryInfo for function \p F.
+  TargetLibraryInfo *getTargetLibraryInfoForFunction(const Function &F) {
+    return FuncTLIMap[&F];
+  }
+
+  /// Return the datalayout used in the module.
+  const DataLayout &getDL() { return DL; }
+
 private:
   /// A map type from functions to opcode to instruction maps.
   using FuncInstOpcodeMapTy = DenseMap<const Function *, OpcodeInstMapTy>;
@@ -543,6 +552,9 @@
   /// A map type from functions to their read or write instructions.
   using FuncRWInstsMapTy = DenseMap<const Function *, InstructionVectorTy>;
 
+  /// A map type from functions to their TLI.
+  using FuncTLIMapTy = DenseMap<const Function *, TargetLibraryInfo *>;
+
   /// A nested map that remembers all instructions in a function with a certain
   /// instruction opcode (Instruction::getOpcode()).
   FuncInstOpcodeMapTy FuncInstOpcodeMap;
@@ -550,6 +562,9 @@
   /// A map from functions to their instructions that may read or write memory.
   FuncRWInstsMapTy FuncRWInstsMap;
 
+  /// A map from functions to their TLI.
+  FuncTLIMapTy FuncTLIMap;
+
   /// The datalayout used in the module.
   const DataLayout &DL;
@@ -664,14 +679,17 @@
   ///
   /// \param F The function that is checked for attribute opportunities.
   /// \param Whitelist If not null, a set limiting the attribute opportunities.
+  /// \param TLIGetter Helper to query the TargetLibraryInfo analysis result
+  ///                  for a function.
   ///
   /// Note that abstract attribute instances are generally created even if the
   /// IR already contains the information they would deduce. The most important
   /// reason for this is the single interface of the abstract attribute
   /// instance, which can be queried without the need to look at the IR in
   /// various places.
   void identifyDefaultAbstractAttributes(
-      Function &F, DenseSet<const char *> *Whitelist = nullptr);
+      Function &F, std::function<TargetLibraryInfo &(Function &)> &TLIGetter,
+      DenseSet<const char *> *Whitelist = nullptr);
 
   /// Return true if \p AA (or its context instruction) is assumed dead.
   ///
@@ -1467,6 +1484,30 @@
   static const char ID;
 };
 
+/// An abstract attribute that tracks whether the malloc calls in a function
+/// can be converted into stack allocations.
+struct AAHeapToStack : public StateWrapper<BooleanState, AbstractAttribute>,
+                       public IRPosition {
+  AAHeapToStack(const IRPosition &IRP) : IRPosition(IRP) {}
+
+  /// Returns true if HeapToStack conversion is assumed to be possible.
+  bool isAssumedHeapToStack() const { return getAssumed(); }
+
+  /// Returns true if HeapToStack conversion is known to be possible.
+  bool isKnownHeapToStack() const { return getKnown(); }
+
+  /// Return an IR position, see struct IRPosition.
+  ///
+  ///{
+  IRPosition &getIRPosition() { return *this; }
+  const IRPosition &getIRPosition() const { return *this; }
+  ///}
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAHeapToStack &createForPosition(const IRPosition &IRP, Attributor &A);
+
+  /// Unique ID (due to the unique address)
+  static const char ID;
+};
+
 } // end namespace llvm
 
 #endif // LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
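For orientation before the implementation below: AAHeapToStack follows the existing abstract-attribute pattern, so other attributes query it through the Attributor like any of the AAs above. A minimal sketch, not part of the patch; it assumes it runs inside some other attribute's updateImpl, where *this and a Function &F are in scope, mirroring the getAAFor<AANoFree> uses later in this patch:

    // Sketch only: query the heap-to-stack state for function F.
    const auto &H2SAA =
        A.getAAFor<AAHeapToStack>(*this, IRPosition::function(F));
    if (H2SAA.isAssumedHeapToStack()) {
      // The tracked malloc calls in F are, so far, assumed convertible to
      // stack allocations; isKnownHeapToStack() holds once this is proven.
    }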
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -25,6 +25,7 @@
 #include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/Attributes.h"
@@ -120,6 +121,12 @@
         "manifestation of attributes -- may issue false-positive errors"),
     cl::init(false));
 
+static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
+                                       cl::init(false), cl::Hidden);
+
+static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size",
+                                       cl::init(1024), cl::Hidden);
+
 /// Logic operators for the change status enum class.
 ///
 ///{
@@ -2603,6 +2610,172 @@
 /// NoCapture attribute deduction for a call site return value.
 using AANoCaptureCallSiteReturned = AANoCaptureFloating;
 
+/// ----------------------- Heap-To-Stack Conversion ---------------------------
+struct AAHeapToStackImpl : public AAHeapToStack {
+  AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
+
+  const std::string getAsStr() const override {
+    return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
+  }
+
+  ChangeStatus manifest(Attributor &A) override {
+    assert(getState().isValidState() &&
+           "Attempted to manifest an invalid state!");
+
+    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
+    Function *F = getAssociatedFunction();
+
+    for (const Instruction *Malloc : MallocCalls) {
+      Instruction *MallocCall = const_cast<Instruction *>(Malloc);
+      // This malloc cannot be replaced.
+      if (BadMallocCalls.count(MallocCall))
+        continue;
+
+      // Erase the associated frees first; the malloc's uses are then replaced
+      // by the new alloca.
+      for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
+        LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
+        FreeCall->eraseFromParent();
+        HasChanged = ChangeStatus::CHANGED;
+      }
+
+      LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
+                        << "\n");
+
+      unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
+      Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
+                                       MallocCall->getOperand(0));
+      F->begin()->getInstList().insert(F->begin()->begin(), AI);
+
+      if (AI->getType() != MallocCall->getType()) {
+        auto *BC = new BitCastInst(AI, MallocCall->getType());
+        BC->insertAfter(AI);
+        AI = BC;
+      }
+
+      MallocCall->replaceAllUsesWith(AI);
+
+      // An invoked malloc is replaced by an unconditional branch to the
+      // normal destination before the invoke is erased.
+      if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
+        auto *NBB = II->getNormalDest();
+        BranchInst::Create(NBB, MallocCall->getParent());
+        MallocCall->eraseFromParent();
+      } else {
+        MallocCall->eraseFromParent();
+      }
+      HasChanged = ChangeStatus::CHANGED;
+    }
+
+    return HasChanged;
+  }
+
+  /// Collection of all malloc calls in a function.
+  SmallSetVector<const Instruction *, 4> MallocCalls;
+
+  /// Collection of malloc calls that cannot be converted.
+  DenseSet<const Instruction *> BadMallocCalls;
+
+  /// A map from each malloc call to the set of associated free calls.
+  DenseMap<const Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
+
+  ChangeStatus updateImpl(Attributor &A) override;
+};
+
+ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
+  const Function *F = getAssociatedFunction();
+  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
+
+  // Walk all (transitive) uses of a malloc result and decide whether they are
+  // compatible with turning the allocation into an alloca.
+  auto UsesCheck = [&](Instruction &I) {
+    SmallPtrSet<const Use *, 8> Visited;
+    SmallVector<const Use *, 8> Worklist;
+
+    for (Use &U : I.uses())
+      Worklist.push_back(&U);
+
+    while (!Worklist.empty()) {
+      const Use *U = Worklist.pop_back_val();
+      if (!Visited.insert(U).second)
+        continue;
+
+      const auto *UserI = U->getUser();
+
+      if (isa<LoadInst>(UserI) || isa<StoreInst>(UserI))
+        continue;
+
+      // NOTE: Right now, if a function that has the malloc pointer as an
+      // argument frees memory, we assume that the malloc pointer is freed.
+      if (auto *CB = dyn_cast<CallBase>(UserI)) {
+        // Record the free so manifest() can erase it with the malloc.
+        if (isFreeCall(UserI, TLI)) {
+          FreesForMalloc[&I].insert(
+              cast<Instruction>(const_cast<User *>(UserI)));
+          continue;
+        }
+
+        // If the callee does not free memory we are fine.
+        const auto &NoFreeAA =
+            A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(*CB));
+        if (!NoFreeAA.isAssumedNoFree())
+          return false;
+
+        // If the callee does not capture the pointer we are fine.
+        //
+        // TODO: Add a nofree callsite argument attribute to indicate that the
+        // pointer argument is not freed.
+        unsigned ArgNo = U - CB->arg_begin();
+        const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
+            *this, IRPosition::callsite_argument(*CB, ArgNo));
+        if (!NoCaptureAA.isAssumedNoCapture())
+          return false;
+        continue;
+      }
+
+      // Follow the uses of GEPs and bitcasts of the allocation.
+      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI)) {
+        for (Use &U : const_cast<User *>(UserI)->uses())
+          Worklist.push_back(&U);
+        continue;
+      }
+
+      // Unknown user.
+      return false;
+    }
+    return true;
+  };
+
+  auto MallocCheck = [&](Instruction &I) {
+    if (!isMallocLikeFn(&I, TLI))
+      return true;
+    if (BadMallocCalls.count(&I))
+      return true;
+
+    // Only consider mallocs of a small constant size, bounded by
+    // -max-heap-to-stack-size.
+    auto *CSize = dyn_cast<ConstantInt>(I.getOperand(0));
+    if (!CSize || !CSize->getValue().sle(MaxHeapToStackSize))
+      return true;
+
+    if (UsesCheck(I))
+      MallocCalls.insert(&I);
+    else {
+      BadMallocCalls.insert(&I);
+      // Forget any frees we recorded for this malloc.
+      FreesForMalloc.erase(&I);
+    }
+    return true;
+  };
+
+  size_t NumBadMallocs = BadMallocCalls.size();
+
+  A.checkForAllCallLikeInstructions(MallocCheck, *this);
+
+  // The state changed iff a malloc call newly became unconvertible.
+  if (NumBadMallocs != BadMallocCalls.size())
+    return ChangeStatus::CHANGED;
+
+  return ChangeStatus::UNCHANGED;
+}
+
+struct AAHeapToStackFunction final : public AAHeapToStackImpl {
+  AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
+
+  /// See AbstractAttribute::trackStatistics()
+  void trackStatistics() const override {
+    STATS_DECL(MallocCalls, Function, "Number of MallocCalls");
+    BUILD_STAT_NAME(MallocCalls, Function) += MallocCalls.size();
+  }
+};
+
+using AAHeapToStackCallSite = AAHeapToStackFunction;
+
 /// ----------------------------------------------------------------------------
 /// Attributor
 /// ----------------------------------------------------------------------------
@@ -2949,7 +3122,11 @@
 }
 
 void Attributor::identifyDefaultAbstractAttributes(
-    Function &F, DenseSet<const char *> *Whitelist) {
+    Function &F, std::function<TargetLibraryInfo &(Function &)> &TLIGetter,
+    DenseSet<const char *> *Whitelist) {
+
+  if (EnableHeapToStack)
+    InfoCache.FuncTLIMap[&F] = &TLIGetter(F);
 
   IRPosition FPos = IRPosition::function(F);
 
   // Check for dead BasicBlocks in every function.
   // We need dead instruction detection because we do not want to deal with
   // broken IR in which SSA rules do not apply.
   checkAndRegisterAA<AAIsDeadFunction>(FPos, *this, /* Whitelist */ nullptr);
 
   // Every function might be "will-return".
   checkAndRegisterAA<AAWillReturnFunction>(FPos, *this, Whitelist);
 
   // Every function can be nounwind.
   checkAndRegisterAA<AANoUnwindFunction>(FPos, *this, Whitelist);
@@ -2973,6 +3150,10 @@
   // Every function might be "no-return".
   checkAndRegisterAA<AANoReturnFunction>(FPos, *this, Whitelist);
 
+  // Every function might be applicable for Heap-To-Stack conversion.
+  if (EnableHeapToStack)
+    checkAndRegisterAA<AAHeapToStackFunction>(FPos, *this, Whitelist);
+
   // Return attributes are only appropriate if the return type is non void.
   Type *ReturnType = F.getReturnType();
   if (!ReturnType->isVoidTy()) {
@@ -3007,11 +3188,11 @@
     // Every argument with pointer type might be marked dereferenceable.
     checkAndRegisterAA<AADereferenceableArgument>(ArgPos, *this, Whitelist);
 
-    // Every argument with pointer type might be marked align.
-    checkAndRegisterAA<AAAlignArgument>(ArgPos, *this, Whitelist);
-
     // Every argument with pointer type might be marked nocapture.
     checkAndRegisterAA<AANoCaptureArgument>(ArgPos, *this, Whitelist);
+
+    // Every argument with pointer type might be marked align.
+    checkAndRegisterAA<AAAlignArgument>(ArgPos, *this, Whitelist);
   }
 }
@@ -3130,7 +3311,8 @@
 /// ----------------------------------------------------------------------------
 /// Pass (Manager) Boilerplate
 /// ----------------------------------------------------------------------------
 
-static bool runAttributorOnModule(Module &M) {
+static bool runAttributorOnModule(
+    Module &M, std::function<TargetLibraryInfo &(Function &)> &TLIGetter) {
   if (DisableAttributor)
     return false;
@@ -3164,14 +3346,21 @@
     // Populate the Attributor with abstract attribute opportunities in the
     // function and the information cache with IR information.
-    A.identifyDefaultAbstractAttributes(F);
+    A.identifyDefaultAbstractAttributes(F, TLIGetter);
   }
 
   return A.run() == ChangeStatus::CHANGED;
 }
 
 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
-  if (runAttributorOnModule(M)) {
+  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+
+  std::function<TargetLibraryInfo &(Function &)> TLIGetter =
+      [&](Function &F) -> TargetLibraryInfo & {
+    return FAM.getResult<TargetLibraryAnalysis>(F);
+  };
+
+  if (runAttributorOnModule(M, TLIGetter)) {
     // FIXME: Think about passes we will preserve and add them here.
     return PreservedAnalyses::none();
   }
@@ -3190,11 +3379,18 @@
   bool runOnModule(Module &M) override {
     if (skipModule(M))
      return false;
-    return runAttributorOnModule(M);
+    std::function<TargetLibraryInfo &(Function &)> TLIGetter =
+        [&](Function &F) -> TargetLibraryInfo & {
+      return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+    };
+
+    return runAttributorOnModule(M, TLIGetter);
   }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     // FIXME: Think about passes we will preserve and add them here.
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
     AU.setPreservesCFG();
   }
 };
@@ -3218,6 +3414,7 @@
 const char AADereferenceable::ID = 0;
 const char AAAlign::ID = 0;
 const char AANoCapture::ID = 0;
+const char AAHeapToStack::ID = 0;
 
 // Macro magic to create the static generator function for attributes that
 // follow the naming scheme.
@@ -3236,8 +3433,8 @@
     CLASS *AA = nullptr;                                                       \
     switch (IRP.getPositionKind()) {                                           \
       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
-      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
+      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                            \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")        \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")        \
@@ -3272,6 +3469,7 @@
 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
+CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
 
 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
@@ -3287,5 +3485,6 @@
 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                       "Deduce and propagate attributes", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                     "Deduce and propagate attributes", false, false)
diff --git a/llvm/test/Transforms/FunctionAttrs/heap_to_stack.ll b/llvm/test/Transforms/FunctionAttrs/heap_to_stack.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/FunctionAttrs/heap_to_stack.ll
@@ -0,0 +1,120 @@
+; RUN: opt -passes=attributor --attributor-disable=false -enable-heap-to-stack-conversion=true -S < %s | FileCheck %s
+
+declare noalias i8* @malloc(i64)
+
+declare void @nocapture_func_frees_pointer(i8* nocapture)
+
+declare void @func_throws(...)
+
+declare void @sync_func(i8* %p)
+
+declare void @no_sync_func(i8* nocapture %p) nofree nosync willreturn
+
+declare void @nofree_func(i8* nocapture %p) nofree nosync willreturn
+
+declare i32 @no_return_call() noreturn
+
+declare void @free(i8* nocapture)
+
+; TEST 1 - negative, the pointer is freed in another function.
+
+define void @test1() {
+  %1 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: @malloc(i64 4)
+  ; CHECK-NEXT: @nocapture_func_frees_pointer(i8* %1)
+  tail call void @nocapture_func_frees_pointer(i8* %1)
+  tail call void (...) @func_throws()
+  tail call void @free(i8* %1)
+  ret void
+}
+
+; TEST 2 - negative, a call to a function that may sync and free the pointer.
+
+define void @test2() {
+  %1 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: @malloc(i64 4)
+  ; CHECK-NEXT: @sync_func(i8* %1)
+  tail call void @sync_func(i8* %1)
+  tail call void @free(i8* %1)
+  ret void
+}
+
+; TEST 3 - 1 malloc, 1 free.
+
+define void @test3() {
+  %1 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: %1 = alloca i8, i64 4
+  ; CHECK-NEXT: @no_sync_func(i8* %1)
+  tail call void @no_sync_func(i8* %1)
+  ; CHECK-NOT: @free(i8* %1)
+  tail call void @free(i8* %1)
+  ret void
+}
+
+; TEST 4 - the malloc is converted even though there is no call to free.
+
+define void @test4() {
+  %1 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: %1 = alloca i8, i64 4
+  ; CHECK-NEXT: @nofree_func(i8* %1)
+  tail call void @nofree_func(i8* %1)
+  ret void
+}
+
+; TEST 5 - not all exit paths have a call to free.
+
+define void @test5(i32) {
+  %2 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: %2 = alloca i8, i64 4
+  ; CHECK-NEXT: icmp eq i32 %0, 0
+  %3 = icmp eq i32 %0, 0
+  br i1 %3, label %5, label %4
+
+4:                                                ; preds = %1
+  tail call void @nofree_func(i8* %2)
+  br label %6
+
+5:                                                ; preds = %1
+  tail call void @free(i8* %2)
+  ; CHECK-NOT: @free(i8* %2)
+  br label %6
+
+6:                                                ; preds = %5, %4
+  ret void
+}
+
+; TEST 6 - all exit paths have a call to free.
+
+define void @test6(i32) {
+  %2 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: %2 = alloca i8, i64 4
+  ; CHECK-NEXT: icmp eq i32 %0, 0
+  %3 = icmp eq i32 %0, 0
+  br i1 %3, label %5, label %4
+
+4:                                                ; preds = %1
+  tail call void @nofree_func(i8* %2)
+  tail call void @free(i8* %2)
+  ; CHECK-NOT: @free(i8* %2)
+  br label %6
+
+5:                                                ; preds = %1
+  tail call void @free(i8* %2)
+  ; CHECK-NOT: @free(i8* %2)
+  br label %6
+
+6:                                                ; preds = %5, %4
+  ret void
+}
+
+; TEST 7 - the free is dead, so the malloc cannot be transformed.
+
+define void @test7() {
+  %1 = tail call noalias i8* @malloc(i64 4)
+  ; CHECK: @malloc(i64 4)
+  ; CHECK-NEXT: call i32 @no_return_call()
+  tail call i32 @no_return_call()
+  ; This free is unreachable after the noreturn call, so the malloc cannot be
+  ; transformed.
+  tail call void @free(i8* %1)
+  ret void
+}
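For reference, the net effect asserted by the positive tests above, shown as a before/after sketch of TEST 3 (this is an illustration, not an additional test case): with -enable-heap-to-stack-conversion, a small constant-size malloc whose pointer is neither captured nor freed by callees is rewritten into an entry-block alloca, and the matching free is erased.

; Before the attributor runs:
;   %1 = tail call noalias i8* @malloc(i64 4)
;   tail call void @no_sync_func(i8* %1)
;   tail call void @free(i8* %1)
; After:
;   %1 = alloca i8, i64 4
;   tail call void @no_sync_func(i8* %1)
;   (the free call has been removed)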