Index: llvm/include/llvm/ADT/FunctionExtras.h
===================================================================
--- /dev/null
+++ llvm/include/llvm/ADT/FunctionExtras.h
@@ -0,0 +1,271 @@
+//===- FunctionExtras.h - Function type erasure utilities ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a collection of function (or more generally, callable)
+/// type erasure utilities supplementing those provided by the standard
+/// library in `<function>`.
+///
+/// It provides `unique_function`, which works like `std::function` but
+/// supports move-only callable objects.
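+///
+/// For example (an illustrative sketch mirroring the unit tests), a callable
+/// that owns a `std::unique_ptr` can be stored and invoked here, but would
+/// fail to compile with `std::function` because it is not copyable:
+///
+/// \code
+///   struct Callable {
+///     std::unique_ptr<int> A{new int(1)};
+///     int operator()(int B) { return *A + B; }
+///   };
+///   unique_function<int(int)> F = Callable();
+///   int R = F(2); // R == 3
+/// \endcode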
+///
+/// Future plans:
+/// - Add a `function` that provides const, volatile, and ref-qualified
+///   support, which doesn't work with `std::function`.
+/// - Provide support for specifying multiple signatures to type erase
+///   callable objects with an overload set, such as those produced by
+///   generic lambdas.
+/// - Expand to include a copyable utility that directly replaces
+///   std::function but brings the above improvements.
+///
+/// Note that LLVM's utilities are greatly simplified by not supporting
+/// allocators.
+///
+/// If the standard library ever begins to provide comparable facilities we
+/// can consider switching to those.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FUNCTION_EXTRAS_H
+#define LLVM_ADT_FUNCTION_EXTRAS_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+
+template <typename FunctionT> class unique_function;
+
+template <typename ReturnT, typename... ParamTs>
+class unique_function<ReturnT(ParamTs...)> {
+  static constexpr int InlineStorageSize = sizeof(void *) * 3;
+
+  union StorageUnionT {
+    // For out-of-line storage we keep a pointer to the underlying storage and
+    // the size. This is enough to deallocate the memory.
+    struct OutOfLineStorageT {
+      void *StoragePtr;
+      size_t Size;
+      size_t Alignment;
+    } OutOfLineStorage;
+    static_assert(
+        sizeof(OutOfLineStorageT) <= InlineStorageSize,
+        "Should always use all of the out-of-line storage for inline storage!");
+
+    // For in-line storage, we just provide an aligned character buffer. We
+    // provide three pointers worth of storage here, matching the footprint of
+    // the out-of-line storage descriptor above.
+    typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
+        InlineStorage;
+  } StorageUnion;
+
+  // Provide a type function to map parameters that won't observe extra copies
+  // or moves and which are small enough to likely pass in register to values
+  // and all other types to l-value reference types. We use this to compute
+  // the types used in our erased call utility to minimize copies and moves
+  // unless doing so would force things unnecessarily into memory.
+  //
+  // The heuristic used is related to common ABI register passing conventions.
+  // It doesn't have to be exact though, and in one way it is more strict
+  // because we want to still be able to observe either moves *or* copies.
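+  //
+  // For example, under this heuristic an `int` or `void *` parameter is
+  // passed through by value (trivially copyable and at most two pointers in
+  // size), while a `std::string` parameter, which is not trivially copy
+  // constructible, is adjusted to `std::string &`.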
+  template <typename T>
+  using AdjustedParamT = typename std::conditional<
+      !std::is_reference<T>::value &&
+          std::is_trivially_copy_constructible<T>::value &&
+          std::is_trivially_move_constructible<T>::value &&
+          sizeof(T) <= (2 * sizeof(void *)),
+      T, T &>::type;
+
+  // The type of the erased function pointer we use as a callback to dispatch
+  // to the stored callable when it is trivial to move and destroy.
+  using CallPtrT = ReturnT (*)(void *CallableAddr,
+                               AdjustedParamT<ParamTs>... Params);
+  using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
+  using DestroyPtrT = void (*)(void *CallableAddr);
+
+  /// A struct we use to aggregate three callbacks when we need the full set
+  /// of operations.
+  struct NonTrivialCallbacks {
+    CallPtrT CallPtr;
+    MovePtrT MovePtr;
+    DestroyPtrT DestroyPtr;
+  };
+
+  // Now we can create a pointer union between either a direct, trivial call
+  // pointer and a pointer to a static struct of the call, move, and destroy
+  // pointers. We do this to keep the footprint in this object to a single
+  // pointer while supporting all the necessary type-erased operations.
+  using CallbackPointerUnionT = PointerUnion<CallPtrT, NonTrivialCallbacks *>;
+
+  // A compressed pointer to either our dispatching callback or our table of
+  // dispatching callbacks and the flag for whether the callable itself is
+  // stored inline or not.
+  PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;
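+
+  // For example, a capture-less lambda (or one capturing only raw pointers)
+  // is trivially movable and destructible, so only its CallPtrT is stored in
+  // the union above; a callable holding a std::unique_ptr needs the full
+  // NonTrivialCallbacks table.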
+  bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }
+
+  bool isTrivialCallback() const {
+    return CallbackAndInlineFlag.getPointer().template is<CallPtrT>();
+  }
+
+  CallPtrT getTrivialCallback() const {
+    return CallbackAndInlineFlag.getPointer().template get<CallPtrT>();
+  }
+
+  NonTrivialCallbacks *getNonTrivialCallbacks() const {
+    return CallbackAndInlineFlag.getPointer()
+        .template get<NonTrivialCallbacks *>();
+  }
+
+  void *getInlineStorage() { return &StorageUnion.InlineStorage; }
+
+  void *getOutOfLineStorage() {
+    return StorageUnion.OutOfLineStorage.StoragePtr;
+  }
+  size_t getOutOfLineStorageSize() const {
+    return StorageUnion.OutOfLineStorage.Size;
+  }
+  size_t getOutOfLineStorageAlignment() const {
+    return StorageUnion.OutOfLineStorage.Alignment;
+  }
+
+  void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
+    StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
+  }
+
+  template <typename CallableT>
+  static ReturnT CallImpl(void *CallableAddr,
+                          AdjustedParamT<ParamTs>... Params) {
+    return (*reinterpret_cast<CallableT *>(CallableAddr))(
+        std::forward<ParamTs>(Params)...);
+  }
+
+  template <typename CallableT>
+  static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
+    new (LHSCallableAddr)
+        CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
+  }
+
+  template <typename CallableT>
+  static void DestroyImpl(void *CallableAddr) noexcept {
+    reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
+  }
+
+public:
+  unique_function() = default;
+  unique_function(std::nullptr_t /*null_callable*/) {}
+
+  ~unique_function() {
+    if (!CallbackAndInlineFlag.getPointer())
+      return;
+
+    // Cache this value so we don't re-check it after type-erased operations.
+    bool IsInlineStorage = isInlineStorage();
+
+    if (!isTrivialCallback())
+      getNonTrivialCallbacks()->DestroyPtr(
+          IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());
+
+    if (!IsInlineStorage)
+      deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
+                        getOutOfLineStorageAlignment());
+  }
+
+  unique_function(unique_function &&RHS) noexcept {
+    // Copy the callback and inline flag.
+    CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;
+
+    if (!isInlineStorage()) {
+      // The out-of-line case is easiest to move.
+      StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
+    } else if (isTrivialCallback()) {
+      // Move is trivial, just memcpy the bytes across.
+      memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
+    } else {
+      // Non-trivial move, so dispatch to a type-erased implementation.
+      getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
+                                        RHS.getInlineStorage());
+    }
+
+    // Clear the old callback and inline flag to get back to as-if-null.
+    RHS.CallbackAndInlineFlag = {};
+
+#ifndef NDEBUG
+    // In debug builds, we also scribble across the rest of the storage.
+    memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
+#endif
+  }
+
+  unique_function &operator=(unique_function &&RHS) noexcept {
+    if (this == &RHS)
+      return *this;
+
+    // Because we don't try to provide any exception safety guarantees we can
+    // implement move assignment very simply by first destroying the current
+    // object and then move-constructing over top of it.
+    this->~unique_function();
+    new (this) unique_function(std::move(RHS));
+    return *this;
+  }
+
+  template <typename CallableT> unique_function(CallableT Callable) {
+    bool IsInlineStorage = true;
+    void *CallableAddr = getInlineStorage();
+    if (sizeof(CallableT) > InlineStorageSize ||
+        alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
+      IsInlineStorage = false;
+      // Allocate out-of-line storage. FIXME: Use an explicit alignment
+      // parameter in C++17 mode.
+      auto Size = sizeof(CallableT);
+      auto Alignment = alignof(CallableT);
+      CallableAddr = allocate_buffer(Size, Alignment);
+      setOutOfLineStorage(CallableAddr, Size, Alignment);
+    }
+
+    // Now move into the storage.
+    new (CallableAddr) CallableT(std::move(Callable));
+
+    // See if we can create a trivial callback.
+    // FIXME: We should use constexpr if here and below to avoid instantiating
+    // the non-trivial static objects when unnecessary. While the linker
+    // should remove them, it is still wasteful.
+    if (std::is_trivially_move_constructible<CallableT>::value &&
+        std::is_trivially_destructible<CallableT>::value) {
+      CallbackAndInlineFlag = {&CallImpl<CallableT>, IsInlineStorage};
+      return;
+    }
+
+    // Otherwise, we need to point at an object that contains all the
+    // different type-erased behaviors needed. Create a static instance of the
+    // callbacks struct here and then use a pointer to that.
+    static NonTrivialCallbacks Callbacks = {
+        &CallImpl<CallableT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
+
+    CallbackAndInlineFlag = {&Callbacks, IsInlineStorage};
+  }
+
+  ReturnT operator()(ParamTs... Params) {
+    void *CallableAddr =
+        isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
+
+    return (isTrivialCallback()
+                ? getTrivialCallback()
+                : getNonTrivialCallbacks()->CallPtr)(CallableAddr, Params...);
+  }
+
+  explicit operator bool() const {
+    return (bool)CallbackAndInlineFlag.getPointer();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FUNCTION_EXTRAS_H
Index: llvm/include/llvm/Support/Compiler.h
===================================================================
--- llvm/include/llvm/Support/Compiler.h
+++ llvm/include/llvm/Support/Compiler.h
@@ -17,6 +17,9 @@
 
 #include "llvm/Config/llvm-config.h"
 
+#include <new>
+#include <stddef.h>
+
 #if defined(_MSC_VER)
 #include <sal.h>
 #endif
@@ -503,4 +506,46 @@
 #define LLVM_ENABLE_EXCEPTIONS 1
 #endif
 
+namespace llvm {
+
+/// Allocate a buffer of memory with the given size and alignment.
+///
+/// When the compiler supports aligned operator new, this will use it to
+/// handle even over-aligned allocations.
+///
+/// However, this doesn't make any attempt to leverage the fancier techniques
+/// like posix_memalign due to portability. It is mostly intended to allow
+/// compatibility with platforms that, after aligned allocation was added, use
+/// reduced default alignment.
+inline void *allocate_buffer(size_t Size, size_t Alignment) {
+  return ::operator new(Size
+#if __cpp_aligned_new
+                        ,
+                        std::align_val_t(Alignment)
+#endif
+  );
+}
+
+/// Deallocate a buffer of memory with the given size and alignment.
+///
+/// If supported, this will use the sized delete operator. Also if supported,
+/// this will pass the alignment to the delete operator.
+///
+/// The pointer must have been allocated with the corresponding new operator,
+/// most likely using the above helper.
+inline void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment) {
+  ::operator delete(Ptr
+#if __cpp_sized_deallocation
+                    ,
+                    Size
+#endif
+#if __cpp_aligned_new
+                    ,
+                    std::align_val_t(Alignment)
+#endif
+  );
+}
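+
+// For example (illustrative only), these helpers are intended to be used as
+// a matched pair around placement construction:
+//
+//   void *Buf = allocate_buffer(sizeof(T), alignof(T));
+//   T *Obj = new (Buf) T(...);
+//   // ... use *Obj ...
+//   Obj->~T();
+//   deallocate_buffer(Buf, sizeof(T), alignof(T));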
+
+} // End namespace llvm
+
 #endif
Index: llvm/include/llvm/Support/PointerLikeTypeTraits.h
===================================================================
--- llvm/include/llvm/Support/PointerLikeTypeTraits.h
+++ llvm/include/llvm/Support/PointerLikeTypeTraits.h
@@ -16,6 +16,7 @@
 #define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
 
 #include "llvm/Support/DataTypes.h"
+#include <assert.h>
 #include <type_traits>
 
 namespace llvm {
@@ -111,6 +112,39 @@
   enum { NumLowBitsAvailable = 0 };
 };
 
+/// Provide suitable custom traits struct for function pointers.
+///
+/// Function pointers can't be directly given these traits as functions can't
+/// have their alignment computed with `alignof` and we need different casting.
+///
+/// To rely on higher alignment for a specialized use, you can provide a
+/// customized form of this template explicitly with higher alignment, and
+/// potentially use alignment attributes on functions to satisfy that.
+template <int Alignment, typename FunctionPointerT>
+struct FunctionPointerLikeTypeTraits {
+  enum { NumLowBitsAvailable = detail::ConstantLog2<Alignment>::value };
+  static inline void *getAsVoidPointer(FunctionPointerT P) {
+    assert((reinterpret_cast<uintptr_t>(P) &
+            ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 &&
+           "Alignment not satisfied for an actual function pointer!");
+    return reinterpret_cast<void *>(P);
+  }
+  static inline FunctionPointerT getFromVoidPointer(void *P) {
+    return reinterpret_cast<FunctionPointerT>(P);
+  }
+};
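+
+// For example (illustrative only), a platform guaranteeing 16-byte aligned
+// functions could claim four low bits for a particular signature with:
+//
+//   template <>
+//   struct PointerLikeTypeTraits<void (*)()>
+//       : FunctionPointerLikeTypeTraits<16, void (*)()> {};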
+
+/// Provide a default specialization for function pointers that assumes
+/// 4-byte alignment.
+///
+/// We assume here that functions used with this are always at least 4-byte
+/// aligned. This means that, for example, thumb functions and systems with
+/// unaligned function pointers won't work, but all practical systems we
+/// support satisfy this requirement.
+template <typename ReturnT, typename... ParamTs>
+struct PointerLikeTypeTraits<ReturnT (*)(ParamTs...)>
+    : FunctionPointerLikeTypeTraits<4, ReturnT (*)(ParamTs...)> {};
+
 } // end namespace llvm
 
 #endif
Index: llvm/unittests/ADT/CMakeLists.txt
===================================================================
--- llvm/unittests/ADT/CMakeLists.txt
+++ llvm/unittests/ADT/CMakeLists.txt
@@ -18,6 +18,7 @@
   DepthFirstIteratorTest.cpp
   EquivalenceClassesTest.cpp
   FoldingSet.cpp
+  FunctionExtrasTest.cpp
   FunctionRefTest.cpp
   HashingTest.cpp
   IListBaseTest.cpp
Index: llvm/unittests/ADT/FunctionExtrasTest.cpp
===================================================================
--- /dev/null
+++ llvm/unittests/ADT/FunctionExtrasTest.cpp
@@ -0,0 +1,225 @@
+//===- FunctionExtrasTest.cpp - Unit tests for function type erasure -----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/FunctionExtras.h"
+#include "gtest/gtest.h"
+
+#include <memory>
+
+using namespace llvm;
+
+namespace {
+
+TEST(UniqueFunctionTest, Basic) {
+  unique_function<int(int, int)> Sum = [](int A, int B) { return A + B; };
+  EXPECT_EQ(Sum(1, 2), 3);
+
+  unique_function<int(int, int)> Sum2 = std::move(Sum);
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  unique_function<int(int, int)> Sum3 = [](int A, int B) { return A + B; };
+  Sum2 = std::move(Sum3);
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  Sum2 = unique_function<int(int, int)>([](int A, int B) { return A + B; });
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  // Explicit self-move test.
+  *&Sum2 = std::move(Sum2);
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  Sum2 = unique_function<int(int, int)>();
+  EXPECT_FALSE(Sum2);
+
+  // Make sure we can forward through l-value reference parameters.
+  unique_function<void(int &)> Inc = [](int &X) { ++X; };
+  int X = 42;
+  Inc(X);
+  EXPECT_EQ(X, 43);
+
+  // Make sure we can forward through r-value reference parameters with
+  // move-only types.
+  unique_function<int(std::unique_ptr<int> &&)> ReadAndDeallocByRef =
+      [](std::unique_ptr<int> &&Ptr) {
+        int V = *Ptr;
+        Ptr.reset();
+        return V;
+      };
+  std::unique_ptr<int> Ptr{new int(13)};
+  EXPECT_EQ(ReadAndDeallocByRef(std::move(Ptr)), 13);
+  EXPECT_FALSE((bool)Ptr);
+
+  // Make sure we can pass a move-only temporary as opposed to a local
+  // variable.
+  EXPECT_EQ(ReadAndDeallocByRef(std::unique_ptr<int>(new int(42))), 42);
+
+  // Make sure we can pass a move-only type by-value.
+  unique_function<int(std::unique_ptr<int>)> ReadAndDeallocByVal =
+      [](std::unique_ptr<int> Ptr) {
+        int V = *Ptr;
+        Ptr.reset();
+        return V;
+      };
+  Ptr.reset(new int(13));
+  EXPECT_EQ(ReadAndDeallocByVal(std::move(Ptr)), 13);
+  EXPECT_FALSE((bool)Ptr);
+
+  EXPECT_EQ(ReadAndDeallocByVal(std::unique_ptr<int>(new int(42))), 42);
+}
+
+TEST(UniqueFunctionTest, Captures) {
+  long A = 1, B = 2, C = 3, D = 4, E = 5;
+
+  unique_function<long()> Tmp;
+
+  unique_function<long()> C1 = [A]() { return A; };
+  EXPECT_EQ(C1(), 1);
+  Tmp = std::move(C1);
+  EXPECT_EQ(Tmp(), 1);
+
+  unique_function<long()> C2 = [A, B]() { return A + B; };
+  EXPECT_EQ(C2(), 3);
+  Tmp = std::move(C2);
+  EXPECT_EQ(Tmp(), 3);
+
+  unique_function<long()> C3 = [A, B, C]() { return A + B + C; };
+  EXPECT_EQ(C3(), 6);
+  Tmp = std::move(C3);
+  EXPECT_EQ(Tmp(), 6);
+
+  unique_function<long()> C4 = [A, B, C, D]() { return A + B + C + D; };
+  EXPECT_EQ(C4(), 10);
+  Tmp = std::move(C4);
+  EXPECT_EQ(Tmp(), 10);
+
+  unique_function<long()> C5 =
+      [A, B, C, D, E]() { return A + B + C + D + E; };
+  EXPECT_EQ(C5(), 15);
+  Tmp = std::move(C5);
+  EXPECT_EQ(Tmp(), 15);
+}
+
+TEST(UniqueFunctionTest, MoveOnly) {
+  struct SmallCallable {
+    std::unique_ptr<int> A{new int(1)};
+
+    int operator()(int B) { return *A + B; }
+  };
+  unique_function<int(int)> Small = SmallCallable();
+  EXPECT_EQ(Small(2), 3);
+  unique_function<int(int)> Small2 = std::move(Small);
+  EXPECT_EQ(Small2(2), 3);
+
+  struct LargeCallable {
+    std::unique_ptr<int> A{new int(1)};
+    std::unique_ptr<int> B{new int(2)};
+    std::unique_ptr<int> C{new int(3)};
+    std::unique_ptr<int> D{new int(4)};
+    std::unique_ptr<int> E{new int(5)};
+
+    int operator()() { return *A + *B + *C + *D + *E; }
+  };
+  unique_function<int()> Large = LargeCallable();
+  EXPECT_EQ(Large(), 15);
+  unique_function<int()> Large2 = std::move(Large);
+  EXPECT_EQ(Large2(), 15);
+}
+
+TEST(UniqueFunctionTest, CountForwardingCopies) {
+  struct CopyCounter {
+    int &CopyCount;
+
+    CopyCounter(int &CopyCount) : CopyCount(CopyCount) {}
+    CopyCounter(const CopyCounter &Arg) : CopyCount(Arg.CopyCount) {
+      ++CopyCount;
+    }
+  };
+
+  unique_function<void(CopyCounter)> ByValF = [](CopyCounter) {};
+  int CopyCount = 0;
+  ByValF(CopyCounter(CopyCount));
+  EXPECT_EQ(1, CopyCount);
+
+  CopyCount = 0;
+  {
+    CopyCounter Counter{CopyCount};
+    ByValF(Counter);
+  }
+  EXPECT_EQ(2, CopyCount);
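+  // Note: we see two copies for the l-value: one into the by-value parameter
+  // of unique_function::operator() and one into the lambda's own by-value
+  // parameter. The temporary above only counts one copy because the first
+  // copy is elided in practice.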
+
+  // Check that we don't generate a copy at all when we can bind a reference
+  // all the way down, even if that reference could *in theory* allow copies.
+  unique_function<void(const CopyCounter &)> ByRefF =
+      [](const CopyCounter &) {};
+  CopyCount = 0;
+  ByRefF(CopyCounter(CopyCount));
+  EXPECT_EQ(0, CopyCount);
+
+  CopyCount = 0;
+  {
+    CopyCounter Counter{CopyCount};
+    ByRefF(Counter);
+  }
+  EXPECT_EQ(0, CopyCount);
+
+  // If we use a reference, we can make a stronger guarantee that *no* copy
+  // occurs.
+  struct Uncopyable {
+    Uncopyable() = default;
+    Uncopyable(const Uncopyable &) = delete;
+  };
+  unique_function<void(const Uncopyable &)> UncopyableF =
+      [](const Uncopyable &) {};
+  UncopyableF(Uncopyable());
+  Uncopyable X;
+  UncopyableF(X);
+}
+
+TEST(UniqueFunctionTest, CountForwardingMoves) {
+  struct MoveCounter {
+    int &MoveCount;
+
+    MoveCounter(int &MoveCount) : MoveCount(MoveCount) {}
+    MoveCounter(MoveCounter &&Arg) : MoveCount(Arg.MoveCount) { ++MoveCount; }
+  };
+
+  unique_function<void(MoveCounter)> ByValF = [](MoveCounter) {};
+  int MoveCount = 0;
+  ByValF(MoveCounter(MoveCount));
+  EXPECT_EQ(1, MoveCount);
+
+  MoveCount = 0;
+  {
+    MoveCounter Counter{MoveCount};
+    ByValF(std::move(Counter));
+  }
+  EXPECT_EQ(2, MoveCount);
+
+  // Check that when we use an r-value reference we get no spurious moves.
+  unique_function<void(MoveCounter &&)> ByRefF = [](MoveCounter &&) {};
+  MoveCount = 0;
+  ByRefF(MoveCounter(MoveCount));
+  EXPECT_EQ(0, MoveCount);
+
+  MoveCount = 0;
+  {
+    MoveCounter Counter{MoveCount};
+    ByRefF(std::move(Counter));
+  }
+  EXPECT_EQ(0, MoveCount);
+
+  // If we use a reference, we can in fact make a stronger guarantee with an
+  // unmovable type.
+  struct Unmovable {
+    Unmovable() = default;
+    Unmovable(Unmovable &&) = delete;
+  };
+  unique_function<void(const Unmovable &)> UnmovableF =
+      [](const Unmovable &) {};
+  UnmovableF(Unmovable());
+  Unmovable X;
+  UnmovableF(X);
+}
+
+} // anonymous namespace