Index: llvm/include/llvm/ADT/function.h
===================================================================
--- /dev/null
+++ llvm/include/llvm/ADT/function.h
@@ -0,0 +1,254 @@
+//===- function.h - Function type erasure utilities ------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a collection of function (or more generally, callable)
+/// type erasure utilities supplementing those provided by the standard
+/// library.
+///
+/// It provides `unique_function`, which works like `std::function` but
+/// supports move-only callable objects.
+///
+/// Future plans:
+/// - Add a `function` that provides const, volatile, and ref-qualified
+///   support, which doesn't work with `std::function`.
+/// - Provide support for specifying multiple signatures to type erase callable
+///   objects with an overload set, such as those produced by generic lambdas.
+/// - Expand to include a copyable utility that directly replaces std::function
+///   but brings the above improvements.
+///
+/// Note that LLVM's utilities are greatly simplified by not supporting
+/// allocators.
+///
+/// If the standard library ever begins to provide comparable facilities we can
+/// consider switching to those.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FUNCTION_H
+#define LLVM_ADT_FUNCTION_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+
+namespace detail {} // end namespace detail
+
+template <typename FunctionT> class unique_function;
+
+template <typename ReturnT, typename... ParamTs>
+class unique_function<ReturnT(ParamTs...)> {
+  static constexpr int InlineStorageSize = sizeof(void *) * 3;
+
+  union StorageUnionT {
+    // For out-of-line storage we keep a pointer to the underlying storage
+    // along with its size and alignment. This is enough to deallocate the
+    // memory.
+    struct OutOfLineStorageT {
+      void *StoragePtr;
+      size_t Size;
+      size_t Alignment;
+    } OutOfLineStorage;
+    static_assert(
+        sizeof(OutOfLineStorageT) <= InlineStorageSize,
+        "Should always use all of the out-of-line storage for inline storage!");
+
+    // For in-line storage, we just provide an aligned character buffer. We
+    // provide three pointers worth of storage here.
+    typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
+        InlineStorage;
+  } StorageUnion;
+
+  // The type of the erased function pointer we use as a callback to dispatch
+  // to the stored callable when it is trivial to move and destroy.
+  using CallPtrT = ReturnT (*)(void *CallableAddr, ParamTs... Params);
+  using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
+  using DestroyPtrT = void (*)(void *CallableAddr);
+
+  /// A struct we use to aggregate the three callbacks when we need a full set
+  /// of operations.
+  struct NonTrivialCallbacks {
+    CallPtrT CallPtr;
+    MovePtrT MovePtr;
+    DestroyPtrT DestroyPtr;
+  };
+
+  // Now we can create a pointer union between either a direct, trivial call
+  // pointer and a pointer to a static struct of the call, move, and destroy
+  // pointers. We do this to keep the footprint in this object a single pointer
+  // while supporting all the necessary type-erased operations.
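+  // Note that storing a plain function pointer in a PointerUnion (and then in
+  // a PointerIntPair below) relies on the PointerLikeTypeTraits specialization
+  // for function pointers added in PointerLikeTypeTraits.h, which assumes
+  // functions are at least 4-byte aligned and so have free low bits.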
+  using CallbackPointerUnionT = PointerUnion<CallPtrT, NonTrivialCallbacks *>;
+
+  // A compressed pointer to either our dispatching callback or our table of
+  // dispatching callbacks and the flag for whether the callable itself is
+  // stored inline or not.
+  PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;
+
+  bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }
+
+  bool isTrivialCallback() const {
+    return CallbackAndInlineFlag.getPointer().template is<CallPtrT>();
+  }
+
+  CallPtrT getTrivialCallback() const {
+    return CallbackAndInlineFlag.getPointer().template get<CallPtrT>();
+  }
+
+  NonTrivialCallbacks *getNonTrivialCallbacks() const {
+    return CallbackAndInlineFlag.getPointer()
+        .template get<NonTrivialCallbacks *>();
+  }
+
+  void *getInlineStorage() { return &StorageUnion.InlineStorage; }
+
+  void *getOutOfLineStorage() {
+    return StorageUnion.OutOfLineStorage.StoragePtr;
+  }
+  size_t getOutOfLineStorageSize() const {
+    return StorageUnion.OutOfLineStorage.Size;
+  }
+  size_t getOutOfLineStorageAlignment() const {
+    return StorageUnion.OutOfLineStorage.Alignment;
+  }
+
+  void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
+    StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
+  }
+
+  template <typename CallableT>
+  static ReturnT CallImpl(void *CallableAddr, ParamTs... Params) {
+    return (*reinterpret_cast<CallableT *>(CallableAddr))(
+        std::forward<ParamTs>(Params)...);
+  }
+
+  template <typename CallableT>
+  static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
+    new (LHSCallableAddr)
+        CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
+  }
+
+  template <typename CallableT>
+  static void DestroyImpl(void *CallableAddr) noexcept {
+    reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
+  }
+
+public:
+  unique_function() = default;
+  unique_function(std::nullptr_t /*null_callable*/) {}
+
+  ~unique_function() {
+    if (!CallbackAndInlineFlag.getPointer())
+      return;
+
+    // Cache this value so we don't re-check it after type-erased operations.
+    bool IsInlineStorage = isInlineStorage();
+
+    if (!isTrivialCallback())
+      getNonTrivialCallbacks()->DestroyPtr(
+          IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());
+
+    if (!IsInlineStorage)
+      deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
+                        getOutOfLineStorageAlignment());
+  }
+
+  unique_function(unique_function &&RHS) noexcept {
+    // Copy the callback and inline flag.
+    CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;
+
+    if (!isInlineStorage()) {
+      // The out-of-line case is easiest to move.
+      StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
+    } else if (isTrivialCallback()) {
+      // Move is trivial, just memcpy the bytes across.
+      memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
+    } else {
+      // Non-trivial move, so dispatch to a type-erased implementation.
+      getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
+                                        RHS.getInlineStorage());
+    }
+
+    // Clear the old callback and inline flag to get back to as-if-null.
+    RHS.CallbackAndInlineFlag = {};
+
+#ifndef NDEBUG
+    // In debug builds, we also scribble across the rest of the storage.
+    memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
+#endif
+  }
+
+  unique_function &operator=(unique_function &&RHS) noexcept {
+    if (this == &RHS)
+      return *this;
+
+    // Because we don't try to provide any exception safety guarantees we can
+    // implement move assignment very simply by first destroying the current
+    // object and then move-constructing over top of it.
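+    // (Both steps are non-throwing: the destructor cannot throw and the move
+    // constructor is declared noexcept, so this preserves the noexcept
+    // guarantee of this operator.)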
+    this->~unique_function();
+    new (this) unique_function(std::move(RHS));
+    return *this;
+  }
+
+  template <typename CallableT> unique_function(CallableT Callable) {
+    bool IsInlineStorage = true;
+    void *CallableAddr = getInlineStorage();
+    if (sizeof(CallableT) > InlineStorageSize ||
+        alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
+      IsInlineStorage = false;
+      // Allocate out-of-line storage. FIXME: Use an explicit alignment
+      // parameter in C++17 mode.
+      auto Size = sizeof(CallableT);
+      auto Alignment = alignof(CallableT);
+      CallableAddr = allocate_buffer(Size, Alignment);
+      setOutOfLineStorage(CallableAddr, Size, Alignment);
+    }
+
+    // Now move into the storage.
+    new (CallableAddr) CallableT(std::move(Callable));
+
+    // See if we can create a trivial callback.
+    // FIXME: we should use constexpr if here and below to avoid instantiating
+    // the non-trivial static objects when unnecessary. While the linker should
+    // remove them, it is still wasteful.
+    if (std::is_trivially_move_constructible<CallableT>::value &&
+        std::is_trivially_destructible<CallableT>::value) {
+      CallbackAndInlineFlag = {&CallImpl<CallableT>, IsInlineStorage};
+      return;
+    }
+
+    // Otherwise, we need to point at an object that aggregates all of the
+    // different type-erased behaviors needed. Create a static instance of the
+    // callbacks struct for this callable type and then use a pointer to that.
+    static NonTrivialCallbacks Callbacks = {
+        &CallImpl<CallableT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
+
+    CallbackAndInlineFlag = {&Callbacks, IsInlineStorage};
+  }
+
+  ReturnT operator()(ParamTs... Params) {
+    void *CallableAddr =
+        isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
+
+    if (isTrivialCallback())
+      return getTrivialCallback()(CallableAddr,
+                                  std::forward<ParamTs>(Params)...);
+
+    return getNonTrivialCallbacks()->CallPtr(CallableAddr,
+                                             std::forward<ParamTs>(Params)...);
+  }
+
+  explicit operator bool() const {
+    return (bool)CallbackAndInlineFlag.getPointer();
+  }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FUNCTION_H
Index: llvm/include/llvm/Support/Compiler.h
===================================================================
--- llvm/include/llvm/Support/Compiler.h
+++ llvm/include/llvm/Support/Compiler.h
@@ -17,6 +17,9 @@
 
 #include "llvm/Config/llvm-config.h"
 
+#include <new>
+#include <stddef.h>
+
 #if defined(_MSC_VER)
 #include <sal.h>
 #endif
@@ -503,4 +506,46 @@
 #define LLVM_ENABLE_EXCEPTIONS 1
 #endif
 
+namespace llvm {
+
+/// Allocate a buffer of memory with the given size and alignment.
+///
+/// When the compiler supports aligned operator new, this will use it to
+/// handle even over-aligned allocations.
+///
+/// However, this doesn't make any attempt to leverage the fancier techniques
+/// like posix_memalign due to portability. It is mostly intended to allow
+/// compatibility with platforms that, after aligned allocation was added, use
+/// reduced default alignment.
+inline void *allocate_buffer(size_t Size, size_t Alignment) {
+  return ::operator new(Size
+#if __cpp_aligned_new
+                        ,
+                        std::align_val_t(Alignment)
+#endif
+  );
+}
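+
+// Note: when __cpp_aligned_new is unavailable, the alignment argument above is
+// dropped, so over-aligned requests only receive the fundamental alignment
+// guaranteed by ::operator new (alignof(std::max_align_t)).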
+
+/// Deallocate a buffer of memory with the given size and alignment.
+///
+/// If supported, this will use the sized delete operator. Also if supported,
+/// this will pass the alignment to the delete operator.
+///
+/// The pointer must have been allocated with the corresponding new operator,
+/// most likely using the above helper.
+inline void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment) {
+  ::operator delete(Ptr
+#if __cpp_sized_deallocation
+                    ,
+                    Size
+#endif
+#if __cpp_aligned_new
+                    ,
+                    std::align_val_t(Alignment)
+#endif
+  );
+}
+
+} // End namespace llvm
+
 #endif
Index: llvm/include/llvm/Support/PointerLikeTypeTraits.h
===================================================================
--- llvm/include/llvm/Support/PointerLikeTypeTraits.h
+++ llvm/include/llvm/Support/PointerLikeTypeTraits.h
@@ -16,6 +16,7 @@
 #define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
 
 #include "llvm/Support/DataTypes.h"
+#include <cassert>
 #include <type_traits>
 
 namespace llvm {
@@ -111,6 +112,39 @@
   enum { NumLowBitsAvailable = 0 };
 };
 
+/// Provide suitable custom traits struct for function pointers.
+///
+/// Function pointers can't be directly given these traits as functions can't
+/// have their alignment computed with `alignof` and we need different casting.
+///
+/// To rely on higher alignment for a specialized use, you can provide a
+/// customized form of this template explicitly with higher alignment, and
+/// potentially use alignment attributes on functions to satisfy that.
+template <int Alignment, typename FunctionPointerT>
+struct FunctionPointerLikeTypeTraits {
+  enum { NumLowBitsAvailable = detail::ConstantLog2<Alignment>::value };
+  static inline void *getAsVoidPointer(FunctionPointerT P) {
+    assert((reinterpret_cast<uintptr_t>(P) &
+            ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 &&
+           "Alignment not satisfied for an actual function pointer!");
+    return reinterpret_cast<void *>(P);
+  }
+  static inline FunctionPointerT getFromVoidPointer(void *P) {
+    return reinterpret_cast<FunctionPointerT>(P);
+  }
+};
+
+/// Provide a default specialization for function pointers that assumes 4-byte
+/// alignment.
+///
+/// We assume here that functions used with this are always at least 4-byte
+/// aligned. This means that, for example, thumb functions won't work, nor will
+/// systems with unusual unaligned function pointers. But all practical systems
+/// we support satisfy this requirement.
+template <typename ReturnT, typename... ParamTs>
+struct PointerLikeTypeTraits<ReturnT (*)(ParamTs...)>
+    : FunctionPointerLikeTypeTraits<4, ReturnT (*)(ParamTs...)> {};
+
 } // end namespace llvm
 
 #endif
Index: llvm/unittests/ADT/CMakeLists.txt
===================================================================
--- llvm/unittests/ADT/CMakeLists.txt
+++ llvm/unittests/ADT/CMakeLists.txt
@@ -18,6 +18,7 @@
   DepthFirstIteratorTest.cpp
   EquivalenceClassesTest.cpp
   FoldingSet.cpp
+  FunctionTest.cpp
   FunctionRefTest.cpp
   HashingTest.cpp
   IListBaseTest.cpp
Index: llvm/unittests/ADT/FunctionTest.cpp
===================================================================
--- /dev/null
+++ llvm/unittests/ADT/FunctionTest.cpp
@@ -0,0 +1,98 @@
+//===- FunctionTest.cpp - Unit tests for function type erasure -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/function.h"
+#include "gtest/gtest.h"
+
+#include <memory>
+
+using namespace llvm;
+
+namespace {
+
+TEST(UniqueFunctionTest, Basic) {
+  unique_function<int(int, int)> Sum = [](int A, int B) { return A + B; };
+  EXPECT_EQ(Sum(1, 2), 3);
+
+  unique_function<int(int, int)> Sum2 = std::move(Sum);
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  unique_function<int(int, int)> Sum3 = [](int A, int B) { return A + B; };
+  Sum2 = std::move(Sum3);
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  Sum2 = unique_function<int(int, int)>([](int A, int B) { return A + B; });
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  // Explicit self-move test.
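+  // (Assigning through '*&' keeps compilers from warning about the
+  // intentional self-move below.)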
+  *&Sum2 = std::move(Sum2);
+  EXPECT_EQ(Sum2(1, 2), 3);
+
+  Sum2 = unique_function<int(int, int)>();
+  EXPECT_FALSE(Sum2);
+}
+
+TEST(UniqueFunctionTest, Captures) {
+  long A = 1, B = 2, C = 3, D = 4, E = 5;
+
+  unique_function<long()> Tmp;
+
+  unique_function<long()> C1 = [A]() { return A; };
+  EXPECT_EQ(C1(), 1);
+  Tmp = std::move(C1);
+  EXPECT_EQ(Tmp(), 1);
+
+  unique_function<long()> C2 = [A, B]() { return A + B; };
+  EXPECT_EQ(C2(), 3);
+  Tmp = std::move(C2);
+  EXPECT_EQ(Tmp(), 3);
+
+  unique_function<long()> C3 = [A, B, C]() { return A + B + C; };
+  EXPECT_EQ(C3(), 6);
+  Tmp = std::move(C3);
+  EXPECT_EQ(Tmp(), 6);
+
+  unique_function<long()> C4 = [A, B, C, D]() { return A + B + C + D; };
+  EXPECT_EQ(C4(), 10);
+  Tmp = std::move(C4);
+  EXPECT_EQ(Tmp(), 10);
+
+  unique_function<long()> C5 = [A, B, C, D, E]() { return A + B + C + D + E; };
+  EXPECT_EQ(C5(), 15);
+  Tmp = std::move(C5);
+  EXPECT_EQ(Tmp(), 15);
+}
+
+TEST(UniqueFunctionTest, MoveOnly) {
+  struct SmallCallable {
+    std::unique_ptr<int> A{new int(1)};
+
+    int operator()(int B) { return *A + B; }
+  };
+  unique_function<int(int)> Small = SmallCallable();
+  EXPECT_EQ(Small(2), 3);
+  unique_function<int(int)> Small2 = std::move(Small);
+  EXPECT_EQ(Small2(2), 3);
+
+  struct LargeCallable {
+    std::unique_ptr<int> A{new int(1)};
+    std::unique_ptr<int> B{new int(2)};
+    std::unique_ptr<int> C{new int(3)};
+    std::unique_ptr<int> D{new int(4)};
+    std::unique_ptr<int> E{new int(5)};
+
+    int operator()() { return *A + *B + *C + *D + *E; }
+  };
+  unique_function<int()> Large = LargeCallable();
+  EXPECT_EQ(Large(), 15);
+  unique_function<int()> Large2 = std::move(Large);
+  EXPECT_EQ(Large2(), 15);
+}
+
+} // anonymous namespace
\ No newline at end of file
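
Reviewer note, not part of the patch: a minimal sketch of the motivating use case, assuming the header lands as above. The names `Ticket` and `demo` are hypothetical; the point is that `std::function` rejects this callable because its state is move-only, while `unique_function` accepts it.

    #include "llvm/ADT/function.h"
    #include <memory>
    #include <utility>

    // A callable whose state is move-only; std::function requires copyable
    // targets and so cannot hold this.
    struct Ticket {
      std::unique_ptr<int> Id{new int(7)};
      int operator()() { return *Id; }
    };

    int demo() {
      llvm::unique_function<int()> F = Ticket();     // moved into erased storage
      llvm::unique_function<int()> G = std::move(F); // ownership transfers, no copy
      return G();                                    // returns 7
    }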