Index: src/CMakeLists.txt
===================================================================
--- src/CMakeLists.txt
+++ src/CMakeLists.txt
@@ -14,6 +14,7 @@
   cxa_vector.cpp
   cxa_virtual.cpp
   exception.cpp
+  fallback_malloc.cpp
   private_typeinfo.cpp
   stdexcept.cpp
   typeinfo.cpp
Index: src/cxa_exception.cpp
===================================================================
--- src/cxa_exception.cpp
+++ src/cxa_exception.cpp
@@ -15,13 +15,10 @@
 #include "cxxabi.h"

 #include <exception>        // for std::terminate
-#include <cstdlib>          // for malloc, free
 #include <cstring>          // for memset
-#if !LIBCXXABI_HAS_NO_THREADS
-#  include <pthread.h>      // for fallback_malloc.ipp's mutexes
-#endif
 #include "cxa_exception.hpp"
 #include "cxa_handlers.hpp"
+#include "fallback_malloc.h"

 // +---------------------------+-----------------------------+---------------+
 // | __cxa_exception           | _Unwind_Exception CLNGC++\0 | thrown object |
@@ -104,20 +101,6 @@
     return --exception->handlerCount;
 }

-#include "fallback_malloc.ipp"
-
-//  Allocate some memory from _somewhere_
-static void *do_malloc(size_t size) {
-    void *ptr = std::malloc(size);
-    if (NULL == ptr) // if malloc fails, fall back to emergency stash
-        ptr = fallback_malloc(size);
-    return ptr;
-}
-
-static void do_free(void *ptr) {
-    is_fallback_ptr(ptr) ? fallback_free(ptr) : std::free(ptr);
-}
-
 /*
     If reason isn't _URC_FOREIGN_EXCEPTION_CAUGHT, then the terminateHandler
     stored in exc is called.  Otherwise the exceptionDestructor stored in
@@ -158,7 +141,8 @@
 //  user's exception object.
 _LIBCXXABI_FUNC_VIS void *__cxa_allocate_exception(size_t thrown_size) throw() {
     size_t actual_size = cxa_exception_size_from_exception_thrown_size(thrown_size);
-    __cxa_exception* exception_header = static_cast<__cxa_exception*>(do_malloc(actual_size));
+    __cxa_exception *exception_header =
+        static_cast<__cxa_exception *>(__cxa_malloc_with_fallback(actual_size));
     if (NULL == exception_header)
         std::terminate();
     std::memset(exception_header, 0, actual_size);
@@ -168,7 +152,7 @@

 //  Free a __cxa_exception object allocated with __cxa_allocate_exception.
 _LIBCXXABI_FUNC_VIS void __cxa_free_exception(void *thrown_object) throw() {
-    do_free(cxa_exception_from_thrown_object(thrown_object));
+    __cxa_free_with_fallback(cxa_exception_from_thrown_object(thrown_object));
 }

@@ -177,7 +161,7 @@
 //  Otherwise, it will work like __cxa_allocate_exception.
 void * __cxa_allocate_dependent_exception () {
     size_t actual_size = sizeof(__cxa_dependent_exception);
-    void *ptr = do_malloc(actual_size);
+    void *ptr = __cxa_malloc_with_fallback(actual_size);
     if (NULL == ptr)
         std::terminate();
     std::memset(ptr, 0, actual_size);
@@ -188,7 +172,7 @@
 //  This function shall free a dependent_exception.
 //  It does not affect the reference count of the primary exception.
 void __cxa_free_dependent_exception (void * dependent_exception) {
-    do_free(dependent_exception);
+    __cxa_free_with_fallback(dependent_exception);
 }
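The hunks above reroute every exception allocation through the new fallback-aware
entry points, so cxa_exception.cpp no longer needs to textually include the
allocator. For readers new to this code: __cxa_allocate_exception reserves room
for the __cxa_exception header in front of the thrown object and returns a
pointer just past the header. A minimal sketch of that layout follows; DemoHeader
and its 112-byte size are stand-ins for illustration only, and the real header
and size helpers live in cxa_exception.hpp:

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // Stand-in for __cxa_exception; size chosen arbitrarily for the sketch.
    struct DemoHeader { char opaque[112]; };

    // Sketch only: allocate header + thrown object, zero the block, and
    // return the address of the object that follows the header.
    void *demo_allocate_exception(std::size_t thrown_size) {
        std::size_t actual_size = sizeof(DemoHeader) + thrown_size;
        char *raw = static_cast<char *>(std::malloc(actual_size));
        if (raw == NULL)
            return NULL; // the patched code tries the fallback pool, then terminates
        std::memset(raw, 0, actual_size);
        return raw + sizeof(DemoHeader);
    }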
Index: src/cxa_exception_storage.cpp
===================================================================
--- src/cxa_exception_storage.cpp
+++ src/cxa_exception_storage.cpp
@@ -45,8 +45,8 @@
 #else

 #include <pthread.h>
-#include <cstdlib>          // for calloc, free
 #include "abort_message.h"
+#include "fallback_malloc.h"

 //  In general, we treat all pthread errors as fatal.
 //  We cannot call std::terminate() because that will in turn
@@ -58,7 +58,7 @@
     pthread_once_t flag_ = PTHREAD_ONCE_INIT;

     void destruct_ (void *p) {
-        std::free ( p );
+        __cxa_free_with_fallback ( p );
         if ( 0 != ::pthread_setspecific ( key_, NULL ) )
             abort_message("cannot zero out thread value for __cxa_get_globals()");
     }
@@ -77,7 +77,7 @@
 //  If this is the first time we've been asked for these globals, create them
     if ( NULL == retVal ) {
         retVal = static_cast<__cxa_eh_globals*>
-                    (std::calloc (1, sizeof (__cxa_eh_globals)));
+                    (__cxa_calloc_with_fallback (1, sizeof (__cxa_eh_globals)));
         if ( NULL == retVal )
             abort_message("cannot allocate __cxa_eh_globals");
         if ( 0 != pthread_setspecific ( key_, retVal ) )
Index: src/fallback_malloc.h
===================================================================
--- /dev/null
+++ src/fallback_malloc.h
@@ -0,0 +1,31 @@
+//===------------------------- fallback_malloc.h --------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _FALLBACK_MALLOC_H
+#define _FALLBACK_MALLOC_H
+
+#include <stddef.h> // for size_t
+
+namespace __cxxabiv1 {
+
+#pragma GCC visibility push(hidden)
+
+// Allocate some memory from _somewhere_
+void * __cxa_malloc_with_fallback(size_t size);
+
+// Allocate and zero-initialize memory from _somewhere_
+void * __cxa_calloc_with_fallback(size_t count, size_t size);
+
+void __cxa_free_with_fallback(void *ptr);
+
+#pragma GCC visibility pop
+
+} // namespace __cxxabiv1
+
+#endif
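The new header gives the rest of the library a three-function allocation
interface with hidden visibility. A minimal sketch of a caller inside
libc++abi (the call site is hypothetical; since the symbols are hidden,
nothing outside the library can link to them):

    #include "fallback_malloc.h"

    // Hypothetical call site inside libc++abi.
    void demo() {
        // Zero-initialized 64-byte block that survives malloc exhaustion.
        void *p = __cxxabiv1::__cxa_calloc_with_fallback(1, 64);
        if (p == NULL)
            return; // both the real heap and the emergency pool are exhausted
        // ... use p ...
        __cxxabiv1::__cxa_free_with_fallback(p); // routed back to the right heap
    }

The free function inspects the pointer to decide which deallocator to call, so
callers never need to remember where a block came from.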
Index: src/fallback_malloc.cpp
===================================================================
--- /dev/null
+++ src/fallback_malloc.cpp
@@ -0,0 +1,226 @@
+//===------------------------ fallback_malloc.cpp -------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "fallback_malloc.h"
+
+#include "config.h"
+
+#include <cstdlib> // for malloc, calloc, free
+#include <cstring> // for memset
+
+#if !LIBCXXABI_HAS_NO_THREADS
+#include <pthread.h> // for mutexes
+#endif
+
+//  A small, simple heap manager based (loosely) on
+//  the startup heap manager from FreeBSD, optimized for space.
+//
+//  Manages a fixed-size memory pool, supports malloc and free only.
+//  No support for realloc.
+//
+//  Allocates chunks in multiples of four bytes, with a four byte header
+//  for each chunk. The overhead of each chunk is kept low by keeping pointers
+//  as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
+
+namespace {
+
+// When POSIX threads are not available, make the mutex operations a nop
+#if LIBCXXABI_HAS_NO_THREADS
+static void * heap_mutex = 0;
+#else
+static pthread_mutex_t heap_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+class mutexor {
+public:
+#if LIBCXXABI_HAS_NO_THREADS
+    mutexor ( void * ) {}
+    ~mutexor () {}
+#else
+    mutexor ( pthread_mutex_t *m ) : mtx_(m) { pthread_mutex_lock ( mtx_ ); }
+    ~mutexor () { pthread_mutex_unlock ( mtx_ ); }
+#endif
+private:
+    mutexor ( const mutexor &rhs );
+    mutexor & operator = ( const mutexor &rhs );
+#if !LIBCXXABI_HAS_NO_THREADS
+    pthread_mutex_t *mtx_;
+#endif
+    };
+
+
+#define HEAP_SIZE   512
+char heap [ HEAP_SIZE ];
+
+typedef unsigned short heap_offset;
+typedef unsigned short heap_size;
+
+struct heap_node {
+    heap_offset next_node;  // offset into heap
+    heap_size   len;        // size in units of "sizeof(heap_node)"
+};
+
+static const heap_node *list_end = (heap_node *) ( &heap [ HEAP_SIZE ] );   // one past the end of the heap
+static heap_node *freelist = NULL;
+
+heap_node *node_from_offset ( const heap_offset offset )
+    { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }
+
+heap_offset offset_from_node ( const heap_node *ptr )
+    { return static_cast<heap_offset>(static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap) / sizeof (heap_node)); }
+
+void init_heap () {
+    freelist = (heap_node *) heap;
+    freelist->next_node = offset_from_node ( list_end );
+    freelist->len = HEAP_SIZE / sizeof (heap_node);
+    }
+
+//  How big a chunk we allocate
+size_t alloc_size (size_t len)
+    { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }
+
+bool is_fallback_ptr ( void *ptr )
+    { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }
+
+void *fallback_malloc(size_t len) {
+    heap_node *p, *prev;
+    const size_t nelems = alloc_size ( len );
+    mutexor mtx ( &heap_mutex );
+
+    if ( NULL == freelist )
+        init_heap ();
+
+//  Walk the free list, looking for a "big enough" chunk
+    for (p = freelist, prev = 0;
+            p && p != list_end;     prev = p, p = node_from_offset ( p->next_node)) {
+
+        if (p->len > nelems) {  //  chunk is larger, shorten, and return the tail
+            heap_node *q;
+
+            p->len = static_cast<heap_size>(p->len - nelems);
+            q = p + p->len;
+            q->next_node = 0;
+            q->len = static_cast<heap_size>(nelems);
+            return (void *) (q + 1);
+        }
+
+        if (p->len == nelems) { // exact size match
+            if (prev == 0)
+                freelist = node_from_offset(p->next_node);
+            else
+                prev->next_node = p->next_node;
+            p->next_node = 0;
+            return (void *) (p + 1);
+        }
+    }
+    return NULL;    // couldn't find a spot big enough
+}
+
+// Return the start of the next block
+heap_node *after ( struct heap_node *p ) { return p + p->len; }
+
+void fallback_free (void *ptr) {
+    struct heap_node *cp = ((struct heap_node *) ptr) - 1;      // retrieve the chunk
+    struct heap_node *p, *prev;
+
+    mutexor mtx ( &heap_mutex );
+
+#ifdef DEBUG_FALLBACK_MALLOC
+    std::cout << "Freeing item at " << offset_from_node ( cp ) << " of size " << cp->len << std::endl;
+#endif
+
+    for (p = freelist, prev = 0;
+            p && p != list_end;     prev = p, p = node_from_offset (p->next_node)) {
+#ifdef DEBUG_FALLBACK_MALLOC
+        std::cout << "  p, cp, after (p), after(cp) "
+            << offset_from_node ( p ) << ' '
+            << offset_from_node ( cp ) << ' '
+            << offset_from_node ( after ( p )) << ' '
+            << offset_from_node ( after ( cp )) << std::endl;
+#endif
+        if ( after ( p ) == cp ) {
+#ifdef DEBUG_FALLBACK_MALLOC
+            std::cout << "  Appending onto chunk at " << offset_from_node ( p ) << std::endl;
+#endif
+            p->len = static_cast<heap_size>(p->len + cp->len);  // make the free heap_node larger
+            return;
+            }
+        else if ( after ( cp ) == p ) {     // there's a free heap_node right after
+#ifdef DEBUG_FALLBACK_MALLOC
+            std::cout << "  Appending free chunk at " << offset_from_node ( p ) << std::endl;
+#endif
+            cp->len = static_cast<heap_size>(cp->len + p->len);
+            if ( prev == 0 ) {
+                freelist = cp;
+                cp->next_node = p->next_node;
+                }
+            else
+                prev->next_node = offset_from_node(cp);
+            return;
+            }
+        }
+//  Nothing to merge with, add it to the start of the free list
+#ifdef DEBUG_FALLBACK_MALLOC
+    std::cout << "  Making new free list entry " << offset_from_node ( cp ) << std::endl;
+#endif
+    cp->next_node = offset_from_node ( freelist );
+    freelist = cp;
+}
+
+#ifdef INSTRUMENT_FALLBACK_MALLOC
+size_t print_free_list () {
+    struct heap_node *p, *prev;
+    heap_size total_free = 0;
+    if ( NULL == freelist )
+        init_heap ();
+
+    for (p = freelist, prev = 0;
+            p && p != list_end;     prev = p, p = node_from_offset (p->next_node)) {
+        std::cout << ( prev == 0 ? "" : "  ") << "Offset: " << offset_from_node ( p )
+            << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
+        total_free += p->len;
+    }
+    std::cout << "Total Free space: " << total_free << std::endl;
+    return total_free;
+    }
+#endif
+}  // end unnamed namespace
+
+namespace __cxxabiv1 {
+
+#pragma GCC visibility push(hidden)
+
+void * __cxa_malloc_with_fallback(size_t size) {
+    void *ptr = std::malloc(size);
+    if (NULL == ptr) // if malloc fails, fall back to emergency stash
+        ptr = fallback_malloc(size);
+    return ptr;
+}
+
+void * __cxa_calloc_with_fallback(size_t count, size_t size) {
+    void *ptr = std::calloc(count, size);
+    if (NULL != ptr)
+        return ptr;
+    // if calloc fails, fall back to emergency stash
+    ptr = fallback_malloc(size * count);
+    if (NULL != ptr)
+        std::memset(ptr, 0, size * count);
+    return ptr;
+}
+
+void __cxa_free_with_fallback(void *ptr) {
+    if (is_fallback_ptr(ptr))
+        fallback_free(ptr);
+    else
+        std::free(ptr);
+}
+
+#pragma GCC visibility pop
+
+} // namespace __cxxabiv1
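Some arithmetic that makes the new file easier to review: heap_node is two
unsigned shorts, so sizeof(heap_node) is 4 bytes and the 512-byte pool holds
128 nodes; alloc_size rounds a request up to 4-byte units and adds one unit
for the chunk header. A standalone check of that math (it mirrors the patch's
definitions but is not part of the patch):

    #include <assert.h>
    #include <stddef.h>

    // Mirrors the patch: payload rounded up to sizeof(heap_node) units,
    // plus one unit for the header.
    struct heap_node { unsigned short next_node; unsigned short len; };

    static size_t alloc_size(size_t len)
        { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }

    int main() {
        assert(sizeof(heap_node) == 4);      // two unsigned shorts
        assert(alloc_size(1)  == 2);         // 1 payload unit + 1 header unit
        assert(alloc_size(10) == 4);         // 3 payload units + 1 header unit
        assert(alloc_size(512 - 4) == 128);  // exactly fills the 128-node pool
        return 0;
    }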
Index: src/fallback_malloc.ipp
===================================================================
--- src/fallback_malloc.ipp
+++ /dev/null
@@ -1,188 +0,0 @@
-//===------------------------ fallback_malloc.ipp -------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.TXT for details.
-//
-//
-// This file implements the "Exception Handling APIs"
-// http://mentorembedded.github.io/cxx-abi/abi-eh.html
-//
-//===----------------------------------------------------------------------===//
-
-#include "config.h"
-
-//  A small, simple heap manager based (loosely) on
-//  the startup heap manager from FreeBSD, optimized for space.
-//
-//  Manages a fixed-size memory pool, supports malloc and free only.
-//  No support for realloc.
-//
-//  Allocates chunks in multiples of four bytes, with a four byte header
-//  for each chunk. The overhead of each chunk is kept low by keeping pointers
-//  as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
-
-namespace {
-
-// When POSIX threads are not available, make the mutex operations a nop
-#if LIBCXXABI_HAS_NO_THREADS
-static void * heap_mutex = 0;
-#else
-static pthread_mutex_t heap_mutex = PTHREAD_MUTEX_INITIALIZER;
-#endif
-
-class mutexor {
-public:
-#if LIBCXXABI_HAS_NO_THREADS
-    mutexor ( void * ) {}
-    ~mutexor () {}
-#else
-    mutexor ( pthread_mutex_t *m ) : mtx_(m) { pthread_mutex_lock ( mtx_ ); }
-    ~mutexor () { pthread_mutex_unlock ( mtx_ ); }
-#endif
-private:
-    mutexor ( const mutexor &rhs );
-    mutexor & operator = ( const mutexor &rhs );
-#if !LIBCXXABI_HAS_NO_THREADS
-    pthread_mutex_t *mtx_;
-#endif
-    };
-
-
-#define HEAP_SIZE   512
-char heap [ HEAP_SIZE ];
-
-typedef unsigned short heap_offset;
-typedef unsigned short heap_size;
-
-struct heap_node {
-    heap_offset next_node;  // offset into heap
-    heap_size   len;        // size in units of "sizeof(heap_node)"
-};
-
-static const heap_node *list_end = (heap_node *) ( &heap [ HEAP_SIZE ] );   // one past the end of the heap
-static heap_node *freelist = NULL;
-
-heap_node *node_from_offset ( const heap_offset offset )
-    { return (heap_node *) ( heap + ( offset * sizeof (heap_node))); }
-
-heap_offset offset_from_node ( const heap_node *ptr )
-    { return static_cast<heap_offset>(static_cast<size_t>(reinterpret_cast<const char *>(ptr) - heap) / sizeof (heap_node)); }
-
-void init_heap () {
-    freelist = (heap_node *) heap;
-    freelist->next_node = offset_from_node ( list_end );
-    freelist->len = HEAP_SIZE / sizeof (heap_node);
-    }
-
-//  How big a chunk we allocate
-size_t alloc_size (size_t len)
-    { return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1; }
-
-bool is_fallback_ptr ( void *ptr )
-    { return ptr >= heap && ptr < ( heap + HEAP_SIZE ); }
-
-void *fallback_malloc(size_t len) {
-    heap_node *p, *prev;
-    const size_t nelems = alloc_size ( len );
-    mutexor mtx ( &heap_mutex );
-
-    if ( NULL == freelist )
-        init_heap ();
-
-//  Walk the free list, looking for a "big enough" chunk
-    for (p = freelist, prev = 0;
-            p && p != list_end;     prev = p, p = node_from_offset ( p->next_node)) {
-
-        if (p->len > nelems) {  //  chunk is larger, shorten, and return the tail
-            heap_node *q;
-
-            p->len = static_cast<heap_size>(p->len - nelems);
-            q = p + p->len;
-            q->next_node = 0;
-            q->len = static_cast<heap_size>(nelems);
-            return (void *) (q + 1);
-        }
-
-        if (p->len == nelems) { // exact size match
-            if (prev == 0)
-                freelist = node_from_offset(p->next_node);
-            else
-                prev->next_node = p->next_node;
-            p->next_node = 0;
-            return (void *) (p + 1);
-        }
-    }
-    return NULL;    // couldn't find a spot big enough
-}
-
-// Return the start of the next block
-heap_node *after ( struct heap_node *p ) { return p + p->len; }
-
-void fallback_free (void *ptr) {
-    struct heap_node *cp = ((struct heap_node *) ptr) - 1;      // retrieve the chunk
-    struct heap_node *p, *prev;
-
-    mutexor mtx ( &heap_mutex );
-
-#ifdef DEBUG_FALLBACK_MALLOC
-    std::cout << "Freeing item at " << offset_from_node ( cp ) << " of size " << cp->len << std::endl;
-#endif
-
-    for (p = freelist, prev = 0;
-            p && p != list_end;     prev = p, p = node_from_offset (p->next_node)) {
-#ifdef DEBUG_FALLBACK_MALLOC
-        std::cout << "  p, cp, after (p), after(cp) "
-            << offset_from_node ( p ) << ' '
-            << offset_from_node ( cp ) << ' '
-            << offset_from_node ( after ( p )) << ' '
-            << offset_from_node ( after ( cp )) << std::endl;
-#endif
-        if ( after ( p ) == cp ) {
-#ifdef DEBUG_FALLBACK_MALLOC
-            std::cout << "  Appending onto chunk at " << offset_from_node ( p ) << std::endl;
-#endif
-            p->len = static_cast<heap_size>(p->len + cp->len);  // make the free heap_node larger
-            return;
-            }
-        else if ( after ( cp ) == p ) {     // there's a free heap_node right after
-#ifdef DEBUG_FALLBACK_MALLOC
-            std::cout << "  Appending free chunk at " << offset_from_node ( p ) << std::endl;
-#endif
-            cp->len = static_cast<heap_size>(cp->len + p->len);
-            if ( prev == 0 ) {
-                freelist = cp;
-                cp->next_node = p->next_node;
-                }
-            else
-                prev->next_node = offset_from_node(cp);
-            return;
-            }
-        }
-//  Nothing to merge with, add it to the start of the free list
-#ifdef DEBUG_FALLBACK_MALLOC
-    std::cout << "  Making new free list entry " << offset_from_node ( cp ) << std::endl;
-#endif
-    cp->next_node = offset_from_node ( freelist );
-    freelist = cp;
-}
-
-#ifdef INSTRUMENT_FALLBACK_MALLOC
-size_t print_free_list () {
-    struct heap_node *p, *prev;
-    heap_size total_free = 0;
-    if ( NULL == freelist )
-        init_heap ();
-
-    for (p = freelist, prev = 0;
-            p && p != list_end;     prev = p, p = node_from_offset (p->next_node)) {
-        std::cout << ( prev == 0 ? "" : "  ") << "Offset: " << offset_from_node ( p )
-            << "\tsize: " << p->len << " Next: " << p->next_node << std::endl;
-        total_free += p->len;
-    }
-    std::cout << "Total Free space: " << total_free << std::endl;
-    return total_free;
-    }
-#endif
-}  // end unnamed namespace
Index: test/test_exception_storage_nodynmem.pass.cpp
===================================================================
--- /dev/null
+++ test/test_exception_storage_nodynmem.pass.cpp
@@ -0,0 +1,32 @@
+//===--------------- test_exception_storage_nodynmem.cpp ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <assert.h>
+#include <stdlib.h>
+
+static bool OverwrittenCallocCalled = false;
+
+// Override calloc to simulate exhaustion of dynamic memory
+void *calloc(size_t, size_t) {
+    OverwrittenCallocCalled = true;
+    return 0;
+}
+
+int main(int argc, char *argv[]) {
+    // Run the test a couple of times
+    // to ensure that fallback memory doesn't leak.
+    for (int I = 0; I < 1000; ++I)
+        try {
+            throw 42;
+        } catch (...) {
+        }
+
+    assert(OverwrittenCallocCalled);
+    return 0;
+}
Index: test/test_fallback_malloc.pass.cpp
===================================================================
--- test/test_fallback_malloc.pass.cpp
+++ test/test_fallback_malloc.pass.cpp
@@ -16,7 +16,7 @@
 //  #define DEBUG_FALLBACK_MALLOC
 #define INSTRUMENT_FALLBACK_MALLOC
-#include "../src/fallback_malloc.ipp"
+#include "../src/fallback_malloc.cpp"

 container alloc_series ( size_t sz ) {
     container ptrs;
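A note on the new test's technique: because <stdlib.h> declares calloc with C
linkage, the test's definition inherits that linkage and preempts the C
library's calloc for the whole program, so the first thrown exception forces
__cxa_get_globals down the fallback path. A reduced sketch of the same
interposition trick (an assumption worth flagging: whether the program's
definition actually wins is platform- and linker-dependent):

    #include <assert.h>
    #include <stdlib.h>

    static int calloc_calls = 0;

    // Picks up extern "C" linkage from the declaration in <stdlib.h>,
    // so this definition shadows the C library's calloc at link time.
    void *calloc(size_t, size_t) {
        ++calloc_calls;
        return 0; // simulate out-of-memory
    }

    int main() {
        // Any library code that calls calloc now sees the failing version;
        // here we call it directly just to observe the override.
        assert(calloc(1, 8) == 0);
        assert(calloc_calls == 1);
        return 0;
    }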