/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed.  This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX

/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#undef JEMALLOC_PRIVATE_NAMESPACE
#undef JEMALLOC_N

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#undef CPU_SPINWAIT

/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9

/*
 * Defined if OSAtomic*() functions are available, as provided by Darwin, and
 * documented in the atomic(3) manual page.
 */
#undef JEMALLOC_OSATOMIC

/*
 * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
 * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
 * functions are defined in libgcc instead of being inlines)
 */
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4

/*
 * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
 * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
 * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
 * functions are defined in libgcc instead of being inlines)
 */
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8

/*
 * Defined if OSSpin*() functions are available, as provided by Darwin, and
 * documented in the spinlock(3) manual page.
 */
#undef JEMALLOC_OSSPIN

/*
 * Defined if _malloc_thread_cleanup() exists.  At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library.  Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
#undef JEMALLOC_MALLOC_THREAD_CLEANUP

/*
 * Defined if threaded initialization is known to be safe on this platform.
 * Among other things, it must be possible to initialize a mutex without
 * triggering allocation in order for threaded allocation to be safe.
 */
#undef JEMALLOC_THREADED_INIT

/*
 * Defined if the pthreads implementation defines
 * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
 * to avoid recursive allocation during mutex initialization.
 */
#undef JEMALLOC_MUTEX_INIT_CB

/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif defined(_MSC_VER)
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
#  define JEMALLOC_EXPORT __declspec(dllexport)
# else
#  define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif

/* Defined if sbrk() is supported. */
#undef JEMALLOC_HAVE_SBRK

/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL

/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#undef JEMALLOC_CC_SILENCE

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
#undef JEMALLOC_DEBUG

/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS

/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF

/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND

/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC

/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC

/*
 * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
 * This makes it possible to allocate/deallocate objects without any locking
 * when the cache is in the steady state.
 */
#undef JEMALLOC_TCACHE

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
 * segment (DSS).
 */
#undef JEMALLOC_DSS

/* Support memory filling (junk/zero/quarantine/redzone). */
#undef JEMALLOC_FILL

/* Support the experimental API. */
#undef JEMALLOC_EXPERIMENTAL

/* Support utrace(2)-based tracing. */
#undef JEMALLOC_UTRACE

/* Support Valgrind. */
#undef JEMALLOC_VALGRIND

/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC

/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK

/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT

/*
 * If defined, use munmap() to unmap freed chunks, rather than storing them for
 * later reuse.  This is disabled by default on Linux because common sequences
 * of mmap()/munmap() calls will cause virtual memory map holes.
 */
#undef JEMALLOC_MUNMAP

/*
 * If defined, use mremap(...MREMAP_FIXED...) for huge realloc().  This is
 * disabled by default because it is Linux-specific and it will cause virtual
 * memory map holes, much like munmap(2) does.
 */
#undef JEMALLOC_MREMAP

/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS

/*
 * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
 * within jemalloc-owned chunks before dereferencing them.
 */
#undef JEMALLOC_IVSALLOC

/*
 * Define overrides for non-standard allocator-related functions if they
 * are present on the system.
 */
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC

/*
 * At least Linux omits the "const" in:
 *
 *   size_t malloc_usable_size(const void *ptr);
 *
 * Match the operating system's prototype.
 */
#undef JEMALLOC_USABLE_SIZE_CONST

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched.
 *   madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
 *                             unused, such that they will be discarded rather
 *                             than swapped out.
 */
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE

/*
 * Define if operating system has alloca.h header.
 */
#undef JEMALLOC_HAS_ALLOCA_H

/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T