/*
 * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
 * public APIs to be prefixed. This makes it possible, with some care, to use
 * multiple allocators simultaneously.
 */
#undef JEMALLOC_PREFIX
#undef JEMALLOC_CPREFIX
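
/*
 * Illustrative sketch, not part of the generated header: assuming the library
 * were configured with --with-jemalloc-prefix=foo_, the public entry points
 * carry the foo_ prefix and can coexist with the system allocator.  The foo_
 * names below are assumptions used only for this example.
 */
#if 0
#include <stdlib.h>

void	*foo_malloc(size_t size);	/* Prefixed jemalloc entry point. */
void	foo_free(void *ptr);

static void
example_prefix_usage(void)
{
	void *a = foo_malloc(64);	/* Served by jemalloc. */
	void *b = malloc(64);		/* Served by the system allocator. */

	foo_free(a);
	free(b);
}
#endif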

/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_malloc_usable_size
#undef je_malloc_stats_print
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_memalign
#undef je_valloc
#undef je_allocm
#undef je_rallocm
#undef je_sallocm
#undef je_dallocm
#undef je_nallocm
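
/*
 * Illustrative sketch only: with default settings (no prefix), the generated
 * header is expected to replace the lines above with definitions of roughly
 * this form, mapping the internal je_* names onto the standard symbols:
 */
#if 0
#define je_malloc malloc
#define je_calloc calloc
#define je_realloc realloc
#define je_free free
#endif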

/*
 * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
 * For shared libraries, symbol visibility mechanisms prevent these symbols
 * from being exported, but for static libraries, naming collisions are a real
 * possibility.
 */
#undef JEMALLOC_PRIVATE_NAMESPACE
#undef JEMALLOC_N
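
/*
 * Illustrative sketch only: with a hypothetical private namespace of
 * jemalloc_internal_, a mangling macro of roughly this shape prefixes each
 * library-private symbol at compile time (the exact generated form may
 * differ):
 */
#if 0
#define JEMALLOC_N(n) jemalloc_internal_##n
#define arena_malloc JEMALLOC_N(arena_malloc)	/* -> jemalloc_internal_arena_malloc */
#endif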

/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.
 */
#undef CPU_SPINWAIT
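
/*
 * Illustrative sketch only: on x86, a typical definition is the pause hint
 * (an assumption for this example, not the configure-generated value), used
 * inside a spin loop like so:
 */
#if 0
#define CPU_SPINWAIT __asm__ volatile("pause")

static void
example_spin_until(volatile int *flag)
{

	while (*flag == 0)
		CPU_SPINWAIT;	/* Yield to the sibling hardware thread. */
}
#endif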

/*
 * Defined if OSAtomic*() functions are available, as provided by Darwin, and
 * documented in the atomic(3) manual page.
 */
#undef JEMALLOC_OSATOMIC
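
/*
 * Illustrative sketch only (Darwin): an atomic counter update built on
 * OSAtomicAdd64() from <libkern/OSAtomic.h>.  The function name is an
 * assumption for this example.
 */
#if 0
#include <stdint.h>
#include <libkern/OSAtomic.h>

static int64_t
example_atomic_add(volatile int64_t *p, int64_t x)
{

	/* OSAtomicAdd64() atomically adds x to *p and returns the new value. */
	return (OSAtomicAdd64(x, p));
}
#endif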

/*
 * Defined if OSSpin*() functions are available, as provided by Darwin, and
 * documented in the spinlock(3) manual page.
 */
#undef JEMALLOC_OSSPIN
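
/*
 * Illustrative sketch only (Darwin): guarding a short critical section with
 * an OSSpinLock.  The names example_lock/example_critical_section are
 * assumptions for this example.
 */
#if 0
#include <libkern/OSAtomic.h>

static OSSpinLock	example_lock = OS_SPINLOCK_INIT;

static void
example_critical_section(void)
{

	OSSpinLockLock(&example_lock);
	/* ... protected work ... */
	OSSpinLockUnlock(&example_lock);
}
#endif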

/*
 * Defined if _malloc_thread_cleanup() exists. At least in the case of
 * FreeBSD, pthread_key_create() allocates, which if used during malloc
 * bootstrapping will cause recursion into the pthreads library. Therefore, if
 * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
 * malloc_tsd.
 */
#undef JEMALLOC_MALLOC_THREAD_CLEANUP
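
/*
 * Illustrative sketch only: on FreeBSD, libc invokes this hook at thread
 * exit, so the allocator can perform per-thread cleanup without allocating a
 * pthread key during bootstrap.
 */
#if 0
void
_malloc_thread_cleanup(void)
{

	/* Tear down this thread's allocator-private data (e.g. its cache). */
}
#endif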

/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_CATTR(s, a) __attribute__((s))
# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
#else
# define JEMALLOC_CATTR(s, a) a
# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
#endif
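
/*
 * Illustrative usage sketch only; example_malloc is a hypothetical name.
 * JEMALLOC_ATTR(malloc) expands to __attribute__((malloc)) when attribute
 * syntax is supported and to nothing otherwise, so the declaration compiles
 * either way.  JEMALLOC_CATTR() additionally takes a fallback token sequence
 * as its second argument for compilers without attribute support.
 */
#if 0
#include <stddef.h>

JEMALLOC_ATTR(malloc)
void	*example_malloc(size_t size);
#endif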

/* JEMALLOC_CC_SILENCE enables code that silences unhelpful compiler warnings. */
#undef JEMALLOC_CC_SILENCE

/*
 * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
#undef JEMALLOC_DEBUG

/* JEMALLOC_STATS enables statistics calculation. */
#undef JEMALLOC_STATS

/* JEMALLOC_PROF enables allocation profiling. */
#undef JEMALLOC_PROF

/* Use libunwind for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBUNWIND

/* Use libgcc for profile backtracing if defined. */
#undef JEMALLOC_PROF_LIBGCC

/* Use gcc intrinsics for profile backtracing if defined. */
#undef JEMALLOC_PROF_GCC

/*
 * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
 * This makes it possible to allocate/deallocate objects without any locking
 * when the cache is in the steady state.
 */
#undef JEMALLOC_TCACHE
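
/*
 * Illustrative sketch of the idea only, not jemalloc's actual tcache: each
 * thread keeps a small stack of recently freed objects per size class, so the
 * common allocation/deallocation path touches only thread-local state and
 * takes no lock.  All example_* names below are assumptions.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

#define EXAMPLE_TCACHE_NSLOTS	8

typedef struct {
	void		*slots[EXAMPLE_TCACHE_NSLOTS];
	unsigned	nslots;		/* Number of cached objects. */
} example_tcache_bin_t;

static __thread example_tcache_bin_t	example_bin;

static void *
example_cached_alloc(void)
{

	if (example_bin.nslots > 0)
		return (example_bin.slots[--example_bin.nslots]);
	return (NULL);	/* Empty: fall back to the locked arena path. */
}

static bool
example_cached_dalloc(void *ptr)
{

	if (example_bin.nslots < EXAMPLE_TCACHE_NSLOTS) {
		example_bin.slots[example_bin.nslots++] = ptr;
		return (true);
	}
	return (false);	/* Full: flush to the arena instead. */
}
#endif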

/*
 * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
 * segment (DSS).
 */
#undef JEMALLOC_DSS
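
/*
 * Illustrative sketch only: growing the data segment by one 4 MiB chunk via
 * sbrk(2).  The chunk size and function name are assumptions for this
 * example.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

static void *
example_chunk_alloc_dss(void)
{
	void *ret = sbrk((intptr_t)(4 << 20));

	/* sbrk(2) returns (void *)-1 on failure. */
	return (ret == (void *)-1 ? NULL : ret);
}
#endif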

/* Support memory filling (junk/zero). */
#undef JEMALLOC_FILL

/* Support the experimental API. */
#undef JEMALLOC_EXPERIMENTAL
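
/*
 * Illustrative sketch only, assuming the experimental allocm() entry point
 * declared in <jemalloc/jemalloc.h> when this is defined.  The function name
 * example_zeroed_alloc is an assumption.
 */
#if 0
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
example_zeroed_alloc(size_t size)
{
	void *p;
	size_t usable;

	/* allocm() returns ALLOCM_SUCCESS and reports the usable size. */
	if (allocm(&p, &usable, size, ALLOCM_ZERO) != ALLOCM_SUCCESS)
		return (NULL);
	return (p);
}
#endif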

/* Support optional abort() on OOM. */
#undef JEMALLOC_XMALLOC

/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK

/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#undef STATIC_PAGE_SHIFT
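
/*
 * Illustrative arithmetic only: for the common 4 KiB page, configure is
 * expected to emit STATIC_PAGE_SHIFT == 12 (1 << 12 == 4096), from which
 * page size/mask constants of this shape can be derived.  The EXAMPLE_ names
 * are assumptions.
 */
#if 0
#define EXAMPLE_PAGE_SIZE	((size_t)1 << STATIC_PAGE_SHIFT)
#define EXAMPLE_PAGE_MASK	(EXAMPLE_PAGE_SIZE - 1)
#endif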

/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
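
/*
 * Illustrative sketch only: with JEMALLOC_TLS, per-thread state can be
 * reached through a __thread variable rather than pthread_getspecific().
 * The variable name is an assumption for this example.
 */
#if 0
static __thread void	*example_thread_arena;	/* This thread's arena. */
#endif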

/*
 * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
 * within jemalloc-owned chunks before dereferencing them.
 */
#undef JEMALLOC_IVSALLOC

/*
 * Define overrides for non-standard allocator-related functions if they
 * are present on the system.
 */
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
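
/*
 * For reference, an illustrative sketch of the non-standard prototypes being
 * overridden:
 */
#if 0
#include <stddef.h>

void	*memalign(size_t alignment, size_t size);
void	*valloc(size_t size);
#endif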

/*
 * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
 */
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION

/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */
#undef JEMALLOC_MREMAP_FIXED
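
/*
 * Illustrative sketch only (Linux): relocating the pages behind a huge
 * allocation without copying, as huge realloc() can when this is defined.
 * The function name is an assumption for this example.
 */
#if 0
#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

static void *
example_move_mapping(void *old_addr, size_t old_size, size_t new_size,
    void *new_addr)
{

	return (mremap(old_addr, old_size, new_size,
	    MREMAP_MAYMOVE | MREMAP_FIXED, new_addr));
}
#endif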

/*
 * Methods for purging unused pages differ between operating systems.
 *
 *   madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
 *                                 such that new pages will be demand-zeroed if
 *                                 the address region is later touched.
 *   madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
 *                             unused, such that they will be discarded rather
 *                             than swapped out.
 */
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
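
/*
 * Illustrative sketch only: purging a page-aligned region without unmapping
 * it, dispatching on whichever of the two macros configure defines.  The
 * function name is an assumption for this example.
 */
#if 0
#include <stddef.h>
#include <sys/mman.h>

static void
example_pages_purge(void *addr, size_t length)
{

#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
	madvise(addr, length, MADV_DONTNEED);	/* Linux. */
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
	madvise(addr, length, MADV_FREE);	/* FreeBSD/Darwin. */
#endif
}
#endif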

/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR

/* sizeof(int) == 2^LG_SIZEOF_INT. */
#undef LG_SIZEOF_INT
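
/*
 * Illustrative arithmetic only: on an LP64 system sizeof(void *) == 8 ==
 * 1 << 3, so LG_SIZEOF_PTR is expected to be 3 (2 on a 32-bit system), and
 * sizeof(int) == 4 gives LG_SIZEOF_INT == 2.  The byte sizes can then be
 * recovered as shifts; the EXAMPLE_ names are assumptions.
 */
#if 0
#define EXAMPLE_SIZEOF_PTR	((size_t)1 << LG_SIZEOF_PTR)
#define EXAMPLE_SIZEOF_INT	((size_t)1 << LG_SIZEOF_INT)
#endif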

/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG

/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T