/*-
 * This allocator implementation is designed to provide scalable performance
 * for multi-threaded programs on multi-processor systems.  The following
 * features are included for this purpose:
 *
 * + Multiple arenas are used if there are multiple CPUs, which reduces lock
 *   contention and cache sloshing.
 *
 * + Thread-specific caching is used if there are multiple threads, which
 *   reduces the amount of locking.
 *
 * + Cache line sharing between arenas is avoided for internal data
 *   structures.
 *
 * + Memory is managed in chunks and runs (chunks can be split into runs),
 *   rather than as individual pages.  This provides a constant-time
 *   mechanism for associating allocations with particular arenas.
 *
 * Allocation requests are rounded up to the nearest size class, and no record
 * of the original request size is maintained.  Allocations are broken into
 * categories according to size class.  Assuming runtime defaults, 4 KiB pages
 * and a 16 byte quantum on a 32-bit system, the size classes in each category
 * are as follows:
 *
 *   |========================================|
 *   | Category | Subcategory      |     Size |
 *   |========================================|
 *   | Small    | Tiny             |        2 |
 *   |          |                  |        4 |
 *   |          |                  |        8 |
 *   |          |------------------+----------|
 *   |          | Quantum-spaced   |       16 |
 *   |          |                  |       32 |
 *   |          |                  |       48 |
 *   |          |                  |      ... |
 *   |          |                  |       96 |
 *   |          |                  |      112 |
 *   |          |                  |      128 |
 *   |          |------------------+----------|
 *   |          | Cacheline-spaced |      192 |
 *   |          |                  |      256 |
 *   |          |                  |      320 |
 *   |          |                  |      384 |
 *   |          |                  |      448 |
 *   |          |                  |      512 |
 *   |          |------------------+----------|
 *   |          | Sub-page         |      768 |
 *   |          |                  |     1024 |
 *   |          |                  |     1280 |
 *   |          |                  |      ... |
 *   |          |                  |     3328 |
 *   |          |                  |     3584 |
 *   |          |                  |     3840 |
 *   |========================================|
 *   | Medium                      |    4 KiB |
 *   |                             |    6 KiB |
 *   |                             |    8 KiB |
 *   |                             |      ... |
 *   |                             |   28 KiB |
 *   |                             |   30 KiB |
 *   |                             |   32 KiB |
 *   |========================================|
 *   | Large                       |   36 KiB |
 *   |                             |   40 KiB |
 *   |                             |   44 KiB |
 *   |                             |      ... |
 *   |                             | 1012 KiB |
 *   |                             | 1016 KiB |
 *   |                             | 1020 KiB |
 *   |========================================|
 *   | Huge                        |    1 MiB |
 *   |                             |    2 MiB |
 *   |                             |    3 MiB |
 *   |                             |      ... |
 *   |========================================|
 *
 * Different mechanisms are used according to category:
 *
 * Small/medium : Each size class is segregated into its own set of runs.
 *                Each run maintains a bitmap of which regions are
 *                free/allocated.
 *
 * Large : Each allocation is backed by a dedicated run.  Metadata are stored
 *         in the associated arena chunk header maps.
 *
 * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
 *        Metadata are stored in a separate red-black tree.
 *
 *******************************************************************************
 */
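
/*
 * Illustrative sketch (compiled out; not part of the allocator): the two
 * constant-time operations described above.  example_quantum_ceil() shows how
 * a request is rounded up within the quantum-spaced subcategory, and
 * example_chunk_base() shows how chunk-granular management lets any pointer
 * be mapped to its owning chunk with a single mask.  EXAMPLE_QUANTUM,
 * EXAMPLE_CHUNK_SIZE, and both function names are hypothetical; the real
 * allocator derives these values from its runtime configuration.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define	EXAMPLE_QUANTUM		((size_t)16)		/* Assumed quantum. */
#define	EXAMPLE_CHUNK_SIZE	((size_t)1 << 20)	/* Assumed 1 MiB chunks. */

static size_t
example_quantum_ceil(size_t size)
{

	/* Round up to the next quantum multiple: 17 -> 32, 96 -> 96. */
	return ((size + EXAMPLE_QUANTUM - 1) & ~(EXAMPLE_QUANTUM - 1));
}

static void *
example_chunk_base(void *ptr)
{

	/* Chunks are naturally aligned, so masking finds the chunk base. */
	return ((void *)((uintptr_t)ptr & ~((uintptr_t)EXAMPLE_CHUNK_SIZE - 1)));
}
#endif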

#define	JEMALLOC_C_
#include "internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

arena_t			**arenas;
unsigned		narenas;
#ifndef NO_TLS
static unsigned		next_arena;
#endif
static malloc_mutex_t	arenas_lock;	/* Protects arenas initialization. */

#ifndef NO_TLS
__thread arena_t	*arenas_map JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*JEMALLOC_P(malloc_options)
    JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# endif
#else
bool	opt_abort = false;
# ifdef JEMALLOC_FILL
bool	opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool	opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool	opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool	opt_zero = false;
#endif
static int	opt_narenas_lshift = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(const char *p1, const char *p2, const char *p3,
    const char *p4);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_init_hard(void);
static void	jemalloc_prefork(void);
static void	jemalloc_postfork(void);

/******************************************************************************/
/* malloc_message() setup. */

#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{

	if (write(STDERR_FILENO, p1, strlen(p1)) < 0
	    || write(STDERR_FILENO, p2, strlen(p2)) < 0
	    || write(STDERR_FILENO, p3, strlen(p3)) < 0
	    || write(STDERR_FILENO, p4, strlen(p4)) < 0)
		return;
}

void	(*JEMALLOC_P(malloc_message))(const char *p1, const char *p2,
    const char *p3, const char *p4) JEMALLOC_ATTR(visibility("default")) =
    wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(sizeof(arena_t)
	    + (sizeof(arena_bin_t) * (nbins - 1)));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write4("<jemalloc>", ": Error initializing arena\n", "", "");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

#ifndef NO_TLS
/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		malloc_mutex_lock(&arenas_lock);
		if ((ret = arenas[next_arena]) == NULL)
			ret = arenas_extend(next_arena);
		next_arena = (next_arena + 1) % narenas;
		malloc_mutex_unlock(&arenas_lock);
	} else
		ret = arenas[0];

	arenas_map = ret;

	return (ret);
}
#endif

static inline void *
ipalloc(size_t alignment, size_t size)
{
	void *ret;
	size_t ceil_size;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size.  For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10100000 |                32
	 *    192 | 11000000 |                64
	 *
	 * Depending on runtime settings, it is possible that arena_malloc()
	 * will further round up to a power of two, but that never causes
	 * correctness issues.
	 */
	ceil_size = (size + (alignment - 1)) & (-alignment);
	/*
	 * (ceil_size < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (ceil_size < size) {
		/* size_t overflow. */
		return (NULL);
	}

	if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE
	    && ceil_size <= arena_maxclass))
		ret = arena_malloc(ceil_size, false);
	else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		ceil_size = PAGE_CEILING(size);
		/*
		 * (ceil_size < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (ceil_size + alignment < ceil_size) protects against the
		 * combination of maximal alignment and ceil_size large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * ceil_size value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (ceil_size < size || ceil_size + alignment < ceil_size) {
			/* size_t overflow. */
			return (NULL);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (ceil_size >= alignment)
			run_size = ceil_size + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}

		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), alignment, ceil_size,
			    run_size);
		} else if (alignment <= chunksize)
			ret = huge_malloc(ceil_size, false);
		else
			ret = huge_palloc(alignment, ceil_size);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}
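
/*
 * Illustrative sketch (compiled out; not part of the allocator): the
 * (size + (alignment - 1)) & -alignment idiom used by ipalloc() above.  For a
 * power-of-two alignment, -alignment equals ~(alignment - 1) in unsigned
 * arithmetic, so the expression rounds size up to an alignment multiple:
 * align_up(100, 64) == 128 and align_up(128, 64) == 128.  The function name
 * is hypothetical.  The same wraparound hazard applies: for size within
 * alignment of SIZE_MAX the sum overflows, which is why ipalloc() re-checks
 * (ceil_size < size) after rounding.
 */
#if 0
#include <assert.h>
#include <stddef.h>

static size_t
example_align_up(size_t size, size_t alignment)
{

	/* alignment must be a non-zero power of two. */
	assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
	return ((size + (alignment - 1)) & -alignment);
}
#endif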

static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
	unsigned i;

	/*
	 * Merge stats from extant threads.  This is racy, since individual
	 * threads do not lock when recording tcache stats events.  As a
	 * consequence, the final stats may be slightly out of date by the time
	 * they are reported, if other threads continue to allocate.
	 */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			tcache_t *tcache;

			malloc_mutex_lock(&arena->lock);
			ql_foreach(tcache, &arena->tcache_ql, link) {
				tcache_stats_merge(tcache, arena);
			}
			malloc_mutex_unlock(&arena->lock);
		}
	}
#endif
	JEMALLOC_P(malloc_stats_print)(NULL, NULL);
}

static inline void *
iralloc(void *ptr, size_t size)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr);

	if (size <= arena_maxclass)
		return (arena_ralloc(ptr, size, oldsize));
	else
		return (huge_ralloc(ptr, size, oldsize));
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_init_hard(void)
{
	unsigned i;
	int linklen;
	char buf[PATH_MAX + 1];
	const char *opts;
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (unsigned)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	for (i = 0; i < 3; i++) {
		unsigned j;

		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if ((linklen = readlink("/etc/jemalloc.conf", buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/jemalloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1:
			if ((opts = getenv("JEMALLOC_OPTIONS")) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the JEMALLOC_OPTIONS
				 * environment variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2:
			if (JEMALLOC_P(malloc_options) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_options);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		for (j = 0; opts[j] != '\0'; j++) {
			unsigned k, nreps;
			bool nseen;

			/* Parse repetition count, if any. */
			for (nreps = 0, nseen = false;; j++, nseen = true) {
				switch (opts[j]) {
				case '0': case '1': case '2': case '3':
				case '4': case '5': case '6': case '7':
				case '8': case '9':
					nreps *= 10;
					nreps += opts[j] - '0';
					break;
				default:
					goto MALLOC_OUT;
				}
			}
MALLOC_OUT:
			if (nseen == false)
				nreps = 1;

			for (k = 0; k < nreps; k++) {
				switch (opts[j]) {
				case 'a':
					opt_abort = false;
					break;
				case 'A':
					opt_abort = true;
					break;
				case 'c':
					if (opt_lg_cspace_max - 1 >
					    opt_lg_qspace_max &&
					    opt_lg_cspace_max >
					    LG_CACHELINE)
						opt_lg_cspace_max--;
					break;
				case 'C':
					if (opt_lg_cspace_max < PAGE_SHIFT
					    - 1)
						opt_lg_cspace_max++;
					break;
				case 'd':
					if (opt_lg_dirty_mult + 1 <
					    (sizeof(size_t) << 3))
						opt_lg_dirty_mult++;
					break;
				case 'D':
					if (opt_lg_dirty_mult >= 0)
						opt_lg_dirty_mult--;
					break;
#ifdef JEMALLOC_TCACHE
				case 'g':
					if (opt_lg_tcache_gc_sweep >= 0)
						opt_lg_tcache_gc_sweep--;
					break;
				case 'G':
					if (opt_lg_tcache_gc_sweep + 1 <
					    (sizeof(size_t) << 3))
						opt_lg_tcache_gc_sweep++;
					break;
				case 'h':
					if (opt_lg_tcache_nslots > 0)
						opt_lg_tcache_nslots--;
					break;
				case 'H':
					if (opt_lg_tcache_nslots + 1 <
					    (sizeof(size_t) << 3))
						opt_lg_tcache_nslots++;
					break;
#endif
#ifdef JEMALLOC_FILL
				case 'j':
					opt_junk = false;
					break;
				case 'J':
					opt_junk = true;
					break;
#endif
				case 'k':
					/*
					 * Chunks always require at least one
					 * header page, plus enough room to
					 * hold a run for the largest medium
					 * size class (one page more than the
					 * size).
					 */
					if ((1U << (opt_lg_chunk - 1)) >=
					    (2U << PAGE_SHIFT) + (1U <<
					    opt_lg_medium_max))
						opt_lg_chunk--;
					break;
				case 'K':
					if (opt_lg_chunk + 1 <
					    (sizeof(size_t) << 3))
						opt_lg_chunk++;
					break;
				case 'm':
					if (opt_lg_medium_max > PAGE_SHIFT)
						opt_lg_medium_max--;
					break;
				case 'M':
					if (opt_lg_medium_max + 1 <
					    opt_lg_chunk)
						opt_lg_medium_max++;
					break;
				case 'n':
					opt_narenas_lshift--;
					break;
				case 'N':
					opt_narenas_lshift++;
					break;
				case 'p':
					opt_stats_print = false;
					break;
				case 'P':
					opt_stats_print = true;
					break;
				case 'q':
					if (opt_lg_qspace_max > LG_QUANTUM)
						opt_lg_qspace_max--;
					break;
				case 'Q':
					if (opt_lg_qspace_max + 1 <
					    opt_lg_cspace_max)
						opt_lg_qspace_max++;
					break;
#ifdef JEMALLOC_TCACHE
				case 's':
					opt_tcache_sort = false;
					break;
				case 'S':
					opt_tcache_sort = true;
					break;
#endif
#ifdef JEMALLOC_TRACE
				case 't':
					opt_trace = false;
					break;
				case 'T':
					opt_trace = true;
					break;
#endif
#ifdef JEMALLOC_SYSV
				case 'v':
					opt_sysv = false;
					break;
				case 'V':
					opt_sysv = true;
					break;
#endif
#ifdef JEMALLOC_XMALLOC
				case 'x':
					opt_xmalloc = false;
					break;
				case 'X':
					opt_xmalloc = true;
					break;
#endif
#ifdef JEMALLOC_FILL
				case 'z':
					opt_zero = false;
					break;
				case 'Z':
					opt_zero = true;
					break;
#endif
				default: {
					char cbuf[2];

					cbuf[0] = opts[j];
					cbuf[1] = '\0';
					malloc_write4("<jemalloc>",
					    ": Unsupported character "
					    "in malloc options: '", cbuf,
					    "'\n");
				}
				}
			}
		}
	}
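
/*
 * Illustrative sketch (compiled out; not part of the allocator): the
 * repetition-prefix grammar parsed above, restated in isolation.  A decimal
 * prefix repeats the flag that follows it, so the option string "3N" applies
 * 'N' three times (opt_narenas_lshift += 3, i.e. eight times as many arenas),
 * while a bare flag such as "N" runs exactly once.  The function name is
 * hypothetical.
 */
#if 0
#include <ctype.h>
#include <stdbool.h>

static unsigned
example_parse_nreps(const char **p)
{
	unsigned nreps = 0;
	bool nseen = false;

	while (isdigit((unsigned char)**p)) {
		nreps = nreps * 10 + (unsigned)(**p - '0');
		(*p)++;
		nseen = true;
	}
	return (nseen ? nreps : 1);	/* "3N" -> 3; "N" -> 1. */
}
#endif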

#ifdef JEMALLOC_TRACE
	if (opt_trace)
		trace_boot();
#endif
	if (opt_stats_print) {
		/* Print statistics at exit. */
		atexit(stats_print_atexit);
	}

	/* Register fork handlers. */
	pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);

	if (arena_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_TCACHE
	tcache_boot();
#endif

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	arena_boot1();

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifndef NO_TLS
	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	arenas_map = arenas[0];
#endif

	malloc_mutex_init(&arenas_lock);

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (ncpus > 1) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
#ifdef JEMALLOC_TCACHE
		if (tcache_nslots) {
			/*
			 * Only large object allocation/deallocation is
			 * guaranteed to acquire an arena mutex, so we can get
			 * away with fewer arenas than without thread caching.
			 */
			opt_narenas_lshift += 1;
		} else {
#endif
			/*
			 * All allocations must acquire an arena mutex, so use
			 * plenty of arenas.
			 */
			opt_narenas_lshift += 2;
#ifdef JEMALLOC_TCACHE
		}
#endif
	}

	/* Determine how many arenas to use. */
	narenas = ncpus;
	if (opt_narenas_lshift > 0) {
		if ((narenas << opt_narenas_lshift) > narenas)
			narenas <<= opt_narenas_lshift;
		/*
		 * Make sure not to exceed the limits of what base_alloc() can
		 * handle.
		 */
		if (narenas * sizeof(arena_t *) > chunksize)
			narenas = chunksize / sizeof(arena_t *);
	} else if (opt_narenas_lshift < 0) {
		if ((narenas >> -opt_narenas_lshift) < narenas)
			narenas >>= -opt_narenas_lshift;
		/* Make sure there is at least one arena. */
		if (narenas == 0)
			narenas = 1;
	}

#ifdef NO_TLS
	if (narenas > 1) {
		static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
		    23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
		    89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
		    151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
		    223, 227, 229, 233, 239, 241, 251, 257, 263};
		unsigned nprimes, parenas;

		/*
		 * Pick a prime number of hash arenas that is more than narenas
		 * so that direct hashing of pthread_self() pointers tends to
		 * spread allocations evenly among the arenas.
		 */
		assert((narenas & 1) == 0);	/* narenas must be even. */
		nprimes = (sizeof(primes) >> LG_SIZEOF_INT);
		parenas = primes[nprimes - 1]; /* In case not enough primes. */
		for (i = 1; i < nprimes; i++) {
			if (primes[i] > narenas) {
				parenas = primes[i];
				break;
			}
		}
		narenas = parenas;
	}
#endif
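
/*
 * Illustrative sketch (compiled out; not part of the allocator): why a prime
 * arena count helps in the NO_TLS case above.  Threads are mapped to arenas
 * by reducing the pthread_self() value modulo narenas; because thread handles
 * tend to be pointers with a fixed alignment stride, a prime modulus shares
 * no factor with that stride and so spreads threads more evenly.  The
 * function name is hypothetical, and the cast assumes a platform where
 * pthread_t is an integer or pointer type.
 */
#if 0
#include <pthread.h>
#include <stdint.h>

static unsigned
example_arena_index(unsigned narenas)
{

	/* narenas is assumed prime, as arranged by the code above. */
	return ((unsigned)((uintptr_t)pthread_self() % narenas));
}
#endif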

#ifndef NO_TLS
	next_arena = 0;
#endif

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
# ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in malloc(): invalid size 0\n",
				    "", "");
				abort();
			}
# endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	ret = imalloc(size);

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in malloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_TRACE
	if (opt_trace)
		trace_malloc(ret, size);
#endif
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
	int ret;
	void *result;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
# ifdef JEMALLOC_XMALLOC
				if (opt_xmalloc) {
					malloc_write4("<jemalloc>",
					    ": Error in posix_memalign(): "
					    "invalid size 0\n", "", "");
					abort();
				}
# endif
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in posix_memalign(): "
				    "invalid alignment\n", "", "");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		result = ipalloc(alignment, size);
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in posix_memalign(): out of memory\n",
			    "", "");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_TRACE
	if (opt_trace)
		trace_posix_memalign(result, alignment, size);
#endif
	return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	ret = icalloc(num_size);

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in calloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_TRACE
	if (opt_trace)
		trace_calloc(ret, num, size);
#endif
	return (ret);
}
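
/*
 * Illustrative sketch (compiled out; not part of the allocator): the calloc()
 * overflow test above, restated standalone.  If neither operand has a bit set
 * in the high half of a size_t, their product cannot wrap, so the division
 * check runs only when a high bit is present.  This sketch uses the standard
 * SIZE_MAX in place of the internal SIZE_T_MAX, adds the size != 0 guard that
 * calloc() gets implicitly from its num_size == 0 branch, and its function
 * name is hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
example_mul_overflows(size_t num, size_t size)
{
	size_t num_size = num * size;

	/* High-half mask, e.g. 0xffffffff00000000 for a 64-bit size_t. */
	return (((num | size) & (SIZE_MAX << (sizeof(size_t) << 2))) != 0
	    && size != 0 && num_size / size != num);
}
#endif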

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
#ifdef JEMALLOC_TRACE
	size_t old_size;
#endif

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
			if (ptr != NULL) {
#ifdef JEMALLOC_TRACE
				if (opt_trace)
					old_size = isalloc(ptr);
#endif
				idalloc(ptr);
			}
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#ifdef JEMALLOC_TRACE
		if (opt_trace)
			old_size = isalloc(ptr);
#endif

		ret = iralloc(ptr, size);

		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in realloc(): out of "
				    "memory\n", "", "");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	} else {
		if (malloc_init())
			ret = NULL;
		else
			ret = imalloc(size);

#ifdef JEMALLOC_TRACE
		if (opt_trace)
			old_size = 0;
#endif

		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in realloc(): out of "
				    "memory\n", "", "");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_TRACE
	if (opt_trace)
		trace_realloc(ret, ptr, size, old_size);
#endif
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#ifdef JEMALLOC_TRACE
		if (opt_trace)
			trace_free(ptr, isalloc(ptr));
#endif
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(ptr != NULL);
	ret = isalloc(ptr);

#ifdef JEMALLOC_TRACE
	if (opt_trace)
		trace_malloc_usable_size(ret, ptr);
#endif
	return (ret);
}

#ifdef JEMALLOC_TCACHE
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_tcache_flush)(void)
{
	tcache_t *tcache;

	tcache = tcache_tls;
	if (tcache == NULL)
		return;

	tcache_destroy(tcache);
	tcache_tls = NULL;
}
#endif

/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().  These functions are only called if the program is
 * running in threaded mode, so there is no need to check whether the program
 * is threaded here.
 */

static void
jemalloc_prefork(void)
{
	bool again;
	unsigned i, j;
	arena_t *larenas[narenas], *tarenas[narenas];

	/* Acquire all mutexes in a safe order. */

	/*
	 * arenas_lock must be acquired after all of the arena mutexes, in
	 * order to avoid potential deadlock with arena_lock_balance[_hard]().
	 * Since arenas_lock protects the arenas array, the following code has
	 * to race with arenas_extend() callers until it succeeds in locking
	 * all arenas before locking arenas_lock.
	 */
	memset(larenas, 0, sizeof(arena_t *) * narenas);
	do {
		again = false;

		malloc_mutex_lock(&arenas_lock);
		for (i = 0; i < narenas; i++) {
			if (arenas[i] != larenas[i]) {
				memcpy(tarenas, arenas, sizeof(arena_t *) *
				    narenas);
				malloc_mutex_unlock(&arenas_lock);
				for (j = 0; j < narenas; j++) {
					if (larenas[j] != tarenas[j]) {
						larenas[j] = tarenas[j];
						malloc_mutex_lock(
						    &larenas[j]->lock);
					}
				}
				again = true;
				break;
			}
		}
	} while (again);

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif
}

static void
jemalloc_postfork(void)
{
	unsigned i;
	arena_t *larenas[narenas];

	/* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_DSS
	malloc_mutex_unlock(&dss_mtx);
#endif

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	memcpy(larenas, arenas, sizeof(arena_t *) * narenas);
	malloc_mutex_unlock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (larenas[i] != NULL)
			malloc_mutex_unlock(&larenas[i]->lock);
	}
}