Refactor bootstrapping to delay tsd initialization.

Refactor bootstrapping to delay tsd initialization, primarily to support
integration with FreeBSD's libc.

Refactor a0*() for internal-only use, and add the
bootstrap_{malloc,calloc,free}() API for use by FreeBSD's libc.  This
separation limits use of the a0*() functions to metadata allocation,
which doesn't require malloc/calloc/free API compatibility.

This resolves #170.
commit 10aff3f3e1 (parent bc96876f99)
Author: Jason Evans
Date:   2015-01-20 15:37:51 -08:00

6 changed files with 210 additions and 132 deletions
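The split gives FreeBSD's libc a drop-in, malloc/calloc/free-compatible entry point, while the internal a0*() path stays minimal (no zero-size or NULL special cases). A small usage sketch, illustrative only and not part of the commit; early_tls_alloc() is a hypothetical caller:

#include <stddef.h>

/* Prototypes added by this commit (see the header and src/jemalloc.c hunks below). */
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);

/*
 * Hypothetical early libc caller: runs before tsd is usable, so it must go
 * through the bootstrap API rather than malloc()/calloc()/free().
 */
static void *
early_tls_alloc(size_t nmemb, size_t size)
{
    /*
     * calloc-compatible: zero-filled, and nmemb or size of 0 still yields a
     * unique, freeable allocation (num_size is bumped to 1 internally).
     */
    void *p = bootstrap_calloc(nmemb, size);

    bootstrap_free(NULL);    /* free-compatible: NULL is a no-op. */
    return (p);
}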

----------------------------------------------------------------------

@@ -404,9 +404,8 @@ extern size_t const index2size_tab[NSIZES];
 extern uint8_t const size2index_tab[];
 arena_t *a0get(void);
-void *a0malloc(size_t size);
-void *a0calloc(size_t num, size_t size);
-void a0free(void *ptr);
+void *a0malloc(size_t size, bool zero);
+void a0dalloc(void *ptr);
 arena_t *arenas_extend(unsigned ind);
 arena_t *arena_init(unsigned ind);
 unsigned narenas_total_get(void);

----------------------------------------------------------------------

@@ -1,5 +1,4 @@
-a0calloc
-a0free
+a0dalloc
 a0get
 a0malloc
 arena_get
@@ -107,6 +106,9 @@ bitmap_set
 bitmap_sfu
 bitmap_size
 bitmap_unset
+bootstrap_calloc
+bootstrap_free
+bootstrap_malloc
 bt_init
 buferror
 chunk_alloc_arena

----------------------------------------------------------------------

@@ -48,7 +48,7 @@ typedef enum {
  * void example_tsd_set(example_t *val) {...}
  *
  * Note that all of the functions deal in terms of (a_type *) rather than
  * (a_type) so that it is possible to support non-pointer types (unlike
  * pthreads TSD).  example_tsd_cleanup() is passed an (a_type *) pointer that is
  * cast to (void *).  This means that the cleanup function needs to cast the
  * function argument to (a_type *), then dereference the resulting pointer to
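The comment block above describes the cleanup contract; a minimal sketch of a matching cleanup function (illustrative only; example_t comes from the surrounding documentation block, while the state field and free_state() helper are hypothetical):

static void
example_tsd_cleanup(void *arg)
{
    /*
     * The wrapper passes an (a_type *), i.e. an example_t *, cast to
     * (void *); cast it back and dereference to reach the fields.
     */
    example_t *example = (example_t *)arg;

    if (example->state != NULL)
        free_state(example->state);    /* Hypothetical field and helper. */
}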

----------------------------------------------------------------------

@@ -484,14 +484,14 @@ ctl_arena_init(ctl_arena_stats_t *astats)
 	if (astats->lstats == NULL) {
 		astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
-		    sizeof(malloc_large_stats_t));
+		    sizeof(malloc_large_stats_t), false);
 		if (astats->lstats == NULL)
 			return (true);
 	}
 	if (astats->hstats == NULL) {
 		astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
-		    sizeof(malloc_huge_stats_t));
+		    sizeof(malloc_huge_stats_t), false);
 		if (astats->hstats == NULL)
 			return (true);
 	}
@@ -627,7 +627,7 @@ ctl_grow(void)
 	/* Allocate extended arena stats. */
 	astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
-	    sizeof(ctl_arena_stats_t));
+	    sizeof(ctl_arena_stats_t), false);
 	if (astats == NULL)
 		return (true);
@@ -636,7 +636,7 @@ ctl_grow(void)
 	    sizeof(ctl_arena_stats_t));
 	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
 	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
-		a0free(astats);
+		a0dalloc(astats);
 		return (true);
 	}
@@ -649,7 +649,7 @@ ctl_grow(void)
 		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
 		    sizeof(ctl_arena_stats_t));
 	}
-	a0free(ctl_stats.arenas);
+	a0dalloc(ctl_stats.arenas);
 	ctl_stats.arenas = astats;
 	ctl_stats.narenas++;
@@ -723,7 +723,7 @@ ctl_init(void)
 		 */
 		ctl_stats.narenas = narenas_total_get();
 		ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
-		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
+		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t), false);
 		if (ctl_stats.arenas == NULL) {
 			ret = true;
 			goto label_return;
@@ -742,12 +742,12 @@ ctl_init(void)
 			if (ctl_arena_init(&ctl_stats.arenas[i])) {
 				unsigned j;
 				for (j = 0; j < i; j++) {
-					a0free(
+					a0dalloc(
 					    ctl_stats.arenas[j].lstats);
-					a0free(
+					a0dalloc(
 					    ctl_stats.arenas[j].hstats);
 				}
-				a0free(ctl_stats.arenas);
+				a0dalloc(ctl_stats.arenas);
 				ctl_stats.arenas = NULL;
 				ret = true;
 				goto label_return;

----------------------------------------------------------------------

@@ -62,8 +62,13 @@ static unsigned narenas_total;
 static arena_t *a0; /* arenas[0]; read-only after initialization. */
 static unsigned narenas_auto; /* Read-only after initialization. */
 
-/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
+typedef enum {
+	malloc_init_uninitialized	= 3,
+	malloc_init_a0_initialized	= 2,
+	malloc_init_recursible		= 1,
+	malloc_init_initialized		= 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
 
 JEMALLOC_ALIGNED(CACHELINE)
 const size_t index2size_tab[NSIZES] = {
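The enum values are ordered so that malloc_init_initialized is 0: the fast-path check is a comparison against zero, and the slow-path jump (jnz) falls through in the common, already-initialized case. A simplified view of the fast path this enables, with the malloc_initialized() helper inlined (assumes the surrounding jemalloc definitions; the real inline functions appear later in this file's diff):

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	/*
	 * malloc_init_state == 0 (malloc_init_initialized) in the common case,
	 * so this compiles down to a test against zero plus a rarely taken jnz.
	 */
	if (unlikely(malloc_init_state != malloc_init_initialized) &&
	    malloc_init_hard())
		return (true);
	malloc_thread_init();
	return (false);
}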
@@ -218,6 +223,7 @@ typedef struct {
  * definition.
  */
 
+static bool malloc_init_hard_a0(void);
 static bool malloc_init_hard(void);
 
 /******************************************************************************/
@@ -225,6 +231,13 @@ static bool malloc_init_hard(void);
  * Begin miscellaneous support functions.
  */
 
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_initialized(void)
+{
+
+	return (malloc_init_state == malloc_init_initialized);
+}
+
 JEMALLOC_ALWAYS_INLINE_C void
 malloc_thread_init(void)
 {
@@ -242,11 +255,20 @@ malloc_thread_init(void)
 	quarantine_alloc_hook();
 }
 
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init_a0(void)
+{
+
+	if (unlikely(malloc_init_state == malloc_init_uninitialized))
+		return (malloc_init_hard_a0());
+	return (false);
+}
+
 JEMALLOC_ALWAYS_INLINE_C bool
 malloc_init(void)
 {
 
-	if (unlikely(!malloc_initialized) && malloc_init_hard())
+	if (unlikely(!malloc_initialized()) && malloc_init_hard())
 		return (true);
 	malloc_thread_init();
@@ -254,10 +276,8 @@ malloc_init(void)
 }
 
 /*
- * The a0*() functions are used instead of i[mcd]alloc() in bootstrap-sensitive
- * situations that cannot tolerate TLS variable access.  These functions are
- * also exposed for use in static binaries on FreeBSD, hence the old-style
- * malloc() API.
+ * The a0*() functions are used instead of i[mcd]alloc() in situations that
+ * cannot tolerate TLS variable access.
  */
 
 arena_t *
@@ -269,16 +289,13 @@ a0get(void)
 }
 
 static void *
-a0alloc(size_t size, bool zero)
+a0imalloc(size_t size, bool zero)
 {
 	void *ret;
 
-	if (unlikely(malloc_init()))
+	if (unlikely(malloc_init_a0()))
 		return (NULL);
 
-	if (size == 0)
-		size = 1;
-
 	if (likely(size <= arena_maxclass))
 		ret = arena_malloc(NULL, a0get(), size, zero, false);
 	else
@@ -287,28 +304,11 @@ a0alloc(size_t size, bool zero)
 	return (ret);
 }
 
-void *
-a0malloc(size_t size)
-{
-
-	return (a0alloc(size, false));
-}
-
-void *
-a0calloc(size_t num, size_t size)
-{
-
-	return (a0alloc(num * size, true));
-}
-
-void
-a0free(void *ptr)
+static void
+a0idalloc(void *ptr)
 {
 	arena_chunk_t *chunk;
 
-	if (ptr == NULL)
-		return;
-
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (likely(chunk != ptr))
 		arena_dalloc(NULL, chunk, ptr, false);
@ -316,6 +316,60 @@ a0free(void *ptr)
huge_dalloc(NULL, ptr, false); huge_dalloc(NULL, ptr, false);
} }
void *
a0malloc(size_t size, bool zero)
{
return (a0imalloc(size, zero));
}
void
a0dalloc(void *ptr)
{
a0idalloc(ptr);
}
/*
* FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
* situations that cannot tolerate TLS variable access (TLS allocation and very
* early internal data structure initialization).
*/
void *
bootstrap_malloc(size_t size)
{
if (unlikely(size == 0))
size = 1;
return (a0imalloc(size, false));
}
void *
bootstrap_calloc(size_t num, size_t size)
{
size_t num_size;
num_size = num * size;
if (unlikely(num_size == 0)) {
assert(num == 0 || size == 0);
num_size = 1;
}
return (a0imalloc(num_size, true));
}
void
bootstrap_free(void *ptr)
{
if (unlikely(ptr == NULL))
return;
a0idalloc(ptr);
}
/* Create a new arena and insert it into the arenas array at index ind. */ /* Create a new arena and insert it into the arenas array at index ind. */
static arena_t * static arena_t *
arena_init_locked(unsigned ind) arena_init_locked(unsigned ind)
@@ -328,7 +382,7 @@ arena_init_locked(unsigned ind)
 		unsigned narenas_new = narenas_total + 1;
 		arena_t **arenas_new =
 		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
-		    sizeof(arena_t *)));
+		    sizeof(arena_t *)), false);
 		if (arenas_new == NULL)
 			return (NULL);
 		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
@@ -338,7 +392,7 @@ arena_init_locked(unsigned ind)
 		 * base_alloc()).
 		 */
 		if (narenas_total != narenas_auto)
-			a0free(arenas);
+			a0dalloc(arenas);
 		arenas = arenas_new;
 		narenas_total = narenas_new;
 	}
@@ -449,7 +503,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 
 	/* Deallocate old cache if it's too small. */
 	if (arenas_cache != NULL && narenas_cache < narenas_actual) {
-		a0free(arenas_cache);
+		a0dalloc(arenas_cache);
 		arenas_cache = NULL;
 		narenas_cache = 0;
 		tsd_arenas_cache_set(tsd, arenas_cache);
@@ -465,7 +519,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 	if (!*arenas_cache_bypassp) {
 		*arenas_cache_bypassp = true;
 		arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
-		    narenas_cache);
+		    narenas_cache, false);
 		*arenas_cache_bypassp = false;
 	} else
 		arenas_cache = NULL;
@@ -602,7 +656,7 @@ arenas_cache_cleanup(tsd_t *tsd)
 
 	arenas_cache = tsd_arenas_cache_get(tsd);
 	if (arenas_cache != NULL)
-		a0free(arenas_cache);
+		a0dalloc(arenas_cache);
 }
 
 void
@@ -1091,19 +1145,18 @@ malloc_conf_init(void)
 	}
 }
 
+/* init_lock must be held. */
 static bool
-malloc_init_hard(void)
+malloc_init_hard_needed(void)
 {
-	arena_t *init_arenas[1];
 
-	malloc_mutex_lock(&init_lock);
-	if (malloc_initialized || IS_INITIALIZER) {
+	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+	    malloc_init_recursible)) {
 		/*
 		 * Another thread initialized the allocator before this one
 		 * acquired init_lock, or this thread is the initializing
 		 * thread, and it is recursively allocating.
 		 */
-		malloc_mutex_unlock(&init_lock);
 		return (false);
 	}
 #ifdef JEMALLOC_THREADED_INIT
@@ -1113,23 +1166,23 @@ malloc_init_hard(void)
 			malloc_mutex_unlock(&init_lock);
 			CPU_SPINWAIT;
 			malloc_mutex_lock(&init_lock);
-		} while (!malloc_initialized);
-		malloc_mutex_unlock(&init_lock);
+		} while (!malloc_initialized());
 		return (false);
 	}
 #endif
-	malloc_initializer = INITIALIZER;
-
-	if (malloc_tsd_boot0()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
+	return (true);
+}
 
+/* init_lock must be held. */
+static bool
+malloc_init_hard_a0_locked(void)
+{
+
+	malloc_initializer = INITIALIZER;
 	if (config_prof)
 		prof_boot0();
 	malloc_conf_init();
 	if (opt_stats_print) {
 		/* Print statistics at exit. */
 		if (atexit(stats_print_atexit) != 0) {
@@ -1138,68 +1191,60 @@ malloc_init_hard(void)
 			abort();
 		}
 	}
-	if (base_boot()) {
-		malloc_mutex_unlock(&init_lock);
+	if (base_boot())
 		return (true);
-	}
-	if (chunk_boot()) {
-		malloc_mutex_unlock(&init_lock);
+	if (chunk_boot())
 		return (true);
-	}
-	if (ctl_boot()) {
-		malloc_mutex_unlock(&init_lock);
+	if (ctl_boot())
 		return (true);
-	}
 	if (config_prof)
 		prof_boot1();
 	arena_boot();
-	if (config_tcache && tcache_boot()) {
-		malloc_mutex_unlock(&init_lock);
+	if (config_tcache && tcache_boot())
 		return (true);
-	}
-	if (huge_boot()) {
-		malloc_mutex_unlock(&init_lock);
+	if (huge_boot())
 		return (true);
-	}
-	if (malloc_mutex_init(&arenas_lock)) {
-		malloc_mutex_unlock(&init_lock);
+	if (malloc_mutex_init(&arenas_lock))
 		return (true);
-	}
 	/*
 	 * Create enough scaffolding to allow recursive allocation in
 	 * malloc_ncpus().
 	 */
 	narenas_total = narenas_auto = 1;
-	arenas = init_arenas;
+	arenas = &a0;
 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
 	/*
 	 * Initialize one arena here.  The rest are lazily created in
 	 * arena_choose_hard().
 	 */
-	a0 = arena_init(0);
-	if (a0 == NULL) {
-		malloc_mutex_unlock(&init_lock);
+	if (arena_init(0) == NULL)
 		return (true);
-	}
-
-	if (config_prof && prof_boot2()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
+	malloc_init_state = malloc_init_a0_initialized;
+	return (false);
+}
 
+static bool
+malloc_init_hard_a0(void)
+{
+	bool ret;
+
+	malloc_mutex_lock(&init_lock);
+	ret = malloc_init_hard_a0_locked();
+	malloc_mutex_unlock(&init_lock);
+	return (ret);
+}
+
+/*
+ * Initialize data structures which may trigger recursive allocation.
+ *
+ * init_lock must be held.
+ */
+static void
+malloc_init_hard_recursible(void)
+{
+
+	malloc_init_state = malloc_init_recursible;
 	malloc_mutex_unlock(&init_lock);
-	/**********************************************************************/
-	/* Recursive allocation may follow. */
 
 	ncpus = malloc_ncpus();
@@ -1213,15 +1258,16 @@ malloc_init_hard(void)
 			abort();
 	}
 #endif
-	/* Done recursively allocating. */
-	/**********************************************************************/
 	malloc_mutex_lock(&init_lock);
+}
 
-	if (mutex_boot()) {
-		malloc_mutex_unlock(&init_lock);
+/* init_lock must be held. */
+static bool
+malloc_init_hard_finish(void)
+{
+
+	if (mutex_boot())
 		return (true);
-	}
 
 	if (opt_narenas == 0) {
 		/*
@@ -1248,22 +1294,53 @@ malloc_init_hard(void)
 
 	/* Allocate and initialize arenas. */
 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
-	if (arenas == NULL) {
-		malloc_mutex_unlock(&init_lock);
+	if (arenas == NULL)
 		return (true);
-	}
 	/*
 	 * Zero the array.  In practice, this should always be pre-zeroed,
 	 * since it was just mmap()ed, but let's be sure.
 	 */
 	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
 	/* Copy the pointer to the one arena that was already initialized. */
-	arenas[0] = init_arenas[0];
+	arenas[0] = a0;
+
+	malloc_init_state = malloc_init_initialized;
+	return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+
+	malloc_mutex_lock(&init_lock);
+	if (!malloc_init_hard_needed()) {
+		malloc_mutex_unlock(&init_lock);
+		return (false);
+	}
+
+	if (malloc_init_state != malloc_init_a0_initialized &&
+	    malloc_init_hard_a0_locked()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+	if (malloc_tsd_boot0()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+	if (config_prof && prof_boot2()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
+	malloc_init_hard_recursible();
+
+	if (malloc_init_hard_finish()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
 
-	malloc_initialized = true;
 	malloc_mutex_unlock(&init_lock);
 	malloc_tsd_boot1();
 
 	return (false);
 }
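Putting the pieces together, the restructured bootstrap separates a0-only initialization (enough for a0*() and bootstrap_*()) from full initialization. A sketch of the resulting call structure, summarizing the functions in the hunks above rather than quoting the commit:

/*
 * malloc_init()                         full init, used on the public paths
 *   -> malloc_init_hard()               takes init_lock
 *        malloc_init_hard_needed()        already done, or recursing?
 *        malloc_init_hard_a0_locked()     skipped if a0 was already bootstrapped
 *        malloc_tsd_boot0(), prof_boot2()
 *        malloc_init_hard_recursible()    drops init_lock; may allocate (malloc_ncpus())
 *        malloc_init_hard_finish()        mutexes, arenas array, state = initialized
 *
 * malloc_init_a0()                      minimal init, used by a0*()/bootstrap_*()
 *   -> malloc_init_hard_a0()
 *        malloc_init_hard_a0_locked()     config + arena 0 only; no tsd required
 */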
@@ -1634,7 +1711,7 @@ ifree(tsd_t *tsd, void *ptr, bool try_tcache)
 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	assert(ptr != NULL);
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 
 	if (config_prof && opt_prof) {
 		usize = isalloc(ptr, config_prof);
@@ -1655,7 +1732,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	assert(ptr != NULL);
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 
 	if (config_prof && opt_prof)
 		prof_free(tsd, ptr, usize);
@@ -1688,7 +1765,7 @@ je_realloc(void *ptr, size_t size)
 	}
 
 	if (likely(ptr != NULL)) {
-		assert(malloc_initialized || IS_INITIALIZER);
+		assert(malloc_initialized() || IS_INITIALIZER);
 		malloc_thread_init();
 		tsd = tsd_fetch();
@@ -2060,7 +2137,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 
 	assert(ptr != NULL);
 	assert(size != 0);
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 	malloc_thread_init();
 	tsd = tsd_fetch();
@@ -2200,7 +2277,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	assert(ptr != NULL);
 	assert(size != 0);
 	assert(SIZE_T_MAX - size >= extra);
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 	malloc_thread_init();
 	tsd = tsd_fetch();
@@ -2234,7 +2311,7 @@ je_sallocx(const void *ptr, int flags)
 {
 	size_t usize;
 
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 	malloc_thread_init();
 
 	if (config_ivsalloc)
@@ -2254,7 +2331,7 @@ je_dallocx(void *ptr, int flags)
 	bool try_tcache;
 
 	assert(ptr != NULL);
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 
 	tsd = tsd_fetch();
 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
@@ -2296,7 +2373,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
 	size_t usize;
 
 	assert(ptr != NULL);
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 	usize = inallocx(size, flags);
 	assert(usize == isalloc(ptr, config_prof));
@@ -2375,7 +2452,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 {
 	size_t ret;
 
-	assert(malloc_initialized || IS_INITIALIZER);
+	assert(malloc_initialized() || IS_INITIALIZER);
 	malloc_thread_init();
 
 	if (config_ivsalloc)
@@ -2427,10 +2504,10 @@ _malloc_prefork(void)
 	unsigned i;
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
-	if (!malloc_initialized)
+	if (!malloc_initialized())
 		return;
 #endif
-	assert(malloc_initialized);
+	assert(malloc_initialized());
 
 	/* Acquire all mutexes in a safe order. */
 	ctl_prefork();
@@ -2456,10 +2533,10 @@ _malloc_postfork(void)
 	unsigned i;
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
-	if (!malloc_initialized)
+	if (!malloc_initialized())
 		return;
 #endif
-	assert(malloc_initialized);
+	assert(malloc_initialized());
 
 	/* Release all mutexes, now that fork() has completed. */
 	huge_postfork_parent();
@@ -2479,7 +2556,7 @@ jemalloc_postfork_child(void)
 {
 	unsigned i;
 
-	assert(malloc_initialized);
+	assert(malloc_initialized());
 
 	/* Release all mutexes, now that fork() has completed. */
 	huge_postfork_child();

----------------------------------------------------------------------

@@ -15,14 +15,14 @@ void *
 malloc_tsd_malloc(size_t size)
 {
 
-	return (a0malloc(CACHELINE_CEILING(size)));
+	return (a0malloc(CACHELINE_CEILING(size), false));
 }
 
 void
 malloc_tsd_dalloc(void *wrapper)
 {
 
-	a0free(wrapper);
+	a0dalloc(wrapper);
 }
 
 void