Refactor arenas array (fixes deadlock).
Refactor the arenas array, which contains pointers to all extant arenas, such that it starts out as a sparse array of maximum size, and use double-checked atomics-based reads as the basis for fast and simple arena_get(). Additionally, reduce arenas_lock's role such that it only protects against arena initialization races. These changes remove the possibility for arena lookups to trigger locking, which resolves at least one known (fork-related) deadlock. This resolves #315.
commit 767d85061a
parent 3812729167
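The heart of the change is the new lock-free fast path in arena_get(), visible in the diff below: a plain read of the sparse arenas array, retried through an atomic read (and optional slow-path initialization) only when NULL is observed. The following is a minimal standalone sketch of that double-checked read pattern, not the committed code: it assumes C11 <stdatomic.h> in place of jemalloc's internal atomic_read_p()/atomic_write_p() wrappers, and the names ARENA_MAX, arenas_sketch, arena_get_sketch, and arena_init_slow are hypothetical stand-ins (jemalloc bounds the array by MALLOCX_ARENA_MAX and serializes initialization under arenas_lock, which the toy slow path below omits).

    #include <stdatomic.h>
    #include <stdbool.h>

    #define ARENA_MAX 4096  /* stand-in for MALLOCX_ARENA_MAX+1 */

    typedef struct arena_s {
        unsigned ind;  /* toy payload */
    } arena_t;

    /* Sparse, fixed-size array; slots are published with atomic stores. */
    static _Atomic(arena_t *) arenas_sketch[ARENA_MAX];
    static arena_t pool[ARENA_MAX];

    /* Toy slow path; the real one runs under arenas_lock. */
    static arena_t *
    arena_init_slow(unsigned ind)
    {
        pool[ind].ind = ind;
        /* Release store publishes the fully initialized arena. */
        atomic_store_explicit(&arenas_sketch[ind], &pool[ind],
            memory_order_release);
        return (&pool[ind]);
    }

    static inline arena_t *
    arena_get_sketch(unsigned ind, bool init_if_missing)
    {
        arena_t *ret;

        /* Fast path: one relaxed load; no lock, no tsd. */
        ret = atomic_load_explicit(&arenas_sketch[ind],
            memory_order_relaxed);
        if (ret == NULL) {
            /* Double check with acquire ordering before initializing. */
            ret = atomic_load_explicit(&arenas_sketch[ind],
                memory_order_acquire);
            if (init_if_missing && ret == NULL)
                ret = arena_init_slow(ind);
        }
        return (ret);
    }

    int
    main(void)
    {
        arena_t *a = arena_get_sketch(3, true);
        return ((a != NULL && a->ind == 3) ? 0 : 1);
    }

Because the array is allocated once at its maximum size, a reader can never observe it mid-reallocation; the only remaining job of arenas_lock is to serialize slot initialization, which is why the lookup path can no longer participate in the fork-related deadlock the commit message describes.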
@@ -290,14 +290,14 @@ struct arena_s {
 	/*
 	 * Number of threads currently assigned to this arena.  This field is
-	 * protected by arenas_lock.
+	 * synchronized via atomic operations.
 	 */
 	unsigned nthreads;

 	/*
 	 * There are three classes of arena operations from a locking
 	 * perspective:
-	 * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
+	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
 	 * 2) Bin-related operations are protected by bin locks.
 	 * 3) Chunk- and run-related operations are protected by this mutex.
 	 */
@@ -465,7 +465,6 @@ struct arena_s {
 /* Used in conjunction with tsd for fast arena-related context lookup. */
 struct arena_tdata_s {
-	arena_t *arena;
 	ticker_t decay_ticker;
 };
 #endif /* JEMALLOC_ARENA_STRUCTS_B */
@@ -578,6 +577,9 @@ void arena_stats_merge(arena_t *arena, const char **dss,
     ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+unsigned arena_nthreads_get(arena_t *arena);
+void arena_nthreads_inc(arena_t *arena);
+void arena_nthreads_dec(arena_t *arena);
 arena_t *arena_new(unsigned ind);
 bool arena_boot(void);
 void arena_prefork(arena_t *arena);
@@ -28,8 +28,8 @@
  * callers.
  *
  * <t> atomic_read_<t>(<t> *p) { return (*p); }
- * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
- * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
  * bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
  * {
  *   if (*p != c)
@@ -438,7 +438,13 @@ extern unsigned opt_narenas;
 extern bool in_valgrind;

 /* Number of CPUs. */
 extern unsigned ncpus;

+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern arena_t **arenas;
+
 /*
  * index2size_tab encodes the same information as could be computed (at
@@ -452,21 +458,17 @@ extern size_t const index2size_tab[NSIZES+1];
  */
 extern uint8_t const size2index_tab[];

-arena_t *a0get(void);
 void *a0malloc(size_t size);
 void a0dalloc(void *ptr);
 void *bootstrap_malloc(size_t size);
 void *bootstrap_calloc(size_t num, size_t size);
 void bootstrap_free(void *ptr);
 arena_t *arenas_extend(unsigned ind);
-arena_t *arena_init(unsigned ind);
 unsigned narenas_total_get(void);
+arena_t *arena_init(unsigned ind);
 arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    arena_tdata_t *tdata);
 arena_t *arena_choose_hard(tsd_t *tsd);
 void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-unsigned arena_nbound(unsigned ind);
 void thread_allocated_cleanup(tsd_t *tsd);
 void thread_deallocated_cleanup(tsd_t *tsd);
 void arena_cleanup(tsd_t *tsd);
@@ -543,8 +545,7 @@ size_t sa2u(size_t size, size_t alignment);
 arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
 arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
     bool refresh_if_missing);
-arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    bool refresh_if_missing);
+arena_t *arena_get(unsigned ind, bool init_if_missing);
 ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif

@@ -819,19 +820,19 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
 }

 JEMALLOC_INLINE arena_t *
-arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    bool refresh_if_missing)
+arena_get(unsigned ind, bool init_if_missing)
 {
-	arena_tdata_t *tdata;
+	arena_t *ret;

-	/* init_if_missing requires refresh_if_missing. */
-	assert(!init_if_missing || refresh_if_missing);
+	assert(ind <= MALLOCX_ARENA_MAX);

-	tdata = arena_tdata_get(tsd, ind, refresh_if_missing);
-	if (unlikely(tdata == NULL || tdata->arena == NULL))
-		return (arena_get_hard(tsd, ind, init_if_missing, tdata));
-	return (tdata->arena);
+	ret = arenas[ind];
+	if (unlikely(ret == NULL)) {
+		ret = atomic_read_p((void *)&arenas[ind]);
+		if (init_if_missing && unlikely(ret == NULL))
+			ret = arena_init(ind);
+	}
+	return (ret);
 }

 JEMALLOC_INLINE ticker_t *
@@ -1,5 +1,4 @@
 a0dalloc
-a0get
 a0malloc
 arena_aalloc
 arena_alloc_junk_small
@@ -34,7 +33,6 @@ arena_decay_ticks
 arena_dss_prec_get
 arena_dss_prec_set
 arena_get
-arena_get_hard
 arena_init
 arena_lg_dirty_mult_default_get
 arena_lg_dirty_mult_default_set
@@ -73,10 +71,12 @@ arena_migrate
 arena_miscelm_get
 arena_miscelm_to_pageind
 arena_miscelm_to_rpages
-arena_nbound
 arena_new
 arena_node_alloc
 arena_node_dalloc
+arena_nthreads_dec
+arena_nthreads_get
+arena_nthreads_inc
 arena_palloc
 arena_postfork_child
 arena_postfork_parent
@@ -106,6 +106,7 @@ arena_stats_merge
 arena_tcache_fill_small
 arena_tdata_get
 arena_tdata_get_hard
+arenas
 atomic_add_p
 atomic_add_u
 atomic_add_uint32

src/arena.c (21 changed lines):
@@ -3261,6 +3261,27 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
 	}
 }

+unsigned
+arena_nthreads_get(arena_t *arena)
+{
+
+	return (atomic_read_u(&arena->nthreads));
+}
+
+void
+arena_nthreads_inc(arena_t *arena)
+{
+
+	atomic_add_u(&arena->nthreads, 1);
+}
+
+void
+arena_nthreads_dec(arena_t *arena)
+{
+
+	atomic_sub_u(&arena->nthreads, 1);
+}
+
 arena_t *
 arena_new(unsigned ind)
 {
@@ -415,9 +415,7 @@ chunk_arena_get(unsigned arena_ind)
 {
 	arena_t *arena;

-	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
-	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
-	    false, true);
+	arena = arena_get(arena_ind, false);
 	/*
 	 * The arena we're allocating on behalf of must have been initialized
 	 * already.

src/ctl.c (43 changed lines):
@@ -694,9 +694,7 @@ ctl_grow(void)
 static void
 ctl_refresh(void)
 {
-	tsd_t *tsd;
 	unsigned i;
-	bool refreshed;
 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

 	/*
@@ -706,19 +704,14 @@ ctl_refresh(void)
 	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
 	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

-	tsd = tsd_fetch();
-	for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
-		tarenas[i] = arena_get(tsd, i, false, false);
-		if (tarenas[i] == NULL && !refreshed) {
-			tarenas[i] = arena_get(tsd, i, false, true);
-			refreshed = true;
-		}
-	}
+	for (i = 0; i < ctl_stats.narenas; i++)
+		tarenas[i] = arena_get(i, false);

 	for (i = 0; i < ctl_stats.narenas; i++) {
-		if (tarenas[i] != NULL)
-			ctl_stats.arenas[i].nthreads = arena_nbound(i);
-		else
+		if (tarenas[i] != NULL) {
+			ctl_stats.arenas[i].nthreads =
+			    arena_nthreads_get(arena_get(i, false));
+		} else
 			ctl_stats.arenas[i].nthreads = 0;
 	}

@@ -1332,7 +1325,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	}

 	/* Initialize arena if necessary. */
-	newarena = arena_get(tsd, newind, true, true);
+	newarena = arena_get(newind, true);
 	if (newarena == NULL) {
 		ret = EAGAIN;
 		goto label_return;
@@ -1560,22 +1553,14 @@ arena_i_purge(unsigned arena_ind, bool all)

 	malloc_mutex_lock(&ctl_mtx);
 	{
-		tsd_t *tsd = tsd_fetch();
 		unsigned narenas = ctl_stats.narenas;

 		if (arena_ind == narenas) {
 			unsigned i;
-			bool refreshed;
 			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

-			for (i = 0, refreshed = false; i < narenas; i++) {
-				tarenas[i] = arena_get(tsd, i, false, false);
-				if (tarenas[i] == NULL && !refreshed) {
-					tarenas[i] = arena_get(tsd, i, false,
-					    true);
-					refreshed = true;
-				}
-			}
+			for (i = 0; i < narenas; i++)
+				tarenas[i] = arena_get(i, false);

 			/*
 			 * No further need to hold ctl_mtx, since narenas and
@@ -1592,7 +1577,7 @@ arena_i_purge(unsigned arena_ind, bool all)

 		assert(arena_ind < narenas);

-		tarena = arena_get(tsd, arena_ind, false, true);
+		tarena = arena_get(arena_ind, false);

 		/* No further need to hold ctl_mtx. */
 		malloc_mutex_unlock(&ctl_mtx);
@@ -1664,7 +1649,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	}

 	if (arena_ind < ctl_stats.narenas) {
-		arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
+		arena_t *arena = arena_get(arena_ind, false);
 		if (arena == NULL || (dss_prec != dss_prec_limit &&
 		    arena_dss_prec_set(arena, dss_prec))) {
 			ret = EFAULT;
@@ -1697,7 +1682,7 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;

-	arena = arena_get(tsd_fetch(), arena_ind, false, true);
+	arena = arena_get(arena_ind, false);
 	if (arena == NULL) {
 		ret = EFAULT;
 		goto label_return;
@@ -1731,7 +1716,7 @@ arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;

-	arena = arena_get(tsd_fetch(), arena_ind, false, true);
+	arena = arena_get(arena_ind, false);
 	if (arena == NULL) {
 		ret = EFAULT;
 		goto label_return;
@@ -1767,7 +1752,7 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,

 	malloc_mutex_lock(&ctl_mtx);
 	if (arena_ind < narenas_total_get() && (arena =
-	    arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
+	    arena_get(arena_ind, false)) != NULL) {
 		if (newp != NULL) {
 			chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
 			WRITE(new_chunk_hooks, chunk_hooks_t);

src/jemalloc.c (247 changed lines):
@@ -47,7 +47,7 @@ bool in_valgrind;

 unsigned ncpus;

-/* Protects arenas initialization (arenas, narenas_total). */
+/* Protects arenas initialization. */
 static malloc_mutex_t arenas_lock;
 /*
  * Arenas that are used to service external requests.  Not all elements of the
@@ -57,8 +57,8 @@ static malloc_mutex_t arenas_lock;
  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
  * takes some action to create them and allocate from them.
  */
-static arena_t **arenas;
-static unsigned narenas_total;
+arena_t **arenas;
+static unsigned narenas_total; /* Use narenas_total_*(). */
 static arena_t *a0; /* arenas[0]; read-only after initialization. */
 static unsigned narenas_auto; /* Read-only after initialization. */

@@ -311,14 +311,6 @@ malloc_init(void)
  * cannot tolerate TLS variable access.
  */

-arena_t *
-a0get(void)
-{
-
-	assert(a0 != NULL);
-	return (a0);
-}
-
 static void *
 a0ialloc(size_t size, bool zero, bool is_metadata)
 {
@@ -327,7 +319,7 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
 		return (NULL);

 	return (iallocztm(NULL, size, size2index(size), zero, false,
-	    is_metadata, a0get(), true));
+	    is_metadata, arena_get(0, false), true));
 }

 static void
@@ -391,47 +383,59 @@ bootstrap_free(void *ptr)
 	a0idalloc(ptr, false);
 }

+static void
+arena_set(unsigned ind, arena_t *arena)
+{
+
+	atomic_write_p((void **)&arenas[ind], arena);
+}
+
+static void
+narenas_total_set(unsigned narenas)
+{
+
+	atomic_write_u(&narenas_total, narenas);
+}
+
+static void
+narenas_total_inc(void)
+{
+
+	atomic_add_u(&narenas_total, 1);
+}
+
+unsigned
+narenas_total_get(void)
+{
+
+	return (atomic_read_u(&narenas_total));
+}
+
 /* Create a new arena and insert it into the arenas array at index ind. */
 static arena_t *
 arena_init_locked(unsigned ind)
 {
 	arena_t *arena;

-	/* Expand arenas if necessary. */
-	assert(ind <= narenas_total);
+	assert(ind <= narenas_total_get());
 	if (ind > MALLOCX_ARENA_MAX)
 		return (NULL);
-	if (ind == narenas_total) {
-		unsigned narenas_new = narenas_total + 1;
-		arena_t **arenas_new =
-		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
-		    sizeof(arena_t *)));
-		if (arenas_new == NULL)
-			return (NULL);
-		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
-		arenas_new[ind] = NULL;
-		/*
-		 * Deallocate only if arenas came from a0malloc() (not
-		 * base_alloc()).
-		 */
-		if (narenas_total != narenas_auto)
-			a0dalloc(arenas);
-		arenas = arenas_new;
-		narenas_total = narenas_new;
-	}
+	if (ind == narenas_total_get())
+		narenas_total_inc();

 	/*
 	 * Another thread may have already initialized arenas[ind] if it's an
 	 * auto arena.
 	 */
-	arena = arenas[ind];
+	arena = arena_get(ind, false);
 	if (arena != NULL) {
 		assert(ind < narenas_auto);
 		return (arena);
 	}

 	/* Actually initialize the arena. */
-	arena = arenas[ind] = arena_new(ind);
+	arena = arena_new(ind);
+	arena_set(ind, arena);
 	return (arena);
 }

@@ -446,37 +450,16 @@ arena_init(unsigned ind)
 	return (arena);
 }

-unsigned
-narenas_total_get(void)
-{
-	unsigned narenas;
-
-	malloc_mutex_lock(&arenas_lock);
-	narenas = narenas_total;
-	malloc_mutex_unlock(&arenas_lock);
-
-	return (narenas);
-}
-
-static void
-arena_bind_locked(tsd_t *tsd, unsigned ind)
-{
-	arena_t *arena;
-
-	arena = arenas[ind];
-	arena->nthreads++;
-
-	if (tsd_nominal(tsd))
-		tsd_arena_set(tsd, arena);
-}
-
 static void
 arena_bind(tsd_t *tsd, unsigned ind)
 {
+	arena_t *arena;

-	malloc_mutex_lock(&arenas_lock);
-	arena_bind_locked(tsd, ind);
-	malloc_mutex_unlock(&arenas_lock);
+	arena = arena_get(ind, false);
+	arena_nthreads_inc(arena);
+
+	if (tsd_nominal(tsd))
+		tsd_arena_set(tsd, arena);
 }

 void
@@ -484,35 +467,20 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
 {
 	arena_t *oldarena, *newarena;

-	malloc_mutex_lock(&arenas_lock);
-	oldarena = arenas[oldind];
-	newarena = arenas[newind];
-	oldarena->nthreads--;
-	newarena->nthreads++;
-	malloc_mutex_unlock(&arenas_lock);
+	oldarena = arena_get(oldind, false);
+	newarena = arena_get(newind, false);
+	arena_nthreads_dec(oldarena);
+	arena_nthreads_inc(newarena);
 	tsd_arena_set(tsd, newarena);
 }

-unsigned
-arena_nbound(unsigned ind)
-{
-	unsigned nthreads;
-
-	malloc_mutex_lock(&arenas_lock);
-	nthreads = arenas[ind]->nthreads;
-	malloc_mutex_unlock(&arenas_lock);
-	return (nthreads);
-}
-
 static void
 arena_unbind(tsd_t *tsd, unsigned ind)
 {
 	arena_t *arena;

-	malloc_mutex_lock(&arenas_lock);
-	arena = arenas[ind];
-	arena->nthreads--;
-	malloc_mutex_unlock(&arenas_lock);
+	arena = arena_get(ind, false);
+	arena_nthreads_dec(arena);
 	tsd_arena_set(tsd, NULL);
 }

@@ -568,14 +536,6 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
 	 * the arenas.extend mallctl, which we trust mallctl synchronization to
 	 * prevent.
 	 */
-	malloc_mutex_lock(&arenas_lock);
-	for (i = 0; i < narenas_actual; i++)
-		arenas_tdata[i].arena = arenas[i];
-	malloc_mutex_unlock(&arenas_lock);
-	if (narenas_tdata > narenas_actual) {
-		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
-		    * (narenas_tdata - narenas_actual));
-	}

 	/* Copy/initialize tickers. */
 	for (i = 0; i < narenas_actual; i++) {
@@ -587,6 +547,10 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
 			    DECAY_NTICKS_PER_UPDATE);
 		}
 	}
+	if (narenas_tdata > narenas_actual) {
+		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+		    * (narenas_tdata - narenas_actual));
+	}

 	/* Read the refreshed tdata array. */
 	tdata = &arenas_tdata[ind];
@@ -596,33 +560,6 @@ label_return:
 	return (tdata);
 }

-arena_t *
-arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    arena_tdata_t *tdata)
-{
-	arena_t *arena;
-	unsigned narenas_actual;
-
-	if (init_if_missing && tdata != NULL) {
-		tdata->arena = arena_init(ind);
-		if (tdata->arena != NULL)
-			return (tdata->arena);
-	}
-
-	/*
-	 * This function must always tell the truth, even if it's slow, so don't
-	 * let OOM, thread cleanup (note tsd_nominal check), nor recursive
-	 * allocation avoidance (note arenas_tdata_bypass check) get in the way.
-	 */
-	narenas_actual = narenas_total_get();
-	if (ind >= narenas_actual)
-		return (NULL);
-	malloc_mutex_lock(&arenas_lock);
-	arena = arenas[ind];
-	malloc_mutex_unlock(&arenas_lock);
-	return (arena);
-}
-
 /* Slow path, called only by arena_choose(). */
 arena_t *
 arena_choose_hard(tsd_t *tsd)
@@ -635,15 +572,16 @@ arena_choose_hard(tsd_t *tsd)
 		choose = 0;
 		first_null = narenas_auto;
 		malloc_mutex_lock(&arenas_lock);
-		assert(a0get() != NULL);
+		assert(arena_get(0, false) != NULL);
 		for (i = 1; i < narenas_auto; i++) {
-			if (arenas[i] != NULL) {
+			if (arena_get(i, false) != NULL) {
 				/*
 				 * Choose the first arena that has the lowest
 				 * number of threads assigned to it.
 				 */
-				if (arenas[i]->nthreads <
-				    arenas[choose]->nthreads)
+				if (arena_nthreads_get(arena_get(i, false)) <
+				    arena_nthreads_get(arena_get(choose,
+				    false)))
 					choose = i;
 			} else if (first_null == narenas_auto) {
 				/*
@@ -659,13 +597,13 @@ arena_choose_hard(tsd_t *tsd)
 			}
 		}

-		if (arenas[choose]->nthreads == 0
+		if (arena_nthreads_get(arena_get(choose, false)) == 0
 		    || first_null == narenas_auto) {
 			/*
 			 * Use an unloaded arena, or the least loaded arena if
 			 * all arenas are already initialized.
 			 */
-			ret = arenas[choose];
+			ret = arena_get(choose, false);
 		} else {
 			/* Initialize a new arena. */
 			choose = first_null;
@@ -675,10 +613,10 @@ arena_choose_hard(tsd_t *tsd)
 				return (NULL);
 			}
 		}
-		arena_bind_locked(tsd, choose);
+		arena_bind(tsd, choose);
 		malloc_mutex_unlock(&arenas_lock);
 	} else {
-		ret = a0get();
+		ret = arena_get(0, false);
 		arena_bind(tsd, 0);
 	}

@@ -750,7 +688,7 @@ stats_print_atexit(void)
 	 * continue to allocate.
 	 */
 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
-		arena_t *arena = arenas[i];
+		arena_t *arena = arena_get(i, false);
 		if (arena != NULL) {
 			tcache_t *tcache;

@@ -1309,7 +1247,8 @@ malloc_init_hard_a0_locked(void)
 	 * Create enough scaffolding to allow recursive allocation in
 	 * malloc_ncpus().
 	 */
-	narenas_total = narenas_auto = 1;
+	narenas_auto = 1;
+	narenas_total_set(narenas_auto);
 	arenas = &a0;
 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
 	/*
@@ -1391,28 +1330,22 @@ malloc_init_hard_finish(void)
 	}
 	narenas_auto = opt_narenas;
 	/*
-	 * Make sure that the arenas array can be allocated.  In practice, this
-	 * limit is enough to allow the allocator to function, but the ctl
-	 * machinery will fail to allocate memory at far lower limits.
+	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
 	 */
-	if (narenas_auto > chunksize / sizeof(arena_t *)) {
-		narenas_auto = (unsigned)(chunksize / sizeof(arena_t *));
+	if (narenas_auto > MALLOCX_ARENA_MAX) {
+		narenas_auto = MALLOCX_ARENA_MAX;
 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
 		    narenas_auto);
 	}
-	narenas_total = narenas_auto;
+	narenas_total_set(narenas_auto);

 	/* Allocate and initialize arenas. */
-	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
+	arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
+	    (MALLOCX_ARENA_MAX+1));
 	if (arenas == NULL)
 		return (true);
-	/*
-	 * Zero the array.  In practice, this should always be pre-zeroed,
-	 * since it was just mmap()ed, but let's be sure.
-	 */
-	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
 	/* Copy the pointer to the one arena that was already initialized. */
-	arenas[0] = a0;
+	arena_set(0, a0);

 	malloc_init_state = malloc_init_initialized;
 	malloc_slow_flag_init();
@@ -2084,7 +2017,7 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
 		*tcache = tcache_get(tsd, true);
 	if ((flags & MALLOCX_ARENA_MASK) != 0) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-		*arena = arena_get(tsd, arena_ind, true, true);
+		*arena = arena_get(arena_ind, true);
 		if (unlikely(*arena == NULL))
 			return (true);
 	} else
@@ -2325,7 +2258,7 @@ je_rallocx(void *ptr, size_t size, int flags)

 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-		arena = arena_get(tsd, arena_ind, true, true);
+		arena = arena_get(arena_ind, true);
 		if (unlikely(arena == NULL))
 			goto label_oom;
 	} else
@@ -2677,7 +2610,7 @@ JEMALLOC_EXPORT void
 _malloc_prefork(void)
 #endif
 {
-	unsigned i;
+	unsigned i, narenas;

 #ifdef JEMALLOC_MUTEX_INIT_CB
 	if (!malloc_initialized())
@@ -2689,9 +2622,11 @@ _malloc_prefork(void)
 	ctl_prefork();
 	prof_prefork();
 	malloc_mutex_prefork(&arenas_lock);
-	for (i = 0; i < narenas_total; i++) {
-		if (arenas[i] != NULL)
-			arena_prefork(arenas[i]);
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(i, false)) != NULL)
+			arena_prefork(arena);
 	}
 	chunk_prefork();
 	base_prefork();
@@ -2705,7 +2640,7 @@ JEMALLOC_EXPORT void
 _malloc_postfork(void)
 #endif
 {
-	unsigned i;
+	unsigned i, narenas;

 #ifdef JEMALLOC_MUTEX_INIT_CB
 	if (!malloc_initialized())
@@ -2716,9 +2651,11 @@ _malloc_postfork(void)
 	/* Release all mutexes, now that fork() has completed. */
 	base_postfork_parent();
 	chunk_postfork_parent();
-	for (i = 0; i < narenas_total; i++) {
-		if (arenas[i] != NULL)
-			arena_postfork_parent(arenas[i]);
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(i, false)) != NULL)
+			arena_postfork_parent(arena);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
 	prof_postfork_parent();
@@ -2728,16 +2665,18 @@ _malloc_postfork(void)
 void
 jemalloc_postfork_child(void)
 {
-	unsigned i;
+	unsigned i, narenas;

 	assert(malloc_initialized());

 	/* Release all mutexes, now that fork() has completed. */
 	base_postfork_child();
 	chunk_postfork_child();
-	for (i = 0; i < narenas_total; i++) {
-		if (arenas[i] != NULL)
-			arena_postfork_child(arenas[i]);
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(i, false)) != NULL)
+			arena_postfork_child(arena);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
 	prof_postfork_child();
@@ -325,7 +325,8 @@ tcache_create(tsd_t *tsd, arena_t *arena)
 	/* Avoid false cacheline sharing. */
 	size = sa2u(size, CACHELINE);

-	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
+	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
+	    arena_get(0, false));
 	if (tcache == NULL)
 		return (NULL);

@@ -453,7 +454,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)

 	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
 		return (true);
-	tcache = tcache_create(tsd, a0get());
+	tcache = tcache_create(tsd, arena_get(0, false));
 	if (tcache == NULL)
 		return (true);