diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 59b480b5..470eee65 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -290,14 +290,14 @@ struct arena_s {
 
 	/*
 	 * Number of threads currently assigned to this arena.  This field is
-	 * protected by arenas_lock.
+	 * synchronized via atomic operations.
 	 */
 	unsigned		nthreads;
 
 	/*
 	 * There are three classes of arena operations from a locking
 	 * perspective:
-	 * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
+	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
 	 * 2) Bin-related operations are protected by bin locks.
 	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
@@ -465,7 +465,6 @@ struct arena_s {
 
 /* Used in conjunction with tsd for fast arena-related context lookup. */
 struct arena_tdata_s {
-	arena_t			*arena;
 	ticker_t		decay_ticker;
 };
 #endif /* JEMALLOC_ARENA_STRUCTS_B */
@@ -578,6 +577,9 @@ void	arena_stats_merge(arena_t *arena, const char **dss,
     ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+unsigned	arena_nthreads_get(arena_t *arena);
+void	arena_nthreads_inc(arena_t *arena);
+void	arena_nthreads_dec(arena_t *arena);
 arena_t	*arena_new(unsigned ind);
 bool	arena_boot(void);
 void	arena_prefork(arena_t *arena);
diff --git a/include/jemalloc/internal/atomic.h b/include/jemalloc/internal/atomic.h
index a9aad35d..3f15ea14 100644
--- a/include/jemalloc/internal/atomic.h
+++ b/include/jemalloc/internal/atomic.h
@@ -28,8 +28,8 @@
  * callers.
  *
  *   <t> atomic_read_<t>(<t> *p) { return (*p); }
- *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
- *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
  *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
  *   {
  *     if (*p != c)
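The corrected pseudocode above documents that atomic_add_<t>() and atomic_sub_<t>() return the value *after* the arithmetic, not the prior value. A minimal sketch of a caller that depends on that return value (refcount_t and its helpers are hypothetical, not part of this patch; only atomic_add_u()/atomic_sub_u() are assumed from atomic.h):

/* Hypothetical reference count built on the primitives documented above. */
typedef struct {
	unsigned	refs;
} refcount_t;

static void
refcount_acquire(refcount_t *rc)
{

	atomic_add_u(&rc->refs, 1);
}

static bool
refcount_release(refcount_t *rc)
{

	/* atomic_sub_u() returns the post-decrement value. */
	return (atomic_sub_u(&rc->refs, 1) == 0);
}

Under the old (*p - x) phrasing this idiom would have been ambiguous; the (*p -= x) form makes explicit that the decremented value is returned.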
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index ffad04ba..611ed36a 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -438,7 +438,13 @@ extern unsigned	opt_narenas;
 extern bool	in_valgrind;
 
 /* Number of CPUs. */
-extern unsigned		ncpus;
+extern unsigned	ncpus;
+
+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern arena_t	**arenas;
 
 /*
  * index2size_tab encodes the same information as could be computed (at
@@ -452,21 +458,17 @@ extern size_t const	index2size_tab[NSIZES+1];
  */
 extern uint8_t const	size2index_tab[];
 
-arena_t	*a0get(void);
 void	*a0malloc(size_t size);
 void	a0dalloc(void *ptr);
 void	*bootstrap_malloc(size_t size);
 void	*bootstrap_calloc(size_t num, size_t size);
 void	bootstrap_free(void *ptr);
 arena_t	*arenas_extend(unsigned ind);
-arena_t	*arena_init(unsigned ind);
 unsigned	narenas_total_get(void);
+arena_t	*arena_init(unsigned ind);
 arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t	*arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    arena_tdata_t *tdata);
 arena_t	*arena_choose_hard(tsd_t *tsd);
 void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-unsigned	arena_nbound(unsigned ind);
 void	thread_allocated_cleanup(tsd_t *tsd);
 void	thread_deallocated_cleanup(tsd_t *tsd);
 void	arena_cleanup(tsd_t *tsd);
@@ -543,8 +545,7 @@ size_t	sa2u(size_t size, size_t alignment);
 arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
 arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
     bool refresh_if_missing);
-arena_t	*arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    bool refresh_if_missing);
+arena_t	*arena_get(unsigned ind, bool init_if_missing);
 ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif
 
@@ -819,19 +820,19 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
 }
 
 JEMALLOC_INLINE arena_t *
-arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    bool refresh_if_missing)
+arena_get(unsigned ind, bool init_if_missing)
 {
-	arena_tdata_t *tdata;
+	arena_t *ret;
 
-	/* init_if_missing requires refresh_if_missing. */
-	assert(!init_if_missing || refresh_if_missing);
+	assert(ind <= MALLOCX_ARENA_MAX);
 
-	tdata = arena_tdata_get(tsd, ind, refresh_if_missing);
-	if (unlikely(tdata == NULL || tdata->arena == NULL))
-		return (arena_get_hard(tsd, ind, init_if_missing, tdata));
-
-	return (tdata->arena);
+	ret = arenas[ind];
+	if (unlikely(ret == NULL)) {
+		ret = atomic_read_p((void *)&arenas[ind]);
+		if (init_if_missing && unlikely(ret == NULL))
+			ret = arena_init(ind);
+	}
+	return (ret);
 }
 
 JEMALLOC_INLINE ticker_t *
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index adab8a5c..d716b82d 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -1,5 +1,4 @@
 a0dalloc
-a0get
 a0malloc
 arena_aalloc
 arena_alloc_junk_small
@@ -34,7 +33,6 @@ arena_decay_ticks
 arena_dss_prec_get
 arena_dss_prec_set
 arena_get
-arena_get_hard
 arena_init
 arena_lg_dirty_mult_default_get
 arena_lg_dirty_mult_default_set
@@ -73,10 +71,12 @@ arena_migrate
 arena_miscelm_get
 arena_miscelm_to_pageind
 arena_miscelm_to_rpages
-arena_nbound
 arena_new
 arena_node_alloc
 arena_node_dalloc
+arena_nthreads_dec
+arena_nthreads_get
+arena_nthreads_inc
 arena_palloc
 arena_postfork_child
 arena_postfork_parent
@@ -106,6 +106,7 @@ arena_stats_merge
 arena_tcache_fill_small
 arena_tdata_get
 arena_tdata_get_hard
+arenas
 atomic_add_p
 atomic_add_u
 atomic_add_uint32
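The rewritten arena_get() in jemalloc_internal.h.in above replaces the per-thread tdata cache with a direct read of the global arenas array: an unsynchronized load first, an atomic_read_p() re-check if that saw NULL, and only then the arena_init() slow path. The same shape in isolation (a standalone sketch, not jemalloc code; MAX_SLOTS, slots, and slot_init() are stand-ins):

/* Sketch of the lazy-initialization read pattern used by arena_get(). */
#define	MAX_SLOTS	16			/* Stand-in bound. */
static void	*slots[MAX_SLOTS];		/* NULL until atomically published. */
static void	*slot_init(unsigned ind);	/* Locks, creates, publishes. */

static void *
slot_get(unsigned ind, bool init_if_missing)
{
	void *ret;

	ret = slots[ind];			/* Fast path: plain read. */
	if (ret == NULL) {
		/* Re-check with an atomic load before the slow path. */
		ret = atomic_read_p(&slots[ind]);
		if (init_if_missing && ret == NULL)
			ret = slot_init(ind);
	}
	return (ret);
}

A NULL from the fast path is never trusted on its own; the atomic re-read guards against a stale view of a slot another thread has just published.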
diff --git a/src/arena.c b/src/arena.c
index ad675d13..3f394681 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -3261,6 +3261,27 @@ arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
 	}
 }
 
+unsigned
+arena_nthreads_get(arena_t *arena)
+{
+
+	return (atomic_read_u(&arena->nthreads));
+}
+
+void
+arena_nthreads_inc(arena_t *arena)
+{
+
+	atomic_add_u(&arena->nthreads, 1);
+}
+
+void
+arena_nthreads_dec(arena_t *arena)
+{
+
+	atomic_sub_u(&arena->nthreads, 1);
+}
+
 arena_t *
 arena_new(unsigned ind)
 {
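With these accessors, nthreads becomes an approximate counter: a reader may race with concurrent binds and unbinds, which is acceptable because the count only steers load balancing. A lock-free "lighter of two arenas" comparison might look like this (illustration only; the real scan lives in arena_choose_hard() in src/jemalloc.c below):

/* Sketch: compare arena load without holding arenas_lock. */
static arena_t *
arena_lighter(arena_t *a, arena_t *b)
{

	/* The counts may move under us; an approximate answer suffices. */
	return (arena_nthreads_get(a) < arena_nthreads_get(b) ? a : b);
}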
diff --git a/src/chunk.c b/src/chunk.c
index 6a107e1d..26622ced 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -415,9 +415,7 @@ chunk_arena_get(unsigned arena_ind)
 {
 	arena_t *arena;
 
-	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
-	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
-	    false, true);
+	arena = arena_get(arena_ind, false);
 	/*
 	 * The arena we're allocating on behalf of must have been initialized
 	 * already.
diff --git a/src/ctl.c b/src/ctl.c
index 107bacd6..dbf57c36 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -694,9 +694,7 @@ ctl_grow(void)
 static void
 ctl_refresh(void)
 {
-	tsd_t *tsd;
 	unsigned i;
-	bool refreshed;
 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
 
 	/*
@@ -706,19 +704,14 @@ ctl_refresh(void)
 	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
 	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
 
-	tsd = tsd_fetch();
-	for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
-		tarenas[i] = arena_get(tsd, i, false, false);
-		if (tarenas[i] == NULL && !refreshed) {
-			tarenas[i] = arena_get(tsd, i, false, true);
-			refreshed = true;
-		}
-	}
+	for (i = 0; i < ctl_stats.narenas; i++)
+		tarenas[i] = arena_get(i, false);
 
 	for (i = 0; i < ctl_stats.narenas; i++) {
-		if (tarenas[i] != NULL)
-			ctl_stats.arenas[i].nthreads = arena_nbound(i);
-		else
+		if (tarenas[i] != NULL) {
+			ctl_stats.arenas[i].nthreads =
+			    arena_nthreads_get(arena_get(i, false));
+		} else
 			ctl_stats.arenas[i].nthreads = 0;
 	}
 
@@ -1332,7 +1325,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	}
 
 	/* Initialize arena if necessary. */
-	newarena = arena_get(tsd, newind, true, true);
+	newarena = arena_get(newind, true);
 	if (newarena == NULL) {
 		ret = EAGAIN;
 		goto label_return;
@@ -1560,22 +1553,14 @@ arena_i_purge(unsigned arena_ind, bool all)
 
 	malloc_mutex_lock(&ctl_mtx);
 	{
-		tsd_t *tsd = tsd_fetch();
 		unsigned narenas = ctl_stats.narenas;
 
 		if (arena_ind == narenas) {
 			unsigned i;
-			bool refreshed;
 			VARIABLE_ARRAY(arena_t *, tarenas, narenas);
 
-			for (i = 0, refreshed = false; i < narenas; i++) {
-				tarenas[i] = arena_get(tsd, i, false, false);
-				if (tarenas[i] == NULL && !refreshed) {
-					tarenas[i] = arena_get(tsd, i, false,
-					    true);
-					refreshed = true;
-				}
-			}
+			for (i = 0; i < narenas; i++)
+				tarenas[i] = arena_get(i, false);
 
 			/*
 			 * No further need to hold ctl_mtx, since narenas and
@@ -1592,7 +1577,7 @@ arena_i_purge(unsigned arena_ind, bool all)
 
 			assert(arena_ind < narenas);
 
-			tarena = arena_get(tsd, arena_ind, false, true);
+			tarena = arena_get(arena_ind, false);
 
 			/* No further need to hold ctl_mtx. */
 			malloc_mutex_unlock(&ctl_mtx);
@@ -1664,7 +1649,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	}
 
 	if (arena_ind < ctl_stats.narenas) {
-		arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
+		arena_t *arena = arena_get(arena_ind, false);
 		if (arena == NULL || (dss_prec != dss_prec_limit &&
 		    arena_dss_prec_set(arena, dss_prec))) {
 			ret = EFAULT;
@@ -1697,7 +1682,7 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;
 
-	arena = arena_get(tsd_fetch(), arena_ind, false, true);
+	arena = arena_get(arena_ind, false);
 	if (arena == NULL) {
 		ret = EFAULT;
 		goto label_return;
@@ -1731,7 +1716,7 @@ arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;
 
-	arena = arena_get(tsd_fetch(), arena_ind, false, true);
+	arena = arena_get(arena_ind, false);
 	if (arena == NULL) {
 		ret = EFAULT;
 		goto label_return;
@@ -1767,7 +1752,7 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
 
 	malloc_mutex_lock(&ctl_mtx);
 	if (arena_ind < narenas_total_get() && (arena =
-	    arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
+	    arena_get(arena_ind, false)) != NULL) {
 		if (newp != NULL) {
 			chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
 			WRITE(new_chunk_hooks, chunk_hooks_t);
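The arena_get(newind, true) call in thread_arena_ctl() above is what runs when an application rebinds its thread to another arena, so explicit initialization now flows through the same path as every other lookup. From the application side (a sketch using the public API):

#include <jemalloc/jemalloc.h>

/* Sketch: rebind the calling thread to arena ind, creating it on demand. */
static int
switch_to_arena(unsigned ind)
{

	/* Reaches arena_get(ind, true) via thread_arena_ctl(). */
	return (mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind)));
}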
diff --git a/src/jemalloc.c b/src/jemalloc.c
index ced27b88..86032a40 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -47,7 +47,7 @@ bool	in_valgrind;
 
 unsigned	ncpus;
 
-/* Protects arenas initialization (arenas, narenas_total). */
+/* Protects arenas initialization. */
 static malloc_mutex_t	arenas_lock;
 /*
  * Arenas that are used to service external requests.  Not all elements of the
@@ -57,8 +57,8 @@ static malloc_mutex_t	arenas_lock;
  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
  */
-static arena_t		**arenas;
-static unsigned		narenas_total;
+arena_t			**arenas;
+static unsigned		narenas_total; /* Use narenas_total_*(). */
 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
 static unsigned		narenas_auto; /* Read-only after initialization. */
 
@@ -311,14 +311,6 @@ malloc_init(void)
  * cannot tolerate TLS variable access.
  */
 
-arena_t *
-a0get(void)
-{
-
-	assert(a0 != NULL);
-	return (a0);
-}
-
 static void *
 a0ialloc(size_t size, bool zero, bool is_metadata)
 {
@@ -327,7 +319,7 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
 		return (NULL);
 
 	return (iallocztm(NULL, size, size2index(size), zero, false,
-	    is_metadata, a0get(), true));
+	    is_metadata, arena_get(0, false), true));
 }
 
 static void
@@ -391,47 +383,59 @@ bootstrap_free(void *ptr)
 	a0idalloc(ptr, false);
 }
 
+static void
+arena_set(unsigned ind, arena_t *arena)
+{
+
+	atomic_write_p((void **)&arenas[ind], arena);
+}
+
+static void
+narenas_total_set(unsigned narenas)
+{
+
+	atomic_write_u(&narenas_total, narenas);
+}
+
+static void
+narenas_total_inc(void)
+{
+
+	atomic_add_u(&narenas_total, 1);
+}
+
+unsigned
+narenas_total_get(void)
+{
+
+	return (atomic_read_u(&narenas_total));
+}
+
 /* Create a new arena and insert it into the arenas array at index ind. */
 static arena_t *
 arena_init_locked(unsigned ind)
 {
 	arena_t *arena;
 
-	/* Expand arenas if necessary. */
-	assert(ind <= narenas_total);
+	assert(ind <= narenas_total_get());
 	if (ind > MALLOCX_ARENA_MAX)
 		return (NULL);
-	if (ind == narenas_total) {
-		unsigned narenas_new = narenas_total + 1;
-		arena_t **arenas_new =
-		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
-		    sizeof(arena_t *)));
-		if (arenas_new == NULL)
-			return (NULL);
-		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
-		arenas_new[ind] = NULL;
-		/*
-		 * Deallocate only if arenas came from a0malloc() (not
-		 * base_alloc()).
-		 */
-		if (narenas_total != narenas_auto)
-			a0dalloc(arenas);
-		arenas = arenas_new;
-		narenas_total = narenas_new;
-	}
+	if (ind == narenas_total_get())
+		narenas_total_inc();
 
 	/*
 	 * Another thread may have already initialized arenas[ind] if it's an
 	 * auto arena.
 	 */
-	arena = arenas[ind];
+	arena = arena_get(ind, false);
 	if (arena != NULL) {
 		assert(ind < narenas_auto);
 		return (arena);
 	}
 
 	/* Actually initialize the arena. */
-	arena = arenas[ind] = arena_new(ind);
+	arena = arena_new(ind);
+	arena_set(ind, arena);
 
 	return (arena);
 }
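arena_set() is the write side of the atomic_read_p() in arena_get(): the arena is fully constructed by arena_new() before its pointer is published, so a reader that observes a non-NULL slot observes a complete arena. In miniature (a sketch, assuming the internal atomics order the initializing stores before the publishing store, as the patch itself relies on; thing_t, things, and make_thing() are hypothetical):

/* Sketch of the construct-then-publish order in arena_init_locked(). */
typedef struct thing_s thing_t;
static thing_t	*things[16];
static thing_t	*make_thing(unsigned ind);	/* Returns a complete object. */

static void
thing_publish(unsigned ind)
{
	thing_t *t;

	t = make_thing(ind);				/* Initialize first... */
	atomic_write_p((void **)&things[ind], t);	/* ...then publish. */
}

Note that narenas_total_inc() runs before the arena exists; that is safe because readers treat a NULL slot as "not yet created" rather than trusting the count alone.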
@@ -446,37 +450,16 @@ arena_init(unsigned ind)
 	return (arena);
 }
 
-unsigned
-narenas_total_get(void)
-{
-	unsigned narenas;
-
-	malloc_mutex_lock(&arenas_lock);
-	narenas = narenas_total;
-	malloc_mutex_unlock(&arenas_lock);
-
-	return (narenas);
-}
-
-static void
-arena_bind_locked(tsd_t *tsd, unsigned ind)
-{
-	arena_t *arena;
-
-	arena = arenas[ind];
-	arena->nthreads++;
-
-	if (tsd_nominal(tsd))
-		tsd_arena_set(tsd, arena);
-}
-
 static void
 arena_bind(tsd_t *tsd, unsigned ind)
 {
+	arena_t *arena;
 
-	malloc_mutex_lock(&arenas_lock);
-	arena_bind_locked(tsd, ind);
-	malloc_mutex_unlock(&arenas_lock);
+	arena = arena_get(ind, false);
+	arena_nthreads_inc(arena);
+
+	if (tsd_nominal(tsd))
+		tsd_arena_set(tsd, arena);
 }
 
 void
@@ -484,35 +467,20 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
 {
 	arena_t *oldarena, *newarena;
 
-	malloc_mutex_lock(&arenas_lock);
-	oldarena = arenas[oldind];
-	newarena = arenas[newind];
-	oldarena->nthreads--;
-	newarena->nthreads++;
-	malloc_mutex_unlock(&arenas_lock);
+	oldarena = arena_get(oldind, false);
+	newarena = arena_get(newind, false);
+	arena_nthreads_dec(oldarena);
+	arena_nthreads_inc(newarena);
 	tsd_arena_set(tsd, newarena);
 }
 
-unsigned
-arena_nbound(unsigned ind)
-{
-	unsigned nthreads;
-
-	malloc_mutex_lock(&arenas_lock);
-	nthreads = arenas[ind]->nthreads;
-	malloc_mutex_unlock(&arenas_lock);
-	return (nthreads);
-}
-
 static void
 arena_unbind(tsd_t *tsd, unsigned ind)
 {
 	arena_t *arena;
 
-	malloc_mutex_lock(&arenas_lock);
-	arena = arenas[ind];
-	arena->nthreads--;
-	malloc_mutex_unlock(&arenas_lock);
+	arena = arena_get(ind, false);
+	arena_nthreads_dec(arena);
 	tsd_arena_set(tsd, NULL);
 }
 
@@ -568,14 +536,6 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
 	 * the arenas.extend mallctl, which we trust mallctl synchronization to
 	 * prevent.
 	 */
-	malloc_mutex_lock(&arenas_lock);
-	for (i = 0; i < narenas_actual; i++)
-		arenas_tdata[i].arena = arenas[i];
-	malloc_mutex_unlock(&arenas_lock);
-	if (narenas_tdata > narenas_actual) {
-		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
-		    * (narenas_tdata - narenas_actual));
-	}
 
 	/* Copy/initialize tickers. */
 	for (i = 0; i < narenas_actual; i++) {
@@ -587,6 +547,10 @@ arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
 			    DECAY_NTICKS_PER_UPDATE);
 		}
 	}
+	if (narenas_tdata > narenas_actual) {
+		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+		    * (narenas_tdata - narenas_actual));
+	}
 
 	/* Read the refreshed tdata array. */
 	tdata = &arenas_tdata[ind];
@@ -596,33 +560,6 @@ label_return:
 	return (tdata);
 }
 
-arena_t *
-arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    arena_tdata_t *tdata)
-{
-	arena_t *arena;
-	unsigned narenas_actual;
-
-	if (init_if_missing && tdata != NULL) {
-		tdata->arena = arena_init(ind);
-		if (tdata->arena != NULL)
-			return (tdata->arena);
-	}
-
-	/*
-	 * This function must always tell the truth, even if it's slow, so don't
-	 * let OOM, thread cleanup (note tsd_nominal check), nor recursive
-	 * allocation avoidance (note arenas_tdata_bypass check) get in the way.
-	 */
-	narenas_actual = narenas_total_get();
-	if (ind >= narenas_actual)
-		return (NULL);
-	malloc_mutex_lock(&arenas_lock);
-	arena = arenas[ind];
-	malloc_mutex_unlock(&arenas_lock);
-	return (arena);
-}
-
 /* Slow path, called only by arena_choose(). */
 arena_t *
 arena_choose_hard(tsd_t *tsd)
@@ -635,15 +572,16 @@ arena_choose_hard(tsd_t *tsd)
 		choose = 0;
 		first_null = narenas_auto;
 		malloc_mutex_lock(&arenas_lock);
-		assert(a0get() != NULL);
+		assert(arena_get(0, false) != NULL);
 		for (i = 1; i < narenas_auto; i++) {
-			if (arenas[i] != NULL) {
+			if (arena_get(i, false) != NULL) {
 				/*
 				 * Choose the first arena that has the lowest
 				 * number of threads assigned to it.
 				 */
-				if (arenas[i]->nthreads <
-				    arenas[choose]->nthreads)
+				if (arena_nthreads_get(arena_get(i, false)) <
+				    arena_nthreads_get(arena_get(choose,
+				    false)))
 					choose = i;
 			} else if (first_null == narenas_auto) {
 				/*
@@ -659,13 +597,13 @@ arena_choose_hard(tsd_t *tsd)
 			}
 		}
 
-		if (arenas[choose]->nthreads == 0
+		if (arena_nthreads_get(arena_get(choose, false)) == 0
 		    || first_null == narenas_auto) {
 			/*
 			 * Use an unloaded arena, or the least loaded arena if
 			 * all arenas are already initialized.
 			 */
-			ret = arenas[choose];
+			ret = arena_get(choose, false);
 		} else {
 			/* Initialize a new arena. */
 			choose = first_null;
@@ -675,10 +613,10 @@ arena_choose_hard(tsd_t *tsd)
 				return (NULL);
 			}
 		}
-		arena_bind_locked(tsd, choose);
+		arena_bind(tsd, choose);
 		malloc_mutex_unlock(&arenas_lock);
 	} else {
-		ret = a0get();
+		ret = arena_get(0, false);
 		arena_bind(tsd, 0);
 	}
 
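After this change, arenas_lock serializes only arena creation; every read goes through arena_get() and narenas_total_get(). The iteration pattern that the following hunks apply in stats_print_atexit() and the fork hooks can be summarized as (sketch only; arena_foreach() is not a real jemalloc function):

/* Sketch: visit each initialized arena without taking arenas_lock. */
static void
arena_foreach(void (*visit)(arena_t *arena))
{
	unsigned i, narenas;

	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena = arena_get(i, false);

		if (arena != NULL)
			visit(arena);
	}
}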
@@ -750,7 +688,7 @@ stats_print_atexit(void)
 	 * continue to allocate.
 	 */
 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
-		arena_t *arena = arenas[i];
+		arena_t *arena = arena_get(i, false);
 
 		if (arena != NULL) {
 			tcache_t *tcache;
@@ -1309,7 +1247,8 @@ malloc_init_hard_a0_locked(void)
 	 * Create enough scaffolding to allow recursive allocation in
 	 * malloc_ncpus().
 	 */
-	narenas_total = narenas_auto = 1;
+	narenas_auto = 1;
+	narenas_total_set(narenas_auto);
 	arenas = &a0;
 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
 	/*
@@ -1391,28 +1330,22 @@ malloc_init_hard_finish(void)
 	}
 	narenas_auto = opt_narenas;
 	/*
-	 * Make sure that the arenas array can be allocated.  In practice, this
-	 * limit is enough to allow the allocator to function, but the ctl
-	 * machinery will fail to allocate memory at far lower limits.
+	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
 	 */
-	if (narenas_auto > chunksize / sizeof(arena_t *)) {
-		narenas_auto = (unsigned)(chunksize / sizeof(arena_t *));
+	if (narenas_auto > MALLOCX_ARENA_MAX) {
+		narenas_auto = MALLOCX_ARENA_MAX;
 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
 		    narenas_auto);
 	}
-	narenas_total = narenas_auto;
+	narenas_total_set(narenas_auto);
 
 	/* Allocate and initialize arenas. */
-	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
+	arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
+	    (MALLOCX_ARENA_MAX+1));
 	if (arenas == NULL)
 		return (true);
-	/*
-	 * Zero the array.  In practice, this should always be pre-zeroed,
-	 * since it was just mmap()ed, but let's be sure.
-	 */
-	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
 	/* Copy the pointer to the one arena that was already initialized. */
-	arenas[0] = a0;
+	arena_set(0, a0);
 
 	malloc_init_state = malloc_init_initialized;
 	malloc_slow_flag_init();
@@ -2084,7 +2017,7 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
 		*tcache = tcache_get(tsd, true);
 	if ((flags & MALLOCX_ARENA_MASK) != 0) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-		*arena = arena_get(tsd, arena_ind, true, true);
+		*arena = arena_get(arena_ind, true);
 		if (unlikely(*arena == NULL))
 			return (true);
 	} else
@@ -2325,7 +2258,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 
 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-		arena = arena_get(tsd, arena_ind, true, true);
+		arena = arena_get(arena_ind, true);
 		if (unlikely(arena == NULL))
 			goto label_oom;
 	} else
@@ -2677,7 +2610,7 @@ JEMALLOC_EXPORT void
 _malloc_prefork(void)
 #endif
 {
-	unsigned i;
+	unsigned i, narenas;
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
 	if (!malloc_initialized())
@@ -2689,9 +2622,11 @@ _malloc_prefork(void)
 	ctl_prefork();
 	prof_prefork();
 	malloc_mutex_prefork(&arenas_lock);
-	for (i = 0; i < narenas_total; i++) {
-		if (arenas[i] != NULL)
-			arena_prefork(arenas[i]);
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(i, false)) != NULL)
+			arena_prefork(arena);
 	}
 	chunk_prefork();
 	base_prefork();
@@ -2705,7 +2640,7 @@ JEMALLOC_EXPORT void
 _malloc_postfork(void)
 #endif
 {
-	unsigned i;
+	unsigned i, narenas;
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
 	if (!malloc_initialized())
@@ -2716,9 +2651,11 @@ _malloc_postfork(void)
 	/* Release all mutexes, now that fork() has completed. */
 	base_postfork_parent();
 	chunk_postfork_parent();
-	for (i = 0; i < narenas_total; i++) {
-		if (arenas[i] != NULL)
-			arena_postfork_parent(arenas[i]);
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(i, false)) != NULL)
+			arena_postfork_parent(arena);
 	}
 	malloc_mutex_postfork_parent(&arenas_lock);
 	prof_postfork_parent();
@@ -2728,16 +2665,18 @@ _malloc_postfork(void)
 void
 jemalloc_postfork_child(void)
 {
-	unsigned i;
+	unsigned i, narenas;
 
 	assert(malloc_initialized());
 
 	/* Release all mutexes, now that fork() has completed. */
 	base_postfork_child();
 	chunk_postfork_child();
-	for (i = 0; i < narenas_total; i++) {
-		if (arenas[i] != NULL)
-			arena_postfork_child(arenas[i]);
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(i, false)) != NULL)
+			arena_postfork_child(arena);
 	}
 	malloc_mutex_postfork_child(&arenas_lock);
 	prof_postfork_child();
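Sizing the array to MALLOCX_ARENA_MAX+1 slots up front is what makes the bound assert in arena_get() sufficient: every index MALLOCX_ARENA() can encode has a permanent slot, so imallocx_flags_decode_hard() above can call arena_get(arena_ind, true) unconditionally. From the application side (a sketch using the public API; "arenas.extend" is the mallctl referenced in the arena_tdata_get_hard() comment above):

#include <jemalloc/jemalloc.h>

/* Sketch: create a fresh arena, then direct an allocation at it. */
static void *
alloc_in_new_arena(size_t size)
{
	unsigned ind;
	size_t sz = sizeof(ind);

	if (mallctl("arenas.extend", &ind, &sz, NULL, 0) != 0)
		return (NULL);
	/* mallocx() reaches arena_get(ind, true) via MALLOCX_ARENA(ind);
	 * size must be non-zero. */
	return (mallocx(size, MALLOCX_ARENA(ind)));
}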
diff --git a/src/tcache.c b/src/tcache.c
index 9f10a745..6e32f404 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -325,7 +325,8 @@ tcache_create(tsd_t *tsd, arena_t *arena)
 	/* Avoid false cacheline sharing. */
 	size = sa2u(size, CACHELINE);
 
-	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
+	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
+	    arena_get(0, false));
 	if (tcache == NULL)
 		return (NULL);
 
@@ -453,7 +454,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
 		return (true);
 
-	tcache = tcache_create(tsd, a0get());
+	tcache = tcache_create(tsd, arena_get(0, false));
 	if (tcache == NULL)
 		return (true);