Refactor arenas array (fixes deadlock).

Refactor the arenas array, which contains pointers to all extant arenas,
such that it starts out as a sparse array of maximum size, and use
double-checked atomics-based reads as the basis for fast and simple
arena_get().  Additionally, reduce arenas_lock's role such that it only
protects against arena initialization races.  These changes remove the
possibility for arena lookups to trigger locking, which resolves at
least one known (fork-related) deadlock.

This resolves #315.
commit 767d85061a
parent 3812729167
Jason Evans
2016-02-24 23:58:10 -08:00
9 changed files with 162 additions and 214 deletions
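
The reader half of this scheme is visible in the jemalloc_internal.h.in hunk below: arena_get() does a plain array read, retries with an atomic read, and only then falls back to initialization. That is only safe if the writer publishes each arena with a single atomic store after the arena is fully constructed, with arenas_lock serializing concurrent initializers. The commit's initialization path is not part of this extract; the following is a minimal sketch of that writer side (arena_publish() is a hypothetical name, and the body is illustrative rather than the commit's code):

    /* Sketch: serialize creation on arenas_lock, then publish atomically. */
    static arena_t *
    arena_publish(unsigned ind)
    {
    	arena_t *arena;

    	malloc_mutex_lock(&arenas_lock);	/* Protects init races only. */
    	arena = atomic_read_p((void *)&arenas[ind]);
    	if (arena == NULL) {
    		/* Construct fully before publication... */
    		arena = arena_new(ind);
    		/* ...so lock-free readers never observe a partial arena. */
    		atomic_cas_p((void **)&arenas[ind], NULL, arena);
    	}
    	malloc_mutex_unlock(&arenas_lock);
    	return (arena);
    }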

include/jemalloc/internal/arena.h

@@ -290,14 +290,14 @@ struct arena_s {
 
 	/*
 	 * Number of threads currently assigned to this arena.  This field is
-	 * protected by arenas_lock.
+	 * synchronized via atomic operations.
 	 */
 	unsigned		nthreads;
 
 	/*
 	 * There are three classes of arena operations from a locking
 	 * perspective:
-	 * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
+	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
 	 * 2) Bin-related operations are protected by bin locks.
 	 * 3) Chunk- and run-related operations are protected by this mutex.
 	 */
@@ -465,7 +465,6 @@ struct arena_s {
 
 /* Used in conjunction with tsd for fast arena-related context lookup. */
 struct arena_tdata_s {
-	arena_t		*arena;
 	ticker_t	decay_ticker;
 };
 #endif /* JEMALLOC_ARENA_STRUCTS_B */
@@ -578,6 +577,9 @@ void	arena_stats_merge(arena_t *arena, const char **dss,
     ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+unsigned	arena_nthreads_get(arena_t *arena);
+void	arena_nthreads_inc(arena_t *arena);
+void	arena_nthreads_dec(arena_t *arena);
 arena_t	*arena_new(unsigned ind);
 bool	arena_boot(void);
 void	arena_prefork(arena_t *arena);

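The arena_nthreads_get/inc/dec prototypes added above are defined in src/arena.c, which this page does not include. Given the new contract that nthreads is synchronized via atomic operations, their likely shape is a thin wrapper per operation (a sketch, not necessarily the commit's verbatim code):

    unsigned
    arena_nthreads_get(arena_t *arena)
    {
    	return (atomic_read_u(&arena->nthreads));
    }

    void
    arena_nthreads_inc(arena_t *arena)
    {
    	atomic_add_u(&arena->nthreads, 1);
    }

    void
    arena_nthreads_dec(arena_t *arena)
    {
    	atomic_sub_u(&arena->nthreads, 1);
    }
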
include/jemalloc/internal/atomic.h

@@ -28,8 +28,8 @@
  * callers.
  *
  *   <t> atomic_read_<t>(<t> *p) { return (*p); }
- *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
- *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
+ *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
  *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
  *   {
  *     if (*p != c)

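This hunk is documentation-only, but the distinction it fixes is real: the old pseudocode computed *p + x without storing it, describing a read rather than a read-modify-write, whereas *p += x makes clear that the operation both updates memory and returns the new value. Callers depend on both effects; an illustrative, hypothetical use:

    static unsigned refs;	/* Hypothetical counter, not from this commit. */

    /*
     * atomic_sub_u() updates refs and returns the decremented value, so
     * exactly one releaser (the last) observes zero.
     */
    if (atomic_sub_u(&refs, 1) == 0)
    	object_teardown();	/* Hypothetical cleanup hook. */
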
include/jemalloc/internal/jemalloc_internal.h.in

@@ -438,7 +438,13 @@ extern unsigned	opt_narenas;
 extern bool	in_valgrind;
 
 /* Number of CPUs. */
-extern unsigned	ncpus;
+extern unsigned		ncpus;
+
+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+extern arena_t		**arenas;
 
 /*
  * index2size_tab encodes the same information as could be computed (at
@@ -452,21 +458,17 @@ extern size_t const	index2size_tab[NSIZES+1];
  */
 extern uint8_t const	size2index_tab[];
 
-arena_t	*a0get(void);
 void	*a0malloc(size_t size);
 void	a0dalloc(void *ptr);
 void	*bootstrap_malloc(size_t size);
 void	*bootstrap_calloc(size_t num, size_t size);
 void	bootstrap_free(void *ptr);
-arena_t	*arenas_extend(unsigned ind);
-arena_t	*arena_init(unsigned ind);
 unsigned	narenas_total_get(void);
+arena_t	*arena_init(unsigned ind);
 arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t	*arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    arena_tdata_t *tdata);
 arena_t	*arena_choose_hard(tsd_t *tsd);
 void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
-unsigned	arena_nbound(unsigned ind);
 void	thread_allocated_cleanup(tsd_t *tsd);
 void	thread_deallocated_cleanup(tsd_t *tsd);
 void	arena_cleanup(tsd_t *tsd);
@@ -543,8 +545,7 @@ size_t	sa2u(size_t size, size_t alignment);
 arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
 arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
     bool refresh_if_missing);
-arena_t	*arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    bool refresh_if_missing);
+arena_t	*arena_get(unsigned ind, bool init_if_missing);
 ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif
@@ -819,19 +820,19 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
 }
 
 JEMALLOC_INLINE arena_t *
-arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
-    bool refresh_if_missing)
+arena_get(unsigned ind, bool init_if_missing)
 {
-	arena_tdata_t *tdata;
+	arena_t *ret;
 
-	/* init_if_missing requires refresh_if_missing. */
-	assert(!init_if_missing || refresh_if_missing);
+	assert(ind <= MALLOCX_ARENA_MAX);
 
-	tdata = arena_tdata_get(tsd, ind, refresh_if_missing);
-	if (unlikely(tdata == NULL || tdata->arena == NULL))
-		return (arena_get_hard(tsd, ind, init_if_missing, tdata));
-
-	return (tdata->arena);
+	ret = arenas[ind];
+	if (unlikely(ret == NULL)) {
+		ret = atomic_read_p((void *)&arenas[ind]);
+		if (init_if_missing && unlikely(ret == NULL))
+			ret = arena_init(ind);
+	}
+	return (ret);
 }
 
 JEMALLOC_INLINE ticker_t *

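The arena_get() rewrite above also changes its signature, so every call site drops the tsd and refresh_if_missing arguments. A lookup is now a bounds-asserted array read that can never take arenas_lock, which is the property the commit message credits with removing the fork-related deadlock. A hypothetical call site, before and after:

    /* Before: routed through per-thread tdata; could fall back to
     * arena_get_hard() and acquire locks. */
    arena = arena_get(tsd, ind, false, false);

    /* After: double-checked read of the sparse global array; locks only
     * if init_if_missing forces arena_init(). */
    arena = arena_get(ind, false);
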
include/jemalloc/internal/private_symbols.txt

@@ -1,5 +1,4 @@
 a0dalloc
-a0get
 a0malloc
 arena_aalloc
 arena_alloc_junk_small
@@ -34,7 +33,6 @@ arena_decay_ticks
 arena_dss_prec_get
 arena_dss_prec_set
 arena_get
-arena_get_hard
 arena_init
 arena_lg_dirty_mult_default_get
 arena_lg_dirty_mult_default_set
@@ -73,10 +71,12 @@ arena_migrate
 arena_miscelm_get
 arena_miscelm_to_pageind
 arena_miscelm_to_rpages
-arena_nbound
 arena_new
 arena_node_alloc
 arena_node_dalloc
+arena_nthreads_dec
+arena_nthreads_get
+arena_nthreads_inc
 arena_palloc
 arena_postfork_child
 arena_postfork_parent
@@ -106,6 +106,7 @@ arena_stats_merge
 arena_tcache_fill_small
 arena_tdata_get
 arena_tdata_get_hard
+arenas
 atomic_add_p
 atomic_add_u
 atomic_add_uint32