Implementing opt.background_thread.
Added opt.background_thread to enable background threads, which currently handle purging. When the option is enabled, decay ticks no longer trigger purging; that work is left to the background threads. The number of threads is capped at the number of CPUs (ncpus). When percpu arena is enabled, CPU affinity is also set for the background threads. The sleep interval of a background thread is dynamic, determined by computing the number of pages that will need to be purged in the near future (based on the decay backlog).
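The sleep interval mentioned above is derived from the decay backlog: the thread estimates how many pages the smoothstep decay curve would release after sleeping for a given number of decay epochs, then searches for the shortest sleep that frees at least a threshold worth of pages (see decay_npurge_after_interval() and arena_decay_compute_purge_interval_impl() in src/background_thread.c below). The following is a minimal standalone sketch of that estimate, not part of the commit itself; the backlog and h_steps tables are passed in explicitly, the function name npurge_after_sleep is hypothetical, and the SMOOTHSTEP constants are copied from jemalloc's smoothstep.h for self-containment.

#include <stddef.h>
#include <stdint.h>

/* Assumed values, mirroring include/jemalloc/internal/smoothstep.h. */
#define SMOOTHSTEP_NSTEPS 200
#define SMOOTHSTEP_BFP 24 /* h_steps[] is fixed point with 24 fractional bits. */

/*
 * Estimate how many dirty pages would become purgeable if the background
 * thread slept for `nintervals` decay epochs.  backlog[0] holds the oldest
 * epoch's page count, backlog[SMOOTHSTEP_NSTEPS - 1] the newest; h_steps[]
 * is the cumulative smoothstep curve, so backlog[i] * h_steps[i] is the
 * share of entry i that is still allowed to remain unpurged right now.
 */
static size_t
npurge_after_sleep(const uint64_t backlog[SMOOTHSTEP_NSTEPS],
    const uint64_t h_steps[SMOOTHSTEP_NSTEPS], size_t nintervals) {
	uint64_t sum = 0;
	size_t i;

	/* Entries that age out completely contribute everything they still retain. */
	for (i = 0; i < nintervals; i++) {
		sum += backlog[i] * h_steps[i];
	}
	/* Remaining entries contribute the drop along the curve over the sleep. */
	for (; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += backlog[i] * (h_steps[i] - h_steps[i - nintervals]);
	}
	return (size_t)(sum >> SMOOTHSTEP_BFP);
}

With this commit the feature is off by default; it can be requested at startup with something like MALLOC_CONF="background_thread:true", or toggled at runtime through the "background_thread" mallctl added in src/ctl.c (both spellings taken from the diff below).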
src/arena.c | 156
@@ -9,14 +9,13 @@
/******************************************************************************/
/* Data. */

const char *percpu_arena_mode_names[] = {
const char *percpu_arena_mode_names[] = {
"disabled",
"percpu",
"phycpu"
};

const char *opt_percpu_arena = OPT_PERCPU_ARENA_DEFAULT;
percpu_arena_mode_t percpu_arena_mode = PERCPU_ARENA_MODE_DEFAULT;
const char *opt_percpu_arena = OPT_PERCPU_ARENA_DEFAULT;
percpu_arena_mode_t percpu_arena_mode = PERCPU_ARENA_MODE_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
@@ -24,7 +23,7 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const arena_bin_info_t arena_bin_info[NBINS] = {
const arena_bin_info_t arena_bin_info[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
@@ -39,6 +38,13 @@ const arena_bin_info_t arena_bin_info[NBINS] = {
#undef SC
};

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y, h_sum) \
h,
SMOOTHSTEP
#undef STEP
};

/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -47,7 +53,8 @@ const arena_bin_info_t arena_bin_info[NBINS] = {

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool all);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
arena_bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
@@ -359,7 +366,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
extent);
if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, true);
arena_decay_dirty(tsdn, arena, false, true);
}
}

@@ -606,12 +613,6 @@ arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
h,
SMOOTHSTEP
#undef STEP
};
uint64_t sum;
size_t npages_limit_backlog;
unsigned i;
@@ -660,17 +661,27 @@ arena_decay_backlog_update(arena_decay_t *decay, extents_t *extents,
arena_decay_backlog_update_last(decay, extents);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents) {
size_t npages_limit = arena_decay_backlog_npages_limit(decay);

if (extents_npages_get(extents) > npages_limit) {
arena_decay_to_limit(tsdn, arena, decay, extents, false,
npages_limit);
}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, extents_t *extents,
const nstime_t *time) {
uint64_t nadvance_u64;
nstime_t delta;

assert(arena_decay_deadline_reached(decay, time));

nstime_t delta;
nstime_copy(&delta, time);
nstime_subtract(&delta, &decay->epoch);
nadvance_u64 = nstime_divide(&delta, &decay->interval);

uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
assert(nadvance_u64 > 0);

/* Add nadvance_u64 decay intervals to epoch. */
@@ -686,14 +697,13 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay, extents_t *extents,
}

static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, extents_t *extents) {
size_t npages_limit = arena_decay_backlog_npages_limit(decay);

if (extents_npages_get(extents) > npages_limit) {
arena_decay_to_limit(tsdn, arena, decay, extents, false,
npages_limit);
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, const nstime_t *time, bool purge) {
arena_decay_epoch_advance_helper(decay, extents, time);
if (purge) {
arena_decay_try_purge(tsdn, arena, decay, extents);
}

/*
* There may be concurrent ndirty fluctuation between the purge above
* and the nunpurged update below, but this is inconsequential to decay
@@ -702,13 +712,6 @@ arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena,
decay->nunpurged = extents_npages_get(extents);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, const nstime_t *time) {
arena_decay_epoch_advance_helper(decay, extents, time);
arena_decay_epoch_advance_purge(tsdn, arena, decay, extents);
}

static void
arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
arena_decay_ms_write(decay, decay_ms);
@@ -759,9 +762,9 @@ arena_decay_ms_valid(ssize_t decay_ms) {
return false;
}

static void
static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents) {
extents_t *extents, bool is_background_thread) {
malloc_mutex_assert_owner(tsdn, &decay->mtx);

/* Purge all or nothing if the option is disabled. */
@@ -771,7 +774,7 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
arena_decay_to_limit(tsdn, arena, decay, extents, false,
0);
}
return;
return false;
}

nstime_t time;
@@ -799,11 +802,20 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
* If the deadline has been reached, advance to the current epoch and
* purge to the new limit if necessary. Note that dirty pages created
* during the current epoch are not subject to purge until a future
* epoch, so as a result purging only happens during epoch advances.
* epoch, so as a result purging only happens during epoch advances, or
* being triggered by background threads (scheduled event).
*/
if (arena_decay_deadline_reached(decay, &time)) {
arena_decay_epoch_advance(tsdn, arena, decay, extents, &time);
bool advance_epoch = arena_decay_deadline_reached(decay, &time);
if (advance_epoch) {
bool should_purge = is_background_thread ||
!background_thread_enabled();
arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
should_purge);
} else if (is_background_thread) {
arena_decay_try_purge(tsdn, arena, decay, extents);
}

return advance_epoch;
}

static ssize_t
@@ -838,7 +850,7 @@ arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
* arbitrary change during initial arena configuration.
*/
arena_decay_reinit(decay, extents, decay_ms);
arena_maybe_decay(tsdn, arena, decay, extents);
arena_maybe_decay(tsdn, arena, decay, extents, false);
malloc_mutex_unlock(tsdn, &decay->mtx);

return false;
@@ -974,40 +986,57 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
extents_t *extents, bool all) {
extents_t *extents, bool is_background_thread, bool all) {
if (all) {
malloc_mutex_lock(tsdn, &decay->mtx);
arena_decay_to_limit(tsdn, arena, decay, extents, all, 0);
} else {
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* No need to wait if another thread is in progress. */
return true;
}
arena_maybe_decay(tsdn, arena, decay, extents);
malloc_mutex_unlock(tsdn, &decay->mtx);

return false;
}

if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* No need to wait if another thread is in progress. */
return true;
}

bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
is_background_thread);
size_t npages_new;
if (epoch_advanced) {
/* Backlog is updated on epoch advance. */
npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
}
malloc_mutex_unlock(tsdn, &decay->mtx);

if (have_background_thread && background_thread_enabled() &&
epoch_advanced && !is_background_thread) {
background_thread_interval_check(tsdn, arena, decay, npages_new);
}

return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool all) {
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
&arena->extents_dirty, all);
&arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool all) {
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
&arena->extents_muzzy, all);
&arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool all) {
if (arena_decay_dirty(tsdn, arena, all)) {
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
return;
}
arena_decay_muzzy(tsdn, arena, all);
arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
@@ -1173,6 +1202,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
* extents, so only retained extents may remain.
*/
assert(extents_npages_get(&arena->extents_dirty) == 0);
assert(extents_npages_get(&arena->extents_muzzy) == 0);

/* Deallocate retained memory. */
arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -1971,19 +2001,35 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}

arena->base = base;
/* Set arena before creating background threads. */
arena_set(ind, arena);

nstime_init(&arena->create_time, 0);
nstime_update(&arena->create_time);

/* We don't support reetrancy for arena 0 bootstrapping. */
if (ind != 0 && hooks_arena_new_hook) {
/* We don't support reentrancy for arena 0 bootstrapping. */
if (ind != 0) {
/*
* If we're here, then arena 0 already exists, so bootstrapping
* is done enough that we should have tsd.
*/
assert(!tsdn_null(tsdn));
pre_reentrancy(tsdn_tsd(tsdn));
hooks_arena_new_hook();
if (hooks_arena_new_hook) {
hooks_arena_new_hook();
}
post_reentrancy(tsdn_tsd(tsdn));

/* background_thread_create() handles reentrancy internally. */
if (have_background_thread) {
bool err;
malloc_mutex_lock(tsdn, &background_thread_lock);
err = background_thread_create(tsdn_tsd(tsdn), ind);
malloc_mutex_unlock(tsdn, &background_thread_lock);
if (err) {
goto label_error;
}
}
}

return arena;
src/background_thread.c | 572 (new file)
@@ -0,0 +1,572 @@
#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"

/******************************************************************************/
/* Data. */

/* This option should be opt-in only. */
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;

/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;

/******************************************************************************/

#ifndef JEMALLOC_BACKGROUND_THREAD
#define NOT_REACHED { not_reached(); }
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_init(tsd_t *tsd) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable_single(tsd_t *tsd,
background_thread_info_t *info) NOT_REACHED
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
#undef NOT_REACHED
#else
bool
background_threads_init(tsd_t *tsd) {
assert(have_background_thread);
assert(narenas_total_get() > 0);

background_thread_enabled_set(tsd_tsdn(tsd), opt_background_thread);
if (malloc_mutex_init(&background_thread_lock,
"background_thread_global",
WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
malloc_mutex_rank_exclusive)) {
return true;
}
background_thread_info = (background_thread_info_t *)base_alloc(
tsd_tsdn(tsd), b0get(), ncpus * sizeof(background_thread_info_t),
CACHELINE);
if (background_thread_info == NULL) {
return true;
}

for (unsigned i = 0; i < ncpus; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_init(&info->mtx, "background_thread",
WITNESS_RANK_BACKGROUND_THREAD,
malloc_mutex_rank_exclusive)) {
return true;
}
if (pthread_cond_init(&info->cond, NULL)) {
return true;
}
info->started = false;
nstime_init(&info->next_wakeup, 0);
info->npages_to_purge_new = 0;
}

return false;
}

static inline bool
set_current_thread_affinity(UNUSED int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);

return (ret != 0);
#else
return false;
#endif
}

/* Threshold for determining when to wake up the background thread. */
#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX

static inline size_t
decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
size_t i;
uint64_t sum = 0;
for (i = 0; i < interval; i++) {
sum += decay->backlog[i] * h_steps[i];
}
for (; i < SMOOTHSTEP_NSTEPS; i++) {
sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
}

return (size_t)(sum >> SMOOTHSTEP_BFP);
}

static uint64_t
arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
extents_t *extents) {
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
/* Use minimal interval if decay is contended. */
return BACKGROUND_THREAD_MIN_INTERVAL_NS;
}

uint64_t interval;
ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is eagerly done or disabled currently. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}

uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
size_t npages = extents_npages_get(extents);
if (npages == 0) {
unsigned i;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
if (decay->backlog[i] > 0) {
break;
}
}
if (i == SMOOTHSTEP_NSTEPS) {
/* No dirty pages recorded. Sleep indefinitely. */
interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
goto label_done;
}
}
if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
/* Use max interval. */
interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
goto label_done;
}

size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
size_t ub = SMOOTHSTEP_NSTEPS;
/* Minimal 2 intervals to ensure reaching next epoch deadline. */
lb = (lb < 2) ? 2 : lb;
if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
(lb + 2 > ub)) {
interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
goto label_done;
}

assert(lb + 2 <= ub);
size_t npurge_lb, npurge_ub;
npurge_lb = decay_npurge_after_interval(decay, lb);
if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * lb;
goto label_done;
}
npurge_ub = decay_npurge_after_interval(decay, ub);
if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
interval = decay_interval_ns * ub;
goto label_done;
}

unsigned n_search = 0;
size_t target, npurge;
while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
&& (lb + 2 < ub)) {
target = (lb + ub) / 2;
npurge = decay_npurge_after_interval(decay, target);
if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
ub = target;
npurge_ub = npurge;
} else {
lb = target;
npurge_lb = npurge;
}
assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
}
interval = decay_interval_ns * (ub + lb) / 2;
label_done:
interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
malloc_mutex_unlock(tsdn, &decay->mtx);

return interval;
}

/* Compute purge interval for background threads. */
static uint64_t
arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
uint64_t i1, i2;
i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
&arena->extents_dirty);
if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return i1;
}
i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
&arena->extents_muzzy);

return i1 < i2 ? i1 : i2;
}

static inline uint64_t
background_work_once(tsdn_t *tsdn, unsigned ind) {
arena_t *arena;
unsigned i, narenas;
uint64_t min_interval;

min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
narenas = narenas_total_get();
for (i = ind; i < narenas; i += ncpus) {
arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}

arena_decay(tsdn, arena, true, false);
uint64_t interval = arena_decay_compute_purge_interval(tsdn,
arena);
if (interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return interval;
}

assert(interval > BACKGROUND_THREAD_MIN_INTERVAL_NS);
if (min_interval > interval) {
min_interval = interval;
}
}

return min_interval;
}

static void
background_work(tsdn_t *tsdn, unsigned ind) {
int ret;
background_thread_info_t *info = &background_thread_info[ind];

malloc_mutex_lock(tsdn, &info->mtx);
while (info->started) {
uint64_t interval = background_work_once(tsdn, ind);
info->npages_to_purge_new = 0;

if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
nstime_init(&info->next_wakeup,
BACKGROUND_THREAD_INDEFINITE_SLEEP);
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
continue;
}

assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
nstime_init(&info->next_wakeup, 0);
nstime_update(&info->next_wakeup);
info->next_wakeup.ns += interval;

nstime_t ts_wakeup;
struct timeval tv;
gettimeofday(&tv, NULL);
nstime_init2(&ts_wakeup, tv.tv_sec,
tv.tv_usec * 1000 + interval);
struct timespec ts;
ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock,
&ts);
assert(ret == ETIMEDOUT || ret == 0);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}

static void *
background_thread_entry(void *ind_arg) {
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
assert(thread_ind < narenas_total_get() && thread_ind < ncpus);

if (opt_percpu_arena != percpu_arena_disabled) {
set_current_thread_affinity((int)thread_ind);
}
/*
* Start periodic background work. We avoid fetching tsd to keep the
* background thread "outside", since there may be side effects, for
* example triggering new arena creation (which in turn triggers
* background thread creation).
*/
background_work(TSDN_NULL, thread_ind);
assert(pthread_equal(pthread_self(),
background_thread_info[thread_ind].thread));

return NULL;
}

/* Create a new background thread if needed. */
bool
background_thread_create(tsd_t *tsd, unsigned arena_ind) {
assert(have_background_thread);
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

/* We create at most NCPUs threads. */
size_t thread_ind = arena_ind % ncpus;
background_thread_info_t *info = &background_thread_info[thread_ind];

bool need_new_thread;
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
need_new_thread = background_thread_enabled() && !info->started;
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
if (!need_new_thread) {
return false;
}

pre_reentrancy(tsd);
int err;
load_pthread_create_fptr();
if ((err = pthread_create(&info->thread, NULL,
background_thread_entry, (void *)thread_ind)) != 0) {
malloc_printf("<jemalloc>: arena %u background thread creation "
"failed (%d).\n", arena_ind, err);
}
post_reentrancy(tsd);

malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->started == false);
if (err == 0) {
info->started = true;
n_background_threads++;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

return (err != 0);
}

bool
background_threads_enable(tsd_t *tsd) {
assert(n_background_threads == 0);
assert(background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);

VARIABLE_ARRAY(bool, created, ncpus);
unsigned i, ncreated;
for (i = 0; i < ncpus; i++) {
created[i] = false;
}
ncreated = 0;

unsigned n = narenas_total_get();
for (i = 0; i < n; i++) {
if (created[i % ncpus] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
}
if (background_thread_create(tsd, i)) {
return true;
}
created[i % ncpus] = true;
if (++ncreated == ncpus) {
break;
}
}

return false;
}

bool
background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
pre_reentrancy(tsd);

bool has_thread;
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
if (info->started) {
has_thread = true;
info->started = false;
pthread_cond_signal(&info->cond);
} else {
has_thread = false;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);

if (!has_thread) {
post_reentrancy(tsd);
return false;
}
void *ret;
if (pthread_join(info->thread, &ret)) {
post_reentrancy(tsd);
return true;
}
assert(ret == NULL);
n_background_threads--;
post_reentrancy(tsd);

return false;
}

bool
background_threads_disable(tsd_t *tsd) {
assert(!background_thread_enabled());
for (unsigned i = 0; i < ncpus; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (background_threads_disable_single(tsd, info)) {
return true;
}
}
assert(n_background_threads == 0);

return false;
}

/* Check if we need to signal the background thread early. */
void
background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
arena_decay_t *decay, size_t npages_new) {
background_thread_info_t *info = arena_background_thread_info_get(
arena);

if (malloc_mutex_trylock(tsdn, &info->mtx)) {
/*
* Background thread may hold the mutex for a long period of
* time. We'd like to avoid the variance on application
* threads. So keep this non-blocking, and leave the work to a
* future epoch.
*/
return;
}

if (!info->started) {
goto label_done;
}
if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
goto label_done;
}

ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
if (decay_time <= 0) {
/* Purging is eagerly done or disabled currently. */
goto label_done_unlock2;
}
if (nstime_compare(&info->next_wakeup, &decay->epoch) <= 0) {
goto label_done_unlock2;
}

uint64_t decay_interval_ns = nstime_ns(&decay->interval);
assert(decay_interval_ns > 0);
nstime_t diff;
nstime_copy(&diff, &info->next_wakeup);
nstime_subtract(&diff, &decay->epoch);
if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
goto label_done_unlock2;
}

if (npages_new > 0) {
size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
/*
* Compute how many new pages we would need to purge by the next
* wakeup, which is used to determine if we should signal the
* background thread.
*/
uint64_t npurge_new;
if (n_epoch >= SMOOTHSTEP_NSTEPS) {
npurge_new = npages_new;
} else {
uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
assert(h_steps_max >=
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new = npages_new * (h_steps_max -
h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
npurge_new >>= SMOOTHSTEP_BFP;
}
info->npages_to_purge_new += npurge_new;
}

if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD ||
(nstime_ns(&info->next_wakeup) ==
BACKGROUND_THREAD_INDEFINITE_SLEEP && info->npages_to_purge_new > 0)) {
info->npages_to_purge_new = 0;
pthread_cond_signal(&info->cond);
}
label_done_unlock2:
malloc_mutex_unlock(tsdn, &decay->mtx);
label_done:
malloc_mutex_unlock(tsdn, &info->mtx);
}

void
background_thread_prefork0(tsdn_t *tsdn) {
malloc_mutex_prefork(tsdn, &background_thread_lock);
if (background_thread_enabled()) {
background_thread_enabled_set(tsdn, false);
background_threads_disable(tsdn_tsd(tsdn));
/* Enable again to re-create threads after fork. */
background_thread_enabled_set(tsdn, true);
}
assert(n_background_threads == 0);
}

void
background_thread_prefork1(tsdn_t *tsdn) {
for (unsigned i = 0; i < ncpus; i++) {
malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
}
}

static void
background_thread_postfork_init(tsdn_t *tsdn) {
if (background_thread_enabled()) {
background_threads_enable(tsdn_tsd(tsdn));
}
}

void
background_thread_postfork_parent(tsdn_t *tsdn) {
for (unsigned i = 0; i < ncpus; i++) {
malloc_mutex_postfork_parent(tsdn,
&background_thread_info[i].mtx);
}
background_thread_postfork_init(tsdn);
malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
}

void
background_thread_postfork_child(tsdn_t *tsdn) {
for (unsigned i = 0; i < ncpus; i++) {
malloc_mutex_postfork_child(tsdn,
&background_thread_info[i].mtx);
}
malloc_mutex_postfork_child(tsdn, &background_thread_lock);

malloc_mutex_lock(tsdn, &background_thread_lock);
background_thread_postfork_init(tsdn);
malloc_mutex_unlock(tsdn, &background_thread_lock);
}

#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
#undef BACKGROUND_THREAD_INDEFINITE_SLEEP

#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */

#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
#include <dlfcn.h>

int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);

void *
load_pthread_create_fptr(void) {
if (pthread_create_fptr) {
return pthread_create_fptr;
}

pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}

return pthread_create_fptr;
}

#endif
src/ctl.c | 92
@@ -53,6 +53,7 @@ static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
@@ -78,6 +79,7 @@ CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
@@ -265,6 +267,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("background_thread"), CTL(opt_background_thread)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
@@ -501,6 +504,7 @@ static const ctl_named_node_t stats_node[] = {
static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
{NAME("background_thread"), CTL(background_thread)},
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
@@ -1445,6 +1449,53 @@ label_return:
return ret;
}

static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;

if (!have_background_thread) {
return ENOENT;
}

malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (newp == NULL) {
oldval = background_thread_enabled();
READ(oldval, bool);
} else {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = background_thread_enabled();
READ(oldval, bool);

bool newval = *(bool *)newp;
if (newval == oldval) {
ret = 0;
goto label_return;
}

background_thread_enabled_set(tsd_tsdn(tsd), newval);
if (newval) {
if (background_threads_enable(tsd)) {
ret = EFAULT;
goto label_return;
}
} else {
if (background_threads_disable(tsd)) {
ret = EFAULT;
goto label_return;
}
}
}
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
return ret;
}

/******************************************************************************/

CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
@@ -1466,6 +1517,7 @@ CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, opt_percpu_arena, const char *)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
@@ -1764,7 +1816,8 @@ arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {

for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL) {
arena_decay(tsdn, tarenas[i], all);
arena_decay(tsdn, tarenas[i], false,
all);
}
}
} else {
@@ -1778,7 +1831,7 @@ arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
malloc_mutex_unlock(tsdn, &ctl_mtx);

if (tarena != NULL) {
arena_decay(tsdn, tarena, all);
arena_decay(tsdn, tarena, false, all);
}
}
}
@@ -1837,6 +1890,35 @@ label_return:
return ret;
}

static void
arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
/* Temporarily disable the background thread during arena reset. */
if (have_background_thread) {
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
if (background_thread_enabled()) {
unsigned ind = arena_ind % ncpus;
background_thread_info_t *info =
&background_thread_info[ind];
assert(info->started);
background_threads_disable_single(tsd, info);
}
}
}

static void
arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
if (have_background_thread) {
if (background_thread_enabled()) {
unsigned ind = arena_ind % ncpus;
background_thread_info_t *info =
&background_thread_info[ind];
assert(!info->started);
background_thread_create(tsd, ind);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
}
}

static int
arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
@@ -1850,7 +1932,9 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return ret;
}

arena_reset_prepare_background_thread(tsd, arena_ind);
arena_reset(tsd, arena);
arena_reset_finish_background_thread(tsd, arena_ind);

return ret;
}
@@ -1875,9 +1959,10 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}

arena_reset_prepare_background_thread(tsd, arena_ind);
/* Merge stats after resetting and purging arena. */
arena_reset(tsd, arena);
arena_decay(tsd_tsdn(tsd), arena, true);
arena_decay(tsd_tsdn(tsd), arena, false, true);
ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
ctl_darena->initialized = true;
ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
@@ -1888,6 +1973,7 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Record arena index for later recycling via arenas.create. */
ql_elm_new(ctl_arena, destroyed_link);
ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
arena_reset_finish_background_thread(tsd, arena_ind);

assert(ret == 0);
label_return:
src/jemalloc.c
@@ -420,7 +420,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {

/* Actually initialize the arena. */
arena = arena_new(tsdn, ind, extent_hooks);
arena_set(ind, arena);

return arena;
}

@@ -1140,6 +1140,8 @@ malloc_conf_init(void) {
}
continue;
}
CONF_HANDLE_BOOL(opt_background_thread,
"background_thread");
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix,
@@ -1380,6 +1382,22 @@ malloc_init_narenas(void) {
return false;
}

static bool
malloc_init_background_threads(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &init_lock);
if (!have_background_thread) {
if (opt_background_thread) {
malloc_printf("<jemalloc>: option background_thread "
"currently supports pthread only. \n");
return true;
} else {
return false;
}
}

return background_threads_init(tsd);
}

static bool
malloc_init_hard_finish(void) {
if (malloc_mutex_boot())
@@ -1421,8 +1439,8 @@ malloc_init_hard(void) {
}
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);

/* Need this before prof_boot2 (for allocation). */
if (malloc_init_narenas()) {
/* Initialize narenas before prof_boot2 (for allocation). */
if (malloc_init_narenas() || malloc_init_background_threads(tsd)) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
return true;
}
@@ -1439,6 +1457,23 @@ malloc_init_hard(void) {

malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
malloc_tsd_boot1();

/* Update TSD after tsd_boot1. */
tsd = tsd_fetch();
if (opt_background_thread) {
assert(have_background_thread);
/*
* Need to finish init & unlock first before creating background
* threads (pthread_create depends on malloc).
*/
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
bool err = background_thread_create(tsd, 0);
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
if (err) {
return true;
}
}

return false;
}

@@ -2970,7 +3005,13 @@ _malloc_prefork(void)
ctl_prefork(tsd_tsdn(tsd));
tcache_prefork(tsd_tsdn(tsd));
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
if (have_background_thread) {
background_thread_prefork0(tsd_tsdn(tsd));
}
prof_prefork0(tsd_tsdn(tsd));
if (have_background_thread) {
background_thread_prefork1(tsd_tsdn(tsd));
}
/* Break arena prefork into stages to preserve lock order. */
for (i = 0; i < 7; i++) {
for (j = 0; j < narenas; j++) {
@@ -3036,6 +3077,9 @@ _malloc_postfork(void)
}
}
prof_postfork_parent(tsd_tsdn(tsd));
if (have_background_thread) {
background_thread_postfork_parent(tsd_tsdn(tsd));
}
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
tcache_postfork_parent(tsd_tsdn(tsd));
ctl_postfork_parent(tsd_tsdn(tsd));
@@ -3060,6 +3104,9 @@ jemalloc_postfork_child(void) {
}
}
prof_postfork_child(tsd_tsdn(tsd));
if (have_background_thread) {
background_thread_postfork_child(tsd_tsdn(tsd));
}
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
tcache_postfork_child(tsd_tsdn(tsd));
ctl_postfork_child(tsd_tsdn(tsd));
src/mutex.c | 19
@@ -5,10 +5,6 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
#include <dlfcn.h>
#endif

#ifndef _CRT_SPINCOUNT
#define _CRT_SPINCOUNT 4000
#endif
@@ -24,10 +20,6 @@ static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
#endif

/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
@@ -35,18 +27,9 @@ static void pthread_create_once(void);
*/

#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);

static void
pthread_create_once(void) {
pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
if (pthread_create_fptr == NULL) {
malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
"\"pthread_create\")\n");
abort();
}

pthread_create_fptr = load_pthread_create_fptr();
isthreaded = true;
}
src/stats.c
@@ -816,6 +816,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_CHAR_P(dss, ",")
OPT_WRITE_UNSIGNED(narenas, ",")
OPT_WRITE_CHAR_P(percpu_arena, ",")
OPT_WRITE_BOOL_MUTABLE(background_thread, background_thread, ",")
OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_ms, arenas.dirty_decay_ms, ",")
OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_ms, arenas.muzzy_decay_ms, ",")
OPT_WRITE_CHAR_P(junk, ",")