Remove opt_background_thread_hpa_interval_max_ms
Now that the HPA can communicate the time until its deferred work should be done, this option is no longer used.
parent 8229cc77c5
commit 6e848a005e
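
For context, the mechanism that replaces the option is visible in the diff below: each shard reports when its next deferred work is due (pa_shard_time_until_deferred_work()), so the background thread can derive its sleep time directly instead of being capped by a user-set interval. A minimal sketch of that pattern, using only names that appear in this diff; compute_sleep_ns() itself is a hypothetical helper, not the actual background thread loop:

	static uint64_t
	compute_sleep_ns(tsdn_t *tsdn, pa_shard_t *shard) {
		/* Ask the shard when its next deferred work is due... */
		uint64_t ns = pa_shard_time_until_deferred_work(tsdn, shard);
		/* ...and clamp to built-in bounds, not to a user option. */
		if (ns < BACKGROUND_THREAD_DEFERRED_MIN) {
			ns = BACKGROUND_THREAD_DEFERRED_MIN;
		}
		if (ns > BACKGROUND_THREAD_DEFERRED_MAX) {
			ns = BACKGROUND_THREAD_DEFERRED_MAX;
		}
		return ns;
	}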
include/jemalloc/internal/background_thread_externs.h
@@ -2,7 +2,6 @@
 #define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
 
 extern bool opt_background_thread;
-extern ssize_t opt_background_thread_hpa_interval_max_ms;
 extern size_t opt_max_background_threads;
 extern malloc_mutex_t background_thread_lock;
 extern atomic_b_t background_thread_enabled_state;
@@ -16,8 +15,6 @@ bool background_threads_disable(tsd_t *tsd);
 bool background_thread_is_started(background_thread_info_t* info);
 void background_thread_wakeup_early(background_thread_info_t *info,
     nstime_t *remaining_sleep);
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    decay_t *decay, size_t npages_new);
 void background_thread_prefork0(tsdn_t *tsdn);
 void background_thread_prefork1(tsdn_t *tsdn);
 void background_thread_postfork_parent(tsdn_t *tsdn);
src/background_thread.c
@@ -13,13 +13,6 @@ JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
 /* Read-only after initialization. */
 bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
 size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
-/*
- * This is disabled (and set to -1) if the HPA is. If the HPA is enabled,
- * malloc_conf initialization sets it to
- * BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED.
- */
-ssize_t opt_background_thread_hpa_interval_max_ms =
-    BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED;
 
 /* Used for thread creation, termination and stats. */
 malloc_mutex_t background_thread_lock;
@@ -60,7 +53,7 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
 bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
 bool background_threads_enable(tsd_t *tsd) NOT_REACHED
 bool background_threads_disable(tsd_t *tsd) NOT_REACHED
-bool background_thread_running(background_thread_info_t *info) NOT_REACHED
+bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
 void background_thread_wakeup_early(background_thread_info_t *info,
     nstime_t *remaining_sleep) NOT_REACHED
 void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
@@ -593,7 +586,7 @@ background_thread_wakeup_early(background_thread_info_t *info,
 	 * we know that background thread wakes up soon, so the time to cache
 	 * the just freed memory is bounded and low.
 	 */
-	if (remaining_sleep && nstime_ns(remaining_sleep) <
+	if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
 	    BACKGROUND_THREAD_MIN_INTERVAL_NS) {
 		return;
 	}
src/ctl.c
@@ -113,7 +113,6 @@ CTL_PROTO(opt_oversize_threshold)
 CTL_PROTO(opt_background_thread)
 CTL_PROTO(opt_mutex_max_spin)
 CTL_PROTO(opt_max_background_threads)
-CTL_PROTO(opt_background_thread_hpa_interval_max_ms)
 CTL_PROTO(opt_dirty_decay_ms)
 CTL_PROTO(opt_muzzy_decay_ms)
 CTL_PROTO(opt_stats_print)
@@ -427,8 +426,6 @@ static const ctl_named_node_t opt_node[] = {
 	{NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
 	{NAME("background_thread"), CTL(opt_background_thread)},
 	{NAME("max_background_threads"), CTL(opt_max_background_threads)},
-	{NAME("background_thread_hpa_interval_max_ms"),
-	    CTL(opt_background_thread_hpa_interval_max_ms)},
 	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
 	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
 	{NAME("stats_print"), CTL(opt_stats_print)},
@@ -2148,8 +2145,6 @@ CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
 CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
 CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
 CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
-CTL_RO_NL_GEN(opt_background_thread_hpa_interval_max_ms,
-    opt_background_thread_hpa_interval_max_ms, ssize_t)
 CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
 CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
src/jemalloc.c
@@ -1416,10 +1416,6 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			    CONF_CHECK_MIN, CONF_CHECK_MAX,
 			    true);
 			CONF_HANDLE_BOOL(opt_hpa, "hpa")
-			CONF_HANDLE_SSIZE_T(
-			    opt_background_thread_hpa_interval_max_ms,
-			    "background_thread_hpa_interval_max_ms", -1,
-			    SSIZE_MAX)
 			CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
 			    "hpa_slab_max_alloc", PAGE, HUGEPAGE,
 			    CONF_CHECK_MIN, CONF_CHECK_MAX, true);
@@ -1658,11 +1654,6 @@ malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
 	malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
 	malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
 	    NULL);
-	if (opt_hpa && opt_background_thread_hpa_interval_max_ms
-	    == BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED) {
-		opt_background_thread_hpa_interval_max_ms =
-		    BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED;
-	}
 }
 
 #undef MALLOC_CONF_NSOURCES
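
Until now the option could be set like any other jemalloc option, including through the global option string; a hedged illustration of what stops working (the value 100 is arbitrary):

	/* Illustrative only: setting the option via jemalloc's malloc_conf global. */
	const char *malloc_conf = "background_thread:true,hpa:true,"
	    "background_thread_hpa_interval_max_ms:100";

Per the comment removed from src/background_thread.c above, -1 meant disabled, and enabling the HPA defaulted it to BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED; after this commit the option name is simply no longer recognized.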
src/pa.c
@@ -245,19 +245,6 @@ pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
 	}
 }
 
-static inline uint64_t
-pa_shard_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
-		/* Use minimal interval if decay is contended. */
-		return BACKGROUND_THREAD_DEFERRED_MIN;
-	}
-	uint64_t result = decay_ns_until_purge(decay, npages,
-	    ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
-
-	malloc_mutex_unlock(tsdn, &decay->mtx);
-	return result;
-}
-
 /*
  * Get time until next deferred work ought to happen. If there are multiple
  * things that have been deferred, this function calculates the time until
@@ -265,32 +252,11 @@ pa_shard_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
  */
 uint64_t
 pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
-	uint64_t time;
-	time = pa_shard_ns_until_purge(tsdn,
-	    &shard->pac.decay_dirty,
-	    ecache_npages_get(&shard->pac.ecache_dirty));
+	uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
 	if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
 		return time;
 	}
 
-	uint64_t muzzy = pa_shard_ns_until_purge(tsdn,
-	    &shard->pac.decay_muzzy,
-	    ecache_npages_get(&shard->pac.ecache_muzzy));
-	if (muzzy < time) {
-		time = muzzy;
-		if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
-			return time;
-		}
-	}
-
-	uint64_t pac = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
-	if (pac < time) {
-		time = pac;
-		if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
-			return time;
-		}
-	}
-
 	if (pa_shard_uses_hpa(shard)) {
 		uint64_t hpa =
 		    pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
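
The simplification above leans on the page allocator interface: pa_shard_time_until_deferred_work() now asks each pai_t for its own deadline instead of reaching into decay state itself. pai_time_until_deferred_work() is not shown in this diff; presumably it is plain vtable dispatch, roughly as below (the field name is an assumption):

	static inline uint64_t
	pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
		/* Each pai implementation (pac, hpa) reports its own deadline. */
		return self->time_until_deferred_work(tsdn, self);
	}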
src/pac.c
@@ -208,9 +208,38 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
 	*deferred_work_generated = true;
 }
 
+static inline uint64_t
+pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
+	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+		/* Use minimal interval if decay is contended. */
+		return BACKGROUND_THREAD_DEFERRED_MIN;
+	}
+	uint64_t result = decay_ns_until_purge(decay, npages,
+	    ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
+
+	malloc_mutex_unlock(tsdn, &decay->mtx);
+	return result;
+}
+
 static uint64_t
 pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
-	return BACKGROUND_THREAD_DEFERRED_MAX;
+	uint64_t time;
+	pac_t *pac = (pac_t *)self;
+
+	time = pac_ns_until_purge(tsdn,
+	    &pac->decay_dirty,
+	    ecache_npages_get(&pac->ecache_dirty));
+	if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
+		return time;
+	}
+
+	uint64_t muzzy = pac_ns_until_purge(tsdn,
+	    &pac->decay_muzzy,
+	    ecache_npages_get(&pac->ecache_muzzy));
+	if (muzzy < time) {
+		time = muzzy;
+	}
+	return time;
+}
 
 bool
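
Two notes on the pac.c side. pac_time_until_deferred_work() is a pai callback, so pac presumably wires it into its vtable during initialization, along the lines of this hedged sketch (the hook-up itself is outside this diff):

	/* In pac's init path: expose the deadline callback through the pai. */
	pac->pai.time_until_deferred_work = pac_time_until_deferred_work;

The trylock in pac_ns_until_purge() is deliberate: if the decay mutex is contended, another thread is actively mutating decay state, so returning BACKGROUND_THREAD_DEFERRED_MIN makes the background thread re-check shortly instead of blocking on a lock whose holder is about to change the answer anyway.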
src/stats.c
@@ -1499,7 +1499,6 @@ stats_general_print(emitter_t *emitter) {
 	OPT_WRITE_CHAR_P("metadata_thp")
 	OPT_WRITE_INT64("mutex_max_spin")
 	OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
-	OPT_WRITE_SSIZE_T("background_thread_hpa_interval_max_ms")
 	OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
 	OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
 	OPT_WRITE_SIZE_T("lg_extent_max_active_fit")
test/unit/background_thread_enable.c
@@ -77,7 +77,7 @@ wait_until_thread_is_enabled(unsigned arena_id) {
 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
 		sleeping = background_thread_indefinite_sleep(info);
-		assert_d_lt(iterations, (int)1e6,
+		assert_d_lt(iterations, UINT64_C(1000000),
 		    "Waiting for a thread to start for too long");
 	} while (!sleeping);
 }