Allow PAI to calculate time until deferred work
Previously, the calculation of sleep time between wakeups was implemented within background_thread. As a result, parts of decay- and HPA-specific logic were mixed into the background thread implementation. With this change, the background thread delegates the calculation to the arena, which in turn delegates it to the PAI. The next step is to implement the actual calculation of time until deferred work in the HPA.
committed by Alexander Lapenkov
parent 26140dd246
commit b8b8027f19
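To make the delegation concrete, here is a minimal sketch of how the new query might flow from the arena down to the page allocator interface (PAI). Only the declarations (arena_time_until_deferred, pa_shard_time_until_deferred_work, pai_time_until_deferred_work) appear in this diff; the bodies below, and the shard fields they touch (pac.pai, hpa_sec.pai, ever_used_hpa), are illustrative assumptions rather than the committed implementation.

/* Sketch only: the real bodies live in arena.c and pa.c and may differ. */
uint64_t
arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena) {
    /* The arena no longer reasons about decay itself; it asks its shard. */
    return pa_shard_time_until_deferred_work(tsdn, &arena->pa_shard);
}

uint64_t
pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
    /* The shard asks each PAI it owns and keeps the soonest deadline. */
    uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
    if (shard->ever_used_hpa) {
        uint64_t hpa_time =
            pai_time_until_deferred_work(tsdn, &shard->hpa_sec.pai);
        if (hpa_time < time) {
            time = hpa_time;
        }
    }
    return time;
}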
@@ -8,6 +8,12 @@
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"

/*
 * When the amount of pages to be purged exceeds this amount, deferred purge
 * should happen.
 */
#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)

extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
@@ -16,7 +22,6 @@ extern const char *percpu_arena_mode_names[];

extern div_info_t arena_binind_div_info[SC_NBINS];

extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;
extern emap_t arena_emap_global;
@@ -51,6 +56,7 @@ bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all);
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
@@ -13,6 +13,9 @@ extern background_thread_info_t *background_thread_info;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
bool background_thread_running(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
    nstime_t *remaining_sleep);
void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
    decay_t *decay, size_t npages_new);
void background_thread_prefork0(tsdn_t *tsdn);
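background_thread_running and background_thread_wakeup_early are new entry points. Below is a hedged sketch of how a caller with newly urgent deferred work might use them; the call site and the meaning of remaining_sleep are assumptions, only the prototypes above come from the diff.

/* Hypothetical call site: deferred work became urgent, so shorten the
 * background thread's current sleep. */
background_thread_info_t *info = arena_background_thread_info_get(arena);
nstime_t remaining;
nstime_init(&remaining, 0); /* Assume 0 means "wake as soon as possible". */
if (background_thread_running(info)) {
    background_thread_wakeup_early(info, &remaining);
}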
@@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) {
    return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}

JEMALLOC_ALWAYS_INLINE void
arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread) {
    if (!background_thread_enabled() || is_background_thread) {
        return;
    }
    background_thread_info_t *info =
        arena_background_thread_info_get(arena);
    if (background_thread_indefinite_sleep(info)) {
        background_thread_interval_check(tsdn, arena,
            &arena->pa_shard.pac.decay_dirty, 0);
    }
}

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
@@ -19,6 +19,9 @@
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000

#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_C(-1)

typedef enum {
    background_thread_stopped,
    background_thread_started,
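BACKGROUND_THREAD_DEFERRED_MIN and BACKGROUND_THREAD_DEFERRED_MAX look like the bounds of the value produced by the time_until_deferred_work path. A sketch of how a caller might interpret the two sentinels follows; the interpretation is inferred from the names and is not stated in the diff.

uint64_t ns = arena_time_until_deferred(tsdn, arena);
if (ns == BACKGROUND_THREAD_DEFERRED_MIN) {
    /* Deferred work is already due; wake up and purge now. */
} else if (ns == BACKGROUND_THREAD_DEFERRED_MAX) {
    /* Nothing is pending; the background thread can sleep indefinitely. */
} else {
    /* Otherwise sleep roughly `ns` time units before the next pass. */
}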
@@ -118,6 +118,25 @@ decay_epoch_duration_ns(const decay_t *decay) {
    return nstime_ns(&decay->interval);
}

static inline bool
decay_immediately(const decay_t *decay) {
    ssize_t decay_ms = decay_ms_read(decay);
    return decay_ms == 0;
}

static inline bool
decay_disabled(const decay_t *decay) {
    ssize_t decay_ms = decay_ms_read(decay);
    return decay_ms < 0;
}

/* Returns true if decay is enabled and done gradually. */
static inline bool
decay_gradually(const decay_t *decay) {
    ssize_t decay_ms = decay_ms_read(decay);
    return decay_ms > 0;
}

/*
 * Returns true if the passed in decay time setting is valid.
 * < -1 : invalid
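The three new predicates partition the possible decay_ms settings (< 0 disabled, == 0 immediate, > 0 gradual). A small illustration of how scheduling code might branch on them; the surrounding caller is hypothetical.

if (decay_disabled(decay)) {
    /* decay_ms < 0: never purge, so there is no deferred work to schedule. */
} else if (decay_immediately(decay)) {
    /* decay_ms == 0: purge as soon as pages become unused. */
} else {
    /* decay_ms > 0: purge gradually over the configured window. */
    assert(decay_gradually(decay));
}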
@@ -144,6 +163,12 @@ bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
 */
void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);

/*
 * Compute how many of 'npages_new' pages we would need to purge in 'time'.
 */
uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
    size_t npages_new);

/* Returns true if the epoch advanced and there are pages to purge. */
bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
    size_t current_npages);
@@ -200,6 +200,8 @@ ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
    bool deferral_allowed);
void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);

/******************************************************************************/
/*
@@ -24,6 +24,7 @@ struct pai_s {
    /* This function empties out list as a side-effect of being called. */
    void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
        edata_list_active_t *list);
    uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
};

/*
@@ -64,6 +65,11 @@ pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list) {
    self->dalloc_batch(tsdn, self, list);
}

static inline uint64_t
pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
    return self->time_until_deferred_work(tsdn, self);
}

/*
 * An implementation of batch allocation that simply calls alloc once for
 * each item in the list.
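Every pai_t implementation now has to provide the time_until_deferred_work callback consumed by the pai_time_until_deferred_work wrapper above. Here is a minimal sketch of a stub implementation for a PAI that has no deferred work; the pai_stub_* names are invented for illustration and do not appear in the commit.

static uint64_t
pai_stub_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
    /* This PAI never defers work, so the caller may sleep indefinitely. */
    return BACKGROUND_THREAD_DEFERRED_MAX;
}

/* Wiring the new hook into the vtable next to the existing callbacks. */
static pai_t pai_stub = {
    /* .alloc, .dalloc, .dalloc_batch, ... omitted for brevity. */
    .time_until_deferred_work = &pai_stub_time_until_deferred_work,
};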