Wake up background threads on demand
This change allows every allocator conforming to PAI to communicate that it deferred some work for the future. Without this, if a background thread goes into an indefinite sleep, there is no way to notify it about upcoming deferred work.
committed by Alexander Lapenkov
parent 97da57c13a
commit 8229cc77c5
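The idea described above can be illustrated with a small, self-contained sketch. This is not jemalloc code: the helper names my_pai_dalloc and maybe_wake_background_thread are hypothetical, and only the pattern (the callee reports deferred work through an out-parameter, the caller wakes the sleeping background thread) mirrors what the diff below introduces.

/*
 * Illustrative sketch only: a PAI-style deallocation that reports deferred
 * work through an out-parameter, and a caller that wakes the background
 * thread when such work was generated.
 */
#include <stdbool.h>
#include <stdio.h>

/* Callee: frees memory and records whether it left work for later. */
static void
my_pai_dalloc(void *ptr, bool *deferred_work_generated) {
	(void)ptr;
	/* Pretend the free created dirty pages that must be purged later. */
	*deferred_work_generated = true;
}

/* Stand-in for waking a sleeping background worker. */
static void
maybe_wake_background_thread(void) {
	printf("wake background thread: deferred work is pending\n");
}

int
main(void) {
	bool deferred_work_generated = false;
	my_pai_dalloc(NULL, &deferred_work_generated);
	if (deferred_work_generated) {
		/*
		 * Without this hook, an indefinitely sleeping thread would
		 * never learn about the newly deferred work.
		 */
		maybe_wake_background_thread();
	}
	return 0;
}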
@@ -42,7 +42,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
     pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
-void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
+void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
 edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool zero);
 void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
@@ -13,7 +13,7 @@ extern background_thread_info_t *background_thread_info;
 bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
 bool background_threads_enable(tsd_t *tsd);
 bool background_threads_disable(tsd_t *tsd);
-bool background_thread_running(background_thread_info_t* info);
+bool background_thread_is_started(background_thread_info_t* info);
 void background_thread_wakeup_early(background_thread_info_t *info,
     nstime_t *remaining_sleep);
 void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
@@ -20,7 +20,7 @@
 #define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
 
 #define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
-#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_C(-1)
+#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
 
 typedef enum {
 	background_thread_stopped,
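Presumably (this is an assumption, not something the diff states) these two constants act as sentinels for the "time until deferred work" value: BACKGROUND_THREAD_DEFERRED_MIN meaning work is already due, BACKGROUND_THREAD_DEFERRED_MAX meaning nothing is scheduled and the thread may sleep indefinitely. A minimal sketch under that assumption, with hypothetical helper names:

#include <stdbool.h>
#include <stdint.h>

#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX

/*
 * Hypothetical helpers: turn "nanoseconds until the next piece of deferred
 * work" into a sleep decision.  DEFERRED_MAX is read as "nothing scheduled,
 * sleep indefinitely"; DEFERRED_MIN as "work is already due".
 */
static bool
should_sleep_indefinitely(uint64_t ns_until_deferred_work) {
	return ns_until_deferred_work == BACKGROUND_THREAD_DEFERRED_MAX;
}

static bool
work_is_due_now(uint64_t ns_until_deferred_work) {
	return ns_until_deferred_work == BACKGROUND_THREAD_DEFERRED_MIN;
}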
@@ -136,6 +136,11 @@ struct hpa_shard_s {
 	 * stats.
 	 */
 	hpa_shard_nonderived_stats_t stats;
+
+	/*
+	 * Last time we performed purge on this shard.
+	 */
+	nstime_t last_purge;
 };
 
 /*
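A plausible reason for recording last_purge is to let the shard answer "how long until purging is due again?" when queried for deferred work. The following standalone sketch shows that arithmetic with plain nanosecond counters instead of jemalloc's nstime_t; the interval constant and helper name are invented for illustration.

#include <stdint.h>

/* Hypothetical purge interval: 5 seconds, in nanoseconds. */
#define PURGE_INTERVAL_NS (UINT64_C(5) * 1000 * 1000 * 1000)

/*
 * Given the timestamp of the last purge and the current time (both in
 * nanoseconds), return how long the background thread may still sleep
 * before the next purge is due.  Returns 0 if a purge is already overdue.
 */
static uint64_t
ns_until_next_purge(uint64_t last_purge_ns, uint64_t now_ns) {
	uint64_t elapsed = now_ns - last_purge_ns;
	if (elapsed >= PURGE_INTERVAL_NS) {
		return 0;	/* Purge immediately. */
	}
	return PURGE_INTERVAL_NS - elapsed;
}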
@@ -167,16 +167,17 @@ void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
 
 /* Gets an edata for the given allocation. */
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
-    size_t alignment, bool slab, szind_t szind, bool zero);
+    size_t alignment, bool slab, szind_t szind, bool zero,
+    bool *deferred_work_generated);
 /* Returns true on error, in which case nothing changed. */
 bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool zero);
+    size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated);
 /*
  * The same. Sets *generated_dirty to true if we produced new dirty pages, and
  * false otherwise.
  */
 bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool *generated_dirty);
+    size_t new_size, szind_t szind, bool *deferred_work_generated);
 /*
  * Frees the given edata back to the pa. Sets *generated_dirty if we produced
  * new dirty pages (well, we alwyas set it for now; but this need not be the
@@ -185,7 +186,7 @@ bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
  * consistent with the shrink pathway and our error codes here).
  */
 void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
-    bool *generated_dirty);
+    bool *deferred_work_generated);
 bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
     ssize_t decay_ms, pac_purge_eagerness_t eagerness);
 ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
@@ -7,7 +7,7 @@ typedef struct pai_s pai_t;
 struct pai_s {
 	/* Returns NULL on failure. */
 	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
-	    size_t alignment, bool zero);
+	    size_t alignment, bool zero, bool *deferred_work_generated);
 	/*
 	 * Returns the number of extents added to the list (which may be fewer
 	 * than requested, in case of OOM). The list should already be
@@ -15,15 +15,18 @@ struct pai_s {
 	 * the results are not necessarily zeroed.
 	 */
 	size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
-	    size_t nallocs, edata_list_active_t *results);
+	    size_t nallocs, edata_list_active_t *results,
+	    bool *deferred_work_generated);
 	bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-	    size_t old_size, size_t new_size, bool zero);
+	    size_t old_size, size_t new_size, bool zero,
+	    bool *deferred_work_generated);
 	bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-	    size_t old_size, size_t new_size);
-	void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata);
+	    size_t old_size, size_t new_size, bool *deferred_work_generated);
+	void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+	    bool *deferred_work_generated);
 	/* This function empties out list as a side-effect of being called. */
 	void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
-	    edata_list_active_t *list);
+	    edata_list_active_t *list, bool *deferred_work_generated);
 	uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
 };
 
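To make the contract of the widened vtable concrete, here is a self-contained toy analogue of a page-allocator interface whose deallocation hook reports deferred work through an out-parameter. Every name below (toy_pai_t, toy_edata_t, toy_dalloc) is invented for illustration and is deliberately much simpler than jemalloc's real pai_t.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for an extent descriptor. */
typedef struct toy_edata_s {
	void *addr;
	size_t size;
} toy_edata_t;

/* Toy page-allocator interface: hooks report deferred work via out-param. */
typedef struct toy_pai_s toy_pai_t;
struct toy_pai_s {
	void (*dalloc)(toy_pai_t *self, toy_edata_t *edata,
	    bool *deferred_work_generated);
};

/* A dalloc implementation that leaves dirty pages to be purged later. */
static void
toy_dalloc(toy_pai_t *self, toy_edata_t *edata,
    bool *deferred_work_generated) {
	(void)self;
	(void)edata;
	/* ... move the extent onto a "dirty, purge later" list ... */
	*deferred_work_generated = true;
}

int
main(void) {
	toy_pai_t pai = { .dalloc = toy_dalloc };
	toy_edata_t edata = { .addr = NULL, .size = 4096 };
	bool deferred = false;

	pai.dalloc(&pai, &edata, &deferred);
	if (deferred) {
		printf("deferred work generated; notify the background thread\n");
	}
	return 0;
}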
@@ -33,36 +36,43 @@ struct pai_s {
  */
 
 static inline edata_t *
-pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero) {
-	return self->alloc(tsdn, self, size, alignment, zero);
+pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
+    bool *deferred_work_generated) {
+	return self->alloc(tsdn, self, size, alignment, zero,
+	    deferred_work_generated);
 }
 
 static inline size_t
 pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
-    edata_list_active_t *results) {
-	return self->alloc_batch(tsdn, self, size, nallocs, results);
+    edata_list_active_t *results, bool *deferred_work_generated) {
+	return self->alloc_batch(tsdn, self, size, nallocs, results,
+	    deferred_work_generated);
 }
 
 static inline bool
 pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
-    size_t new_size, bool zero) {
-	return self->expand(tsdn, self, edata, old_size, new_size, zero);
+    size_t new_size, bool zero, bool *deferred_work_generated) {
+	return self->expand(tsdn, self, edata, old_size, new_size, zero,
+	    deferred_work_generated);
 }
 
 static inline bool
 pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
-    size_t new_size) {
-	return self->shrink(tsdn, self, edata, old_size, new_size);
+    size_t new_size, bool *deferred_work_generated) {
+	return self->shrink(tsdn, self, edata, old_size, new_size,
+	    deferred_work_generated);
 }
 
 static inline void
-pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
-	self->dalloc(tsdn, self, edata);
+pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+    bool *deferred_work_generated) {
+	self->dalloc(tsdn, self, edata, deferred_work_generated);
 }
 
 static inline void
-pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list) {
-	self->dalloc_batch(tsdn, self, list);
+pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
+    bool *deferred_work_generated) {
+	self->dalloc_batch(tsdn, self, list, deferred_work_generated);
 }
 
 static inline uint64_t
@@ -75,9 +85,9 @@ pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
  * each item in the list.
  */
 size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t nallocs, edata_list_active_t *results);
+    size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
 /* Ditto, for dalloc. */
 void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
-    edata_list_active_t *list);
+    edata_list_active_t *list, bool *deferred_work_generated);
 
 #endif /* JEMALLOC_INTERNAL_PAI_H */