Refactor arena->decay_* into arena->decay.* (arena_decay_t).

Jason Evans 2016-10-10 20:32:19 -07:00
parent e0164bc63c
commit ee0c74b77a
2 changed files with 91 additions and 84 deletions

include/jemalloc/internal/arena.h

@@ -31,6 +31,7 @@ typedef enum {
 typedef struct arena_slab_data_s arena_slab_data_t;
 typedef struct arena_bin_info_s arena_bin_info_t;
+typedef struct arena_decay_s arena_decay_t;
 typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
 typedef struct arena_tdata_s arena_tdata_t;
@@ -89,6 +90,56 @@ struct arena_bin_info_s {
 	bitmap_info_t	bitmap_info;
 };
 
+struct arena_decay_s {
+	/*
+	 * Approximate time in seconds from the creation of a set of unused
+	 * dirty pages until an equivalent set of unused dirty pages is purged
+	 * and/or reused.
+	 */
+	ssize_t		time;
+	/* decay_time / SMOOTHSTEP_NSTEPS. */
+	nstime_t	interval;
+	/*
+	 * Time at which the current decay interval logically started. We do
+	 * not actually advance to a new epoch until sometime after it starts
+	 * because of scheduling and computation delays, and it is even possible
+	 * to completely skip epochs. In all cases, during epoch advancement we
+	 * merge all relevant activity into the most recently recorded epoch.
+	 */
+	nstime_t	epoch;
+	/* decay_deadline randomness generator. */
+	uint64_t	jitter_state;
+	/*
+	 * Deadline for current epoch. This is the sum of decay_interval and
+	 * per epoch jitter which is a uniform random variable in
+	 * [0..decay_interval). Epochs always advance by precise multiples of
+	 * decay_interval, but we randomize the deadline to reduce the
+	 * likelihood of arenas purging in lockstep.
+	 */
+	nstime_t	deadline;
+	/*
+	 * Number of dirty pages at beginning of current epoch. During epoch
+	 * advancement we use the delta between decay_ndirty and ndirty to
+	 * determine how many dirty pages, if any, were generated, and record
+	 * the result in decay_backlog.
+	 */
+	size_t		ndirty;
+	/*
+	 * Memoized result of arena_decay_backlog_npages_limit() corresponding
+	 * to the current contents of decay_backlog, i.e. the limit on how many
+	 * pages are allowed to exist for the decay epochs.
+	 */
+	size_t		backlog_npages_limit;
+	/*
+	 * Trailing log of how many unused dirty pages were generated during
+	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+	 * element is the most recent epoch. Corresponding epoch times are
+	 * relative to decay_epoch.
+	 */
+	size_t		backlog[SMOOTHSTEP_NSTEPS];
+};
+
 struct arena_bin_s {
 	/* All operations on arena_bin_t fields require lock ownership. */
 	malloc_mutex_t	lock;
@@ -176,52 +227,8 @@ struct arena_s {
 	 */
 	size_t		ndirty;
 
-	/*
-	 * Approximate time in seconds from the creation of a set of unused
-	 * dirty pages until an equivalent set of unused dirty pages is purged
-	 * and/or reused.
-	 */
-	ssize_t		decay_time;
-	/* decay_time / SMOOTHSTEP_NSTEPS. */
-	nstime_t	decay_interval;
-	/*
-	 * Time at which the current decay interval logically started. We do
-	 * not actually advance to a new epoch until sometime after it starts
-	 * because of scheduling and computation delays, and it is even possible
-	 * to completely skip epochs. In all cases, during epoch advancement we
-	 * merge all relevant activity into the most recently recorded epoch.
-	 */
-	nstime_t	decay_epoch;
-	/* decay_deadline randomness generator. */
-	uint64_t	decay_jitter_state;
-	/*
-	 * Deadline for current epoch. This is the sum of decay_interval and
-	 * per epoch jitter which is a uniform random variable in
-	 * [0..decay_interval). Epochs always advance by precise multiples of
-	 * decay_interval, but we randomize the deadline to reduce the
-	 * likelihood of arenas purging in lockstep.
-	 */
-	nstime_t	decay_deadline;
-	/*
-	 * Number of dirty pages at beginning of current epoch. During epoch
-	 * advancement we use the delta between decay_ndirty and ndirty to
-	 * determine how many dirty pages, if any, were generated, and record
-	 * the result in decay_backlog.
-	 */
-	size_t		decay_ndirty;
-	/*
-	 * Memoized result of arena_decay_backlog_npages_limit() corresponding
-	 * to the current contents of decay_backlog, i.e. the limit on how many
-	 * pages are allowed to exist for the decay epochs.
-	 */
-	size_t		decay_backlog_npages_limit;
-	/*
-	 * Trailing log of how many unused dirty pages were generated during
-	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
-	 * element is the most recent epoch. Corresponding epoch times are
-	 * relative to decay_epoch.
-	 */
-	size_t		decay_backlog[SMOOTHSTEP_NSTEPS];
+	/* Decay-based purging state. */
+	arena_decay_t	decay;
 
 	/* Extant large allocations. */
 	ql_head(extent_t)	large;

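The backlog and backlog_npages_limit comments in the new struct above describe a smoothstep-weighted sum over per-epoch dirty-page counts; the computation itself, arena_decay_backlog_npages_limit(), appears in the src/arena.c hunks below. The following standalone sketch only illustrates that weighting and is not jemalloc source: NSTEPS, BFP, and the weight values are simplified stand-ins for SMOOTHSTEP_NSTEPS, SMOOTHSTEP_BFP, and the precomputed h_steps table.

/* Illustrative sketch, not jemalloc source. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSTEPS	4	/* stand-in for SMOOTHSTEP_NSTEPS */
#define BFP	24	/* stand-in for SMOOTHSTEP_BFP (fixed-point fraction bits) */

/*
 * Hypothetical fixed-point weights in [0, 1 << BFP); newer epochs (higher
 * indices) keep a larger fraction of their unused dirty pages.  The real
 * h_steps table is a precomputed smoothstep curve.
 */
static const uint64_t h_steps[NSTEPS] = {
	1u << 20, 1u << 22, 3u << 22, (1u << 24) - 1
};

static size_t
backlog_npages_limit(const size_t backlog[NSTEPS])
{
	uint64_t sum = 0;
	size_t i;

	/* Weighted sum of per-epoch counts, then drop the fixed-point scale. */
	for (i = 0; i < NSTEPS; i++)
		sum += (uint64_t)backlog[i] * h_steps[i];
	return ((size_t)(sum >> BFP));
}

int
main(void)
{
	/* backlog[NSTEPS-1] is the most recent epoch. */
	size_t backlog[NSTEPS] = {100, 100, 100, 100};

	printf("npages_limit = %zu\n", backlog_npages_limit(backlog));
	return (0);
}

Because older epochs carry smaller weights, the unused dirty pages they contributed stop counting toward the allowed total and become eligible for purging as epochs advance.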
src/arena.c

@@ -474,14 +474,14 @@ arena_decay_deadline_init(arena_t *arena)
 	 * Generate a new deadline that is uniformly random within the next
 	 * epoch after the current one.
 	 */
-	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
-	nstime_add(&arena->decay_deadline, &arena->decay_interval);
-	if (arena->decay_time > 0) {
+	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
+	nstime_add(&arena->decay.deadline, &arena->decay.interval);
+	if (arena->decay.time > 0) {
 		nstime_t jitter;
 
-		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
-		    nstime_ns(&arena->decay_interval), false));
-		nstime_add(&arena->decay_deadline, &jitter);
+		nstime_init(&jitter, prng_range(&arena->decay.jitter_state,
+		    nstime_ns(&arena->decay.interval), false));
+		nstime_add(&arena->decay.deadline, &jitter);
 	}
 }
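As the hunk above shows, the deadline is the epoch start plus one interval plus a uniformly random jitter in [0, interval), which keeps arenas from purging in lockstep. A minimal sketch of that idea follows; it is not jemalloc source, rand() stands in for prng_range(), and plain nanosecond integers stand in for nstime_t.

/* Illustrative sketch, not jemalloc source; assumes interval_ns > 0. */
#include <stdint.h>
#include <stdlib.h>

static uint64_t
decay_deadline_ns(uint64_t epoch_ns, uint64_t interval_ns)
{
	/* Jitter in [0, interval_ns); rand() stands in for prng_range(). */
	uint64_t jitter_ns = (uint64_t)rand() % interval_ns;

	/*
	 * Epochs advance by exact multiples of the interval; only the
	 * deadline within the next epoch is randomized.
	 */
	return (epoch_ns + interval_ns + jitter_ns);
}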
@@ -491,7 +491,7 @@ arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
 	assert(opt_purge == purge_mode_decay);
 
-	return (nstime_compare(&arena->decay_deadline, time) <= 0);
+	return (nstime_compare(&arena->decay.deadline, time) <= 0);
 }
 
 static size_t
@@ -516,7 +516,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
 	 */
 	sum = 0;
 	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
-		sum += arena->decay_backlog[i] * h_steps[i];
+		sum += arena->decay.backlog[i] * h_steps[i];
 	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
 
 	return (npages_limit_backlog);
@@ -533,39 +533,39 @@ arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
 	assert(arena_decay_deadline_reached(arena, time));
 
 	nstime_copy(&delta, time);
-	nstime_subtract(&delta, &arena->decay_epoch);
-	nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
+	nstime_subtract(&delta, &arena->decay.epoch);
+	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
 	assert(nadvance_u64 > 0);
 
 	/* Add nadvance_u64 decay intervals to epoch. */
-	nstime_copy(&delta, &arena->decay_interval);
+	nstime_copy(&delta, &arena->decay.interval);
 	nstime_imultiply(&delta, nadvance_u64);
-	nstime_add(&arena->decay_epoch, &delta);
+	nstime_add(&arena->decay.epoch, &delta);
 
 	/* Set a new deadline. */
 	arena_decay_deadline_init(arena);
 
 	/* Update the backlog. */
 	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
-		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
 		    sizeof(size_t));
 	} else {
 		size_t nadvance_z = (size_t)nadvance_u64;
 
 		assert((uint64_t)nadvance_z == nadvance_u64);
 
-		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
+		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
 		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
 		if (nadvance_z > 1) {
-			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
+			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
 			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
 		}
 	}
-	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
-	    arena->decay_ndirty : 0;
-	arena->decay_ndirty = arena->ndirty;
-	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
-	arena->decay_backlog_npages_limit =
+	ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? arena->ndirty -
+	    arena->decay.ndirty : 0;
+	arena->decay.ndirty = arena->ndirty;
+	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+	arena->decay.backlog_npages_limit =
 	    arena_decay_backlog_npages_limit(arena);
 }
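The backlog update in the hunk above shifts the per-epoch counts left by the number of epochs advanced, zero-fills the slots for any skipped epochs, and records the dirty pages generated since the previous epoch in the newest slot. A simplified standalone sketch of just that shift, not jemalloc source, with NSTEPS standing in for SMOOTHSTEP_NSTEPS:

/* Illustrative sketch, not jemalloc source. */
#include <stddef.h>
#include <string.h>

#define NSTEPS	8	/* stand-in for SMOOTHSTEP_NSTEPS */

static void
backlog_advance(size_t backlog[NSTEPS], size_t nadvance, size_t ndirty_delta)
{
	if (nadvance >= NSTEPS) {
		/* Every prior epoch aged out; clear all but the newest slot. */
		memset(backlog, 0, (NSTEPS - 1) * sizeof(size_t));
	} else {
		/* Shift surviving epochs toward index 0 (oldest). */
		memmove(backlog, &backlog[nadvance],
		    (NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			/* Zero the slots belonging to skipped epochs. */
			memset(&backlog[NSTEPS - nadvance], 0,
			    (nadvance - 1) * sizeof(size_t));
		}
	}
	/* Dirty pages generated during the epoch(s) just ended. */
	backlog[NSTEPS - 1] = ndirty_delta;
}

For example, with NSTEPS reduced to 4, advancing two epochs over a backlog of {5, 6, 7, 8} with 3 newly generated dirty pages yields {7, 8, 0, 3}.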
@@ -576,11 +576,11 @@ arena_decay_npages_limit(arena_t *arena)
 	assert(opt_purge == purge_mode_decay);
 
-	npages_limit = arena->decay_backlog_npages_limit;
+	npages_limit = arena->decay.backlog_npages_limit;
 
 	/* Add in any dirty pages created during the current epoch. */
-	if (arena->ndirty > arena->decay_ndirty)
-		npages_limit += arena->ndirty - arena->decay_ndirty;
+	if (arena->ndirty > arena->decay.ndirty)
+		npages_limit += arena->ndirty - arena->decay.ndirty;
 
 	return (npages_limit);
@@ -589,19 +589,19 @@ static void
 arena_decay_init(arena_t *arena, ssize_t decay_time)
 {
 
-	arena->decay_time = decay_time;
+	arena->decay.time = decay_time;
 	if (decay_time > 0) {
-		nstime_init2(&arena->decay_interval, decay_time, 0);
-		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
+		nstime_init2(&arena->decay.interval, decay_time, 0);
+		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
 	}
-	nstime_init(&arena->decay_epoch, 0);
-	nstime_update(&arena->decay_epoch);
-	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
+	nstime_init(&arena->decay.epoch, 0);
+	nstime_update(&arena->decay.epoch);
+	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
 	arena_decay_deadline_init(arena);
-	arena->decay_ndirty = arena->ndirty;
-	arena->decay_backlog_npages_limit = 0;
-	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+	arena->decay.ndirty = arena->ndirty;
+	arena->decay.backlog_npages_limit = 0;
+	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
 }
 
 static bool
@@ -621,7 +621,7 @@ arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
 	ssize_t decay_time;
 
 	malloc_mutex_lock(tsdn, &arena->lock);
-	decay_time = arena->decay_time;
+	decay_time = arena->decay.time;
 	malloc_mutex_unlock(tsdn, &arena->lock);
 
 	return (decay_time);
@@ -687,16 +687,16 @@ arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
 	assert(opt_purge == purge_mode_decay);
 
 	/* Purge all or nothing if the option is disabled. */
-	if (arena->decay_time <= 0) {
-		if (arena->decay_time == 0)
+	if (arena->decay.time <= 0) {
+		if (arena->decay.time == 0)
 			arena_purge_to_limit(tsdn, arena, 0);
 		return;
 	}
 
-	nstime_copy(&time, &arena->decay_epoch);
+	nstime_copy(&time, &arena->decay.epoch);
 	if (unlikely(nstime_update(&time))) {
 		/* Time went backwards. Force an epoch advance. */
-		nstime_copy(&time, &arena->decay_deadline);
+		nstime_copy(&time, &arena->decay.deadline);
 	}
 
 	if (arena_decay_deadline_reached(arena, &time))
@@ -1671,7 +1671,7 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
 	*nthreads += arena_nthreads_get(arena, false);
 	*dss = dss_prec_names[arena->dss_prec];
 	*lg_dirty_mult = arena->lg_dirty_mult;
-	*decay_time = arena->decay_time;
+	*decay_time = arena->decay.time;
 	*nactive += arena->nactive;
 	*ndirty += arena->ndirty;
 }