Refactor arena->decay_* into arena->decay.* (arena_decay_t).

Jason Evans 2016-10-10 20:32:19 -07:00
parent b732c395b7
commit 94e7ffa979
2 changed files with 91 additions and 84 deletions

@@ -42,6 +42,7 @@ typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
typedef struct arena_tdata_s arena_tdata_t;
@@ -257,6 +258,56 @@ struct arena_bin_info_s {
uint32_t reg0_offset;
};
struct arena_decay_s {
/*
* Approximate time in seconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
ssize_t time;
/* decay_time / SMOOTHSTEP_NSTEPS. */
nstime_t interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t epoch;
/* decay_deadline randomness generator. */
uint64_t jitter_state;
/*
* Deadline for current epoch. This is the sum of decay_interval and
* per epoch jitter which is a uniform random variable in
* [0..decay_interval). Epochs always advance by precise multiples of
* decay_interval, but we randomize the deadline to reduce the
* likelihood of arenas purging in lockstep.
*/
nstime_t deadline;
/*
* Number of dirty pages at beginning of current epoch. During epoch
* advancement we use the delta between decay_ndirty and ndirty to
* determine how many dirty pages, if any, were generated, and record
* the result in decay_backlog.
*/
size_t ndirty;
/*
* Memoized result of arena_decay_backlog_npages_limit() corresponding
* to the current contents of decay_backlog, i.e. the limit on how many
* pages are allowed to exist for the decay epochs.
*/
size_t backlog_npages_limit;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to decay_epoch.
*/
size_t backlog[SMOOTHSTEP_NSTEPS];
};
struct arena_bin_s {
/*
* All operations on runcur, runs, and stats require that lock be
@@ -394,52 +445,8 @@ struct arena_s {
arena_runs_dirty_link_t runs_dirty;
extent_node_t chunks_cache;
/*
* Approximate time in seconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
* and/or reused.
*/
ssize_t decay_time;
/* decay_time / SMOOTHSTEP_NSTEPS. */
nstime_t decay_interval;
/*
* Time at which the current decay interval logically started. We do
* not actually advance to a new epoch until sometime after it starts
* because of scheduling and computation delays, and it is even possible
* to completely skip epochs. In all cases, during epoch advancement we
* merge all relevant activity into the most recently recorded epoch.
*/
nstime_t decay_epoch;
/* decay_deadline randomness generator. */
uint64_t decay_jitter_state;
/*
* Deadline for current epoch. This is the sum of decay_interval and
* per epoch jitter which is a uniform random variable in
* [0..decay_interval). Epochs always advance by precise multiples of
* decay_interval, but we randomize the deadline to reduce the
* likelihood of arenas purging in lockstep.
*/
nstime_t decay_deadline;
/*
* Number of dirty pages at beginning of current epoch. During epoch
* advancement we use the delta between decay_ndirty and ndirty to
* determine how many dirty pages, if any, were generated, and record
* the result in decay_backlog.
*/
size_t decay_ndirty;
/*
* Memoized result of arena_decay_backlog_npages_limit() corresponding
* to the current contents of decay_backlog, i.e. the limit on how many
* pages are allowed to exist for the decay epochs.
*/
size_t decay_backlog_npages_limit;
/*
* Trailing log of how many unused dirty pages were generated during
* each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
* element is the most recent epoch. Corresponding epoch times are
* relative to decay_epoch.
*/
size_t decay_backlog[SMOOTHSTEP_NSTEPS];
/* Decay-based purging state. */
arena_decay_t decay;
/* Extant huge allocations. */
ql_head(extent_node_t) huge;
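
That is the end of the struct/typedef changes; the hunks that follow update the function bodies to match the new field names. Before those, it may help to see the backlog/limit computation that the decay.backlog and decay.backlog_npages_limit comments describe, in isolation: each of the last SMOOTHSTEP_NSTEPS epochs contributes its unpurged dirty pages, weighted by how far along the smoothstep curve that epoch has decayed. The standalone sketch below mirrors that computation; the four-entry h_steps table, SMOOTHSTEP_NSTEPS=4, and the helper name are illustrative stand-ins for jemalloc's generated smoothstep.h, not the real values.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the generated smoothstep.h: a tiny fixed-point
 * table (1.0 == 1 << SMOOTHSTEP_BFP) rising from near 0 to 1.  Entry i is
 * the fraction of backlog[i]'s pages still allowed to remain; backlog[0] is
 * the oldest tracked epoch, backlog[SMOOTHSTEP_NSTEPS-1] the most recent.
 */
#define SMOOTHSTEP_NSTEPS 4
#define SMOOTHSTEP_BFP 24
static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
    0x01a8000,    /* ~0.10 */
    0x0800000,    /*  0.50 */
    0x0e58000,    /* ~0.90 */
    0x1000000     /*  1.00 */
};

/*
 * Same shape as arena_decay_backlog_npages_limit(): weighted sum over the
 * backlog, then shift the fixed-point fraction back out.
 */
static size_t
decay_backlog_npages_limit(const size_t backlog[SMOOTHSTEP_NSTEPS])
{
    uint64_t sum = 0;
    unsigned i;

    for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
        sum += backlog[i] * h_steps[i];
    return ((size_t)(sum >> SMOOTHSTEP_BFP));
}

int
main(void)
{
    /* 1000 dirty pages generated in each of the last four epochs. */
    size_t backlog[SMOOTHSTEP_NSTEPS] = {1000, 1000, 1000, 1000};

    /* Prints 2500: recent epochs keep most of their pages, old ones few. */
    printf("%zu\n", decay_backlog_npages_limit(backlog));
    return (0);
}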

@@ -1187,14 +1187,14 @@ arena_decay_deadline_init(arena_t *arena)
* Generate a new deadline that is uniformly random within the next
* epoch after the current one.
*/
nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
nstime_add(&arena->decay_deadline, &arena->decay_interval);
if (arena->decay_time > 0) {
nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
nstime_add(&arena->decay.deadline, &arena->decay.interval);
if (arena->decay.time > 0) {
nstime_t jitter;
nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
nstime_ns(&arena->decay_interval)));
nstime_add(&arena->decay_deadline, &jitter);
nstime_init(&jitter, prng_range(&arena->decay.jitter_state,
nstime_ns(&arena->decay.interval)));
nstime_add(&arena->decay.deadline, &jitter);
}
}
@@ -1204,7 +1204,7 @@ arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
assert(opt_purge == purge_mode_decay);
return (nstime_compare(&arena->decay_deadline, time) <= 0);
return (nstime_compare(&arena->decay.deadline, time) <= 0);
}
static size_t
@@ -1229,7 +1229,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
*/
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay_backlog[i] * h_steps[i];
sum += arena->decay.backlog[i] * h_steps[i];
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
@@ -1246,39 +1246,39 @@ arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
assert(arena_decay_deadline_reached(arena, time));
nstime_copy(&delta, time);
nstime_subtract(&delta, &arena->decay_epoch);
nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
nstime_subtract(&delta, &arena->decay.epoch);
nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
assert(nadvance_u64 > 0);
/* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &arena->decay_interval);
nstime_copy(&delta, &arena->decay.interval);
nstime_imultiply(&delta, nadvance_u64);
nstime_add(&arena->decay_epoch, &delta);
nstime_add(&arena->decay.epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
size_t nadvance_z = (size_t)nadvance_u64;
assert((uint64_t)nadvance_z == nadvance_u64);
memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
(SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
if (nadvance_z > 1) {
memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
arena->decay_ndirty : 0;
arena->decay_ndirty = arena->ndirty;
arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
arena->decay_backlog_npages_limit =
ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? arena->ndirty -
arena->decay.ndirty : 0;
arena->decay.ndirty = arena->ndirty;
arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
arena->decay.backlog_npages_limit =
arena_decay_backlog_npages_limit(arena);
}
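
The epoch-advance hunk above is the part that is easiest to misread, so here is the same sliding-window update as a standalone sketch with a worked example. SMOOTHSTEP_NSTEPS is shrunk to 4 for illustration, the function name and parameters are hypothetical, and the arena locking and nstime bookkeeping are omitted.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SMOOTHSTEP_NSTEPS 4    /* illustrative; the generated value is larger */

/*
 * Slide the per-epoch backlog left by nadvance slots, zero the slots that
 * correspond to skipped epochs, and record the dirty pages generated since
 * the previous epoch in the newest slot.
 */
static void
decay_backlog_update(size_t backlog[SMOOTHSTEP_NSTEPS], size_t *decay_ndirty,
    uint64_t nadvance, size_t ndirty)
{
    size_t ndirty_delta;

    if (nadvance >= SMOOTHSTEP_NSTEPS) {
        /* So many epochs elapsed that no prior history survives. */
        memset(backlog, 0, (SMOOTHSTEP_NSTEPS-1) * sizeof(size_t));
    } else {
        size_t nadvance_z = (size_t)nadvance;

        memmove(backlog, &backlog[nadvance_z],
            (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
        if (nadvance_z > 1) {
            memset(&backlog[SMOOTHSTEP_NSTEPS - nadvance_z], 0,
                (nadvance_z-1) * sizeof(size_t));
        }
    }

    /* Dirty pages created during the epoch(s) that just ended. */
    ndirty_delta = (ndirty > *decay_ndirty) ? ndirty - *decay_ndirty : 0;
    *decay_ndirty = ndirty;
    backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}

int
main(void)
{
    size_t backlog[SMOOTHSTEP_NSTEPS] = {0};
    size_t decay_ndirty = 0;

    /* One epoch ends after 100 dirty pages were created... */
    decay_backlog_update(backlog, &decay_ndirty, 1, 100);
    /* ...then three more epochs elapse with no new dirty pages. */
    decay_backlog_update(backlog, &decay_ndirty, 3, 100);

    /* Prints "100 0 0 0": the 100 pages have slid into the oldest slot. */
    printf("%zu %zu %zu %zu\n", backlog[0], backlog[1], backlog[2],
        backlog[3]);
    return (0);
}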
@@ -1289,11 +1289,11 @@ arena_decay_npages_limit(arena_t *arena)
assert(opt_purge == purge_mode_decay);
npages_limit = arena->decay_backlog_npages_limit;
npages_limit = arena->decay.backlog_npages_limit;
/* Add in any dirty pages created during the current epoch. */
if (arena->ndirty > arena->decay_ndirty)
npages_limit += arena->ndirty - arena->decay_ndirty;
if (arena->ndirty > arena->decay.ndirty)
npages_limit += arena->ndirty - arena->decay.ndirty;
return (npages_limit);
}
@@ -1302,19 +1302,19 @@ static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{
arena->decay_time = decay_time;
arena->decay.time = decay_time;
if (decay_time > 0) {
nstime_init2(&arena->decay_interval, decay_time, 0);
nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
nstime_init2(&arena->decay.interval, decay_time, 0);
nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
}
nstime_init(&arena->decay_epoch, 0);
nstime_update(&arena->decay_epoch);
arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
nstime_init(&arena->decay.epoch, 0);
nstime_update(&arena->decay.epoch);
arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
arena_decay_deadline_init(arena);
arena->decay_ndirty = arena->ndirty;
arena->decay_backlog_npages_limit = 0;
memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
arena->decay.ndirty = arena->ndirty;
arena->decay.backlog_npages_limit = 0;
memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
static bool
@@ -1334,7 +1334,7 @@ arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
ssize_t decay_time;
malloc_mutex_lock(tsdn, &arena->lock);
decay_time = arena->decay_time;
decay_time = arena->decay.time;
malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
@@ -1400,16 +1400,16 @@ arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
assert(opt_purge == purge_mode_decay);
/* Purge all or nothing if the option is disabled. */
if (arena->decay_time <= 0) {
if (arena->decay_time == 0)
if (arena->decay.time <= 0) {
if (arena->decay.time == 0)
arena_purge_to_limit(tsdn, arena, 0);
return;
}
nstime_copy(&time, &arena->decay_epoch);
nstime_copy(&time, &arena->decay.epoch);
if (unlikely(nstime_update(&time))) {
/* Time went backwards. Force an epoch advance. */
nstime_copy(&time, &arena->decay_deadline);
nstime_copy(&time, &arena->decay.deadline);
}
if (arena_decay_deadline_reached(arena, &time))
@@ -3323,7 +3323,7 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay_time;
*decay_time = arena->decay.time;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
}
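
For completeness, the decay time that arena_decay_time_get() and the stats merge read out of arena->decay.time is, in the jemalloc 4.x releases this commit targets, exposed to applications through the mallctl namespace and through MALLOC_CONF. The sketch below shows that path from the application side; the "arena.0.decay_time" control name and the MALLOC_CONF keys are recalled from the 4.x documentation rather than taken from this diff, so treat them as assumptions.

#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/*
 * Read, then adjust, arena 0's decay time.  Run with something like
 * MALLOC_CONF="purge:decay,decay_time:10" so decay-based purging is active.
 * A decay time of 0 purges unused dirty pages immediately; -1 disables
 * purging (matching the decay.time <= 0 handling in the hunks above).
 */
int
main(void)
{
    ssize_t decay_time;
    size_t sz = sizeof(decay_time);

    /* Backed by arena_decay_time_get() in the diff above. */
    if (mallctl("arena.0.decay_time", &decay_time, &sz, NULL, 0) != 0) {
        fprintf(stderr, "mallctl read failed\n");
        return (1);
    }
    printf("arena 0 decay_time: %zd seconds\n", decay_time);

    /* The corresponding setter is not part of the hunks shown above. */
    decay_time = 30;
    if (mallctl("arena.0.decay_time", NULL, NULL, &decay_time,
        sizeof(decay_time)) != 0) {
        fprintf(stderr, "mallctl write failed\n");
        return (1);
    }
    return (0);
}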