Introduce decay module and put decay objects in PA

Committed by: David Goldblatt
Parent: 497836dbc8
Commit: 7b62885476
@@ -134,8 +134,8 @@ arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
 JEMALLOC_ALWAYS_INLINE void
 arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
-	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
-	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
+	malloc_mutex_assert_not_owner(tsdn, &arena->pa_shard.decay_dirty.mtx);
+	malloc_mutex_assert_not_owner(tsdn, &arena->pa_shard.decay_muzzy.mtx);
 
 	arena_decay_ticks(tsdn, arena, 1);
 }
 
@@ -15,69 +15,8 @@
+#include "jemalloc/internal/pa.h"
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/ticker.h"
 
-struct arena_decay_s {
-	/* Synchronizes all non-atomic fields. */
-	malloc_mutex_t mtx;
-	/*
-	 * True if a thread is currently purging the extents associated with
-	 * this decay structure.
-	 */
-	bool purging;
-	/*
-	 * Approximate time in milliseconds from the creation of a set of unused
-	 * dirty pages until an equivalent set of unused dirty pages is purged
-	 * and/or reused.
-	 */
-	atomic_zd_t time_ms;
-	/* time / SMOOTHSTEP_NSTEPS. */
-	nstime_t interval;
-	/*
-	 * Time at which the current decay interval logically started. We do
-	 * not actually advance to a new epoch until sometime after it starts
-	 * because of scheduling and computation delays, and it is even possible
-	 * to completely skip epochs. In all cases, during epoch advancement we
-	 * merge all relevant activity into the most recently recorded epoch.
-	 */
-	nstime_t epoch;
-	/* Deadline randomness generator. */
-	uint64_t jitter_state;
-	/*
-	 * Deadline for current epoch. This is the sum of interval and per
-	 * epoch jitter which is a uniform random variable in [0..interval).
-	 * Epochs always advance by precise multiples of interval, but we
-	 * randomize the deadline to reduce the likelihood of arenas purging in
-	 * lockstep.
-	 */
-	nstime_t deadline;
-	/*
-	 * Number of unpurged pages at beginning of current epoch. During epoch
-	 * advancement we use the delta between arena->decay_*.nunpurged and
-	 * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
-	 * if any, were generated.
-	 */
-	size_t nunpurged;
-	/*
-	 * Trailing log of how many unused dirty pages were generated during
-	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
-	 * element is the most recent epoch. Corresponding epoch times are
-	 * relative to epoch.
-	 */
-	size_t backlog[SMOOTHSTEP_NSTEPS];
-
-	/*
-	 * Pointer to associated stats. These stats are embedded directly in
-	 * the arena's stats due to how stats structures are shared between the
-	 * arena and ctl code.
-	 *
-	 * Synchronization: Same as associated arena's stats field.
-	 */
-	pa_shard_decay_stats_t *stats;
-	/* Peak number of pages in associated extents. Used for debug only. */
-	uint64_t ceil_npages;
-};
-
 struct arena_s {
 	/*
 	 * Number of threads currently assigned to this arena. Each thread has
@@ -147,15 +86,6 @@ struct arena_s {
 	/* The page-level allocator shard this arena uses. */
 	pa_shard_t pa_shard;
-
-	/*
-	 * Decay-based purging state, responsible for scheduling extent state
-	 * transitions.
-	 *
-	 * Synchronization: internal.
-	 */
-	arena_decay_t decay_dirty; /* dirty --> muzzy */
-	arena_decay_t decay_muzzy; /* muzzy --> retained */
 
 	/*
 	 * bins is used to store heaps of free regions.
 	 *
@@ -13,7 +13,7 @@ bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
 bool background_threads_enable(tsd_t *tsd);
 bool background_threads_disable(tsd_t *tsd);
 void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, size_t npages_new);
+    decay_t *decay, size_t npages_new);
 void background_thread_prefork0(tsdn_t *tsdn);
 void background_thread_prefork1(tsdn_t *tsdn);
 void background_thread_postfork_parent(tsdn_t *tsdn);
@@ -55,7 +55,7 @@ arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
 	    arena_background_thread_info_get(arena);
 	if (background_thread_indefinite_sleep(info)) {
 		background_thread_interval_check(tsdn, arena,
-		    &arena->decay_dirty, 0);
+		    &arena->pa_shard.decay_dirty, 0);
 	}
 }
 
include/jemalloc/internal/decay.h (new file, 66 lines)
@@ -0,0 +1,66 @@
+#ifndef JEMALLOC_INTERNAL_DECAY_H
+#define JEMALLOC_INTERNAL_DECAY_H
+
+#include "jemalloc/internal/smoothstep.h"
+
+/*
+ * The decay_t computes the number of pages we should purge at any given time.
+ * Page allocators inform a decay object when pages enter a decay-able state
+ * (i.e. dirty or muzzy), and query it to determine how many pages should be
+ * purged at any given time.
+ */
+typedef struct decay_s decay_t;
+struct decay_s {
+	/* Synchronizes all non-atomic fields. */
+	malloc_mutex_t mtx;
+	/*
+	 * True if a thread is currently purging the extents associated with
+	 * this decay structure.
+	 */
+	bool purging;
+	/*
+	 * Approximate time in milliseconds from the creation of a set of unused
+	 * dirty pages until an equivalent set of unused dirty pages is purged
+	 * and/or reused.
+	 */
+	atomic_zd_t time_ms;
+	/* time / SMOOTHSTEP_NSTEPS. */
+	nstime_t interval;
+	/*
+	 * Time at which the current decay interval logically started. We do
+	 * not actually advance to a new epoch until sometime after it starts
+	 * because of scheduling and computation delays, and it is even possible
+	 * to completely skip epochs. In all cases, during epoch advancement we
+	 * merge all relevant activity into the most recently recorded epoch.
+	 */
+	nstime_t epoch;
+	/* Deadline randomness generator. */
+	uint64_t jitter_state;
+	/*
+	 * Deadline for current epoch. This is the sum of interval and per
+	 * epoch jitter which is a uniform random variable in [0..interval).
+	 * Epochs always advance by precise multiples of interval, but we
+	 * randomize the deadline to reduce the likelihood of arenas purging in
+	 * lockstep.
+	 */
+	nstime_t deadline;
+	/*
+	 * Number of unpurged pages at beginning of current epoch. During epoch
+	 * advancement we use the delta between arena->decay_*.nunpurged and
+	 * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
+	 * if any, were generated.
+	 */
+	size_t nunpurged;
+	/*
+	 * Trailing log of how many unused dirty pages were generated during
+	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+	 * element is the most recent epoch. Corresponding epoch times are
+	 * relative to epoch.
+	 */
+	size_t backlog[SMOOTHSTEP_NSTEPS];
+
+	/* Peak number of pages in associated extents. Used for debug only. */
+	uint64_t ceil_npages;
+};
+
+#endif /* JEMALLOC_INTERNAL_DECAY_H */
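
Aside, not part of the diff: the field comments above describe the whole scheme, and a compact sketch may help. The code below is purely illustrative (decay_sketch_t, decay_epoch_advance_sketch, and decay_npages_limit_sketch are invented names, and a plain uint64_t nanosecond clock stands in for nstime_t); it assumes the default SMOOTHSTEP_NSTEPS of 200 and SMOOTHSTEP_BFP of 24 from smoothstep.h.

/* Illustrative sketch only -- hypothetical names, not jemalloc API. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SMOOTHSTEP_NSTEPS 200 /* default table size in smoothstep.h */
#define SMOOTHSTEP_BFP 24     /* fixed-point bits of the table entries */

typedef struct {
	uint64_t epoch_ns;    /* stand-in for the nstime_t epoch */
	uint64_t interval_ns; /* time / SMOOTHSTEP_NSTEPS */
	uint64_t deadline_ns; /* epoch + interval + jitter */
	size_t nunpurged;     /* unpurged pages at start of epoch */
	size_t backlog[SMOOTHSTEP_NSTEPS];
} decay_sketch_t;

/*
 * Epoch advancement: once the jittered deadline passes, advance by a whole
 * number of intervals, shift the backlog, and record the pages that became
 * dirty since the previous epoch in the newest slot.
 */
static void
decay_epoch_advance_sketch(decay_sketch_t *d, uint64_t now_ns,
    size_t npages_current, uint64_t jitter_ns /* in [0..interval) */) {
	if (now_ns < d->deadline_ns) {
		return; /* Current epoch still open; nothing to do. */
	}
	/* Epochs advance by precise multiples of interval... */
	size_t nadvance = (size_t)((now_ns - d->epoch_ns) / d->interval_ns);
	d->epoch_ns += (uint64_t)nadvance * d->interval_ns;
	/* ...but the deadline is randomized to avoid lockstep purging. */
	d->deadline_ns = d->epoch_ns + d->interval_ns + jitter_ns;

	/* Shift the trailing log; completely skipped epochs contribute 0. */
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		memset(d->backlog, 0, sizeof(d->backlog));
	} else {
		memmove(d->backlog, &d->backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		memset(&d->backlog[SMOOTHSTEP_NSTEPS - nadvance], 0,
		    nadvance * sizeof(size_t));
	}
	/* Merge all activity since the last epoch into the newest slot. */
	d->backlog[SMOOTHSTEP_NSTEPS - 1] = npages_current > d->nunpurged ?
	    npages_current - d->nunpurged : 0;
	d->nunpurged = npages_current;
}

/*
 * Pages allowed to remain unpurged: each backlog slot is weighted by the
 * smoothstep curve, so the older a slot's dirty pages, the fewer survive.
 */
static size_t
decay_npages_limit_sketch(const decay_sketch_t *d,
    const uint64_t h_steps[SMOOTHSTEP_NSTEPS]) {
	uint64_t sum = 0;
	for (size_t i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += (uint64_t)d->backlog[i] * h_steps[i];
	}
	return (size_t)(sum >> SMOOTHSTEP_BFP);
}
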
@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_PA_H
 #define JEMALLOC_INTERNAL_PA_H
 
+#include "jemalloc/internal/decay.h"
 #include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/edata_cache.h"
 #include "jemalloc/internal/lockedint.h"
@@ -10,6 +11,16 @@
  * allocations.
  */
 
+typedef struct pa_shard_decay_stats_s pa_shard_decay_stats_t;
+struct pa_shard_decay_stats_s {
+	/* Total number of purge sweeps. */
+	locked_u64_t npurge;
+	/* Total number of madvise calls made. */
+	locked_u64_t nmadvise;
+	/* Total number of pages purged. */
+	locked_u64_t purged;
+};
+
 /*
  * The stats for a particular pa_shard. Because of the way the ctl module
  * handles stats epoch data collection (it has its own arena_stats, and merges
@@ -21,16 +32,6 @@
  * are the ones that are not maintained on their own; instead, their values are
  * derived during those stats merges.
  */
-typedef struct pa_shard_decay_stats_s pa_shard_decay_stats_t;
-struct pa_shard_decay_stats_s {
-	/* Total number of purge sweeps. */
-	locked_u64_t npurge;
-	/* Total number of madvise calls made. */
-	locked_u64_t nmadvise;
-	/* Total number of pages purged. */
-	locked_u64_t purged;
-};
-
 typedef struct pa_shard_stats_s pa_shard_stats_t;
 struct pa_shard_stats_s {
 	pa_shard_decay_stats_t decay_dirty;
@@ -70,6 +71,15 @@ struct pa_shard_s {
 
 	malloc_mutex_t *stats_mtx;
 	pa_shard_stats_t *stats;
+
+	/*
+	 * Decay-based purging state, responsible for scheduling extent state
+	 * transitions.
+	 *
+	 * Synchronization: internal.
+	 */
+	decay_t decay_dirty; /* dirty --> muzzy */
+	decay_t decay_muzzy; /* muzzy --> retained */
 };
 
 static inline void
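
Aside, not part of the diff: the net effect of this commit is that decay state is owned by the page allocator shard rather than the arena, so arena code now reaches it via arena->pa_shard, as in the assert and background-thread hunks above. A compilable miniature of the new layout (all *_min_t names are invented stand-ins; the real structs carry far more state):

/* Miniature of the ownership chain after this commit -- illustration only. */
typedef struct { int mtx; } decay_min_t;  /* stands in for decay_t */
typedef struct {
	decay_min_t decay_dirty;          /* dirty --> muzzy */
	decay_min_t decay_muzzy;          /* muzzy --> retained */
} pa_shard_min_t;
typedef struct {
	pa_shard_min_t pa_shard;          /* decay state now lives in the shard */
} arena_min_t;

/* Call sites migrate accordingly: */
static int *
arena_decay_dirty_mtx_min(arena_min_t *arena) {
	/* was: &arena->decay_dirty.mtx */
	return &arena->pa_shard.decay_dirty.mtx;
}

Presumably this keeps purging policy co-located with the page allocator as it is split out of the arena, so later PA refactors need not touch arena callers.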