HPA: Introduce a redesigned hpa_central_t.

For now, this only handles allocating virtual address space to shards, with no
reuse. This is just the framework, though; it will change over time.
David Goldblatt
2021-05-07 13:54:26 -07:00
committed by David Goldblatt
parent e09eac1d4e
commit d93eef2f40
10 changed files with 257 additions and 128 deletions
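
Taken together, the prototypes added in this commit split HPA initialization into a process-wide step and a per-shard step: one hpa_central_t is set up once, and each hpa_shard_t is then pointed back at it. The sketch below illustrates that shape; the wrapper function, its arguments, and the loop are illustrative and not code from the commit.

    #include "jemalloc/internal/hpa.h"

    /*
     * Sketch of the intended ownership: one hpa_central_t shared by many
     * hpa_shard_t's.  Assumes the caller already set up base, emap, and
     * edata_cache; error handling is abbreviated.
     */
    static bool
    hpa_boot_sketch(hpa_central_t *central, hpa_shard_t *shards,
        unsigned nshards, base_t *base, emap_t *emap,
        edata_cache_t *edata_cache, const hpa_hooks_t *hooks,
        const hpa_shard_opts_t *opts) {
        /* The central allocator is initialized exactly once... */
        if (hpa_central_init(central, base, hooks)) {
            return true;
        }
        /* ...and every shard keeps a pointer back to it. */
        for (unsigned i = 0; i < nshards; i++) {
            if (hpa_shard_init(&shards[i], central, emap, base,
                edata_cache, /* ind */ i, opts)) {
                return true;
            }
        }
        return false;
    }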

View File

@@ -99,7 +99,7 @@ bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
void **ptrs, size_t nfill, bool zero);
void arena_boot(sc_data_t *sc_data);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
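
arena_boot() changes from void to bool (true on error) and now receives the boot-time base allocator plus a flag saying whether the HPA is in use. A plausible reading, sketched below, is that these get forwarded to the new pa_central_init() declared later in this commit; the global's name, the use of hpa_hooks_default, and the body are assumptions, not the commit's actual arena.c code.

    /* Assumed name for the process-wide pa_central_t. */
    pa_central_t arena_pa_central_global;

    bool
    arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
        /* ... existing size-class setup, unchanged ... */
        return pa_central_init(&arena_pa_central_global, base, hpa,
            &hpa_hooks_default);
    }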

View File

@@ -7,6 +7,37 @@
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"
typedef struct hpa_central_s hpa_central_t;
struct hpa_central_s {
/*
* The mutex guarding most of the operations on the central data
* structure.
*/
malloc_mutex_t mtx;
/*
* Guards expansion of eden. We separate this from the regular mutex so
* that cheaper operations can still continue while we're doing the OS
* call.
*/
malloc_mutex_t grow_mtx;
/*
* Either NULL (if empty), or some integer multiple of a
* hugepage-aligned number of hugepages. We carve them off one at a
* time to satisfy new pageslab requests.
*
* Guarded by grow_mtx.
*/
void *eden;
size_t eden_len;
/* Source for metadata. */
base_t *base;
/* Number of grow operations done on this hpa_central_t. */
uint64_t age_counter;
/* The HPA hooks. */
hpa_hooks_t hooks;
};
typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
struct hpa_shard_nonderived_stats_s {
/*
@@ -52,19 +83,20 @@ struct hpa_shard_s {
* pointer to the hpa_shard_t.
*/
pai_t pai;
malloc_mutex_t grow_mtx;
/* The central allocator we get our hugepages from. */
hpa_central_t *central;
/* Protects most of this shard's state. */
malloc_mutex_t mtx;
/*
* Guards the shard's access to the central allocator (preventing
* multiple threads operating on this shard from accessing the central
* allocator).
*/
malloc_mutex_t grow_mtx;
/* The base metadata allocator. */
base_t *base;
/*
* The HPA hooks for this shard. Eventually, once we have the
* hpa_central_t back, these should live there (since it doesn't make
* sense for different shards on the same hpa_central_t to have
* different hooks).
*/
hpa_hooks_t hooks;
/*
* This edata cache is the one we use when allocating a small extent
* from a pageslab. The pageslab itself comes from the centralized
@@ -81,18 +113,13 @@ struct hpa_shard_s {
*/
uint64_t age_counter;
/*
* Either NULL (if empty), or some integer multiple of a
* hugepage-aligned number of hugepages. We carve them off one at a
* time to satisfy new pageslab requests.
*
* Guarded by grow_mtx.
*/
void *eden;
size_t eden_len;
/* The arena ind we're associated with. */
unsigned ind;
/*
* Our emap. This is just a cache of the emap pointer in the associated
* hpa_central.
*/
emap_t *emap;
/* The configuration choices for this hpa shard. */
@@ -117,8 +144,9 @@ struct hpa_shard_s {
* just that it can function properly given the system it's running on.
*/
bool hpa_supported();
bool hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
edata_cache_t *edata_cache, unsigned ind, const hpa_hooks_t *hooks,
bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
base_t *base, edata_cache_t *edata_cache, unsigned ind,
const hpa_shard_opts_t *opts);
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
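
The struct comments above describe the eden mechanism that moves from the shard into the central allocator: grow_mtx covers the slow OS mapping call, while mtx keeps protecting the cheaper state so other operations can proceed during a grow. A rough sketch of the carve path those comments imply follows; the batch size, the hooks.map interface, and the function name are assumptions rather than the commit's actual hpa_central.c code.

    static void *
    hpa_central_carve_sketch(tsdn_t *tsdn, hpa_central_t *central) {
        void *ret = NULL;
        malloc_mutex_lock(tsdn, &central->grow_mtx);
        if (central->eden == NULL || central->eden_len < HUGEPAGE) {
            /*
             * Refill eden with several hugepages in one OS call; the
             * batch size and the map hook's exact signature are assumed.
             */
            size_t grow_len = 4 * HUGEPAGE;
            void *new_eden = central->hooks.map(grow_len);
            if (new_eden != NULL) {
                central->eden = new_eden;
                central->eden_len = grow_len;
                central->age_counter++;
            }
        }
        if (central->eden != NULL && central->eden_len >= HUGEPAGE) {
            /* Carve one hugepage off the front for a new pageslab. */
            ret = central->eden;
            central->eden = (void *)((uintptr_t)central->eden + HUGEPAGE);
            central->eden_len -= HUGEPAGE;
        }
        malloc_mutex_unlock(tsdn, &central->grow_mtx);
        return ret;
    }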

View File

@@ -20,6 +20,11 @@
* others will be coming soon.
*/
typedef struct pa_central_s pa_central_t;
struct pa_central_s {
hpa_central_t hpa;
};
/*
* The stats for a particular pa_shard. Because of the way the ctl module
* handles stats epoch data collection (it has its own arena_stats, and merges
@@ -61,6 +66,9 @@ struct pa_shard_stats_s {
*/
typedef struct pa_shard_s pa_shard_t;
struct pa_shard_s {
/* The central PA this shard is associated with. */
pa_central_t *central;
/*
* Number of pages in active extents.
*
@@ -76,6 +84,7 @@ struct pa_shard_s {
* for those allocations.
*/
atomic_b_t use_hpa;
/*
* If we never used the HPA to begin with, it wasn't initialized, and so
* we shouldn't try to e.g. acquire its mutexes during fork. This
@@ -121,18 +130,21 @@ pa_shard_ehooks_get(pa_shard_t *shard) {
}
/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
nstime_t *cur_time, size_t oversize_threshold, ssize_t dirty_decay_ms,
ssize_t muzzy_decay_ms);
bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
hpa_hooks_t *hpa_hooks);
/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
/*
* This isn't exposed to users; we allow late enablement of the HPA shard so
* that we can boot without worrying about the HPA, then turn it on in a0.
*/
bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
const hpa_hooks_t *hpa_hooks, const hpa_shard_opts_t *hpa_opts,
const sec_opts_t *hpa_sec_opts);
const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
/*
* We stop using the HPA when custom extent hooks are installed, but still
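
On the pa side, pa_central_t is for now just a wrapper around the hpa_central_t, and every pa_shard_t records which central it belongs to. A plausible shape for the new pa_central_init() is sketched below (an assumption, not necessarily the commit's pa.c):

    bool
    pa_central_init(pa_central_t *central, base_t *base, bool hpa,
        hpa_hooks_t *hpa_hooks) {
        /* The embedded hpa_central_t is only set up when the HPA is on. */
        if (hpa && hpa_central_init(&central->hpa, base, hpa_hooks)) {
            return true;
        }
        return false;
    }

This lines up with pa_shard_enable_hpa() losing its hpa_hooks argument: the hooks now travel with the central object rather than with each shard.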

View File

@@ -52,8 +52,8 @@ enum witness_rank_e {
WITNESS_RANK_EXTENTS,
WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS,
WITNESS_RANK_HPA_GROW,
WITNESS_RANK_HPA,
WITNESS_RANK_HPA_CENTRAL_GROW,
WITNESS_RANK_HPA_CENTRAL,
WITNESS_RANK_EDATA_CACHE,
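
The renamed witness ranks presumably back the two mutexes now living in hpa_central_t, with the grow lock keeping the lower rank, consistent with taking grow_mtx before mtx. A sketch of how hpa_central_init() might register the two locks follows; the helper and the mutex name strings are assumptions.

    static bool
    hpa_central_mtx_init_sketch(hpa_central_t *central) {
        /* grow_mtx has the lower rank, so it is acquired before mtx. */
        if (malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
            WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive)) {
            return true;
        }
        if (malloc_mutex_init(&central->mtx, "hpa_central",
            WITNESS_RANK_HPA_CENTRAL, malloc_mutex_rank_exclusive)) {
            return true;
        }
        return false;
    }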