PA: Use an SEC in front of the HPA shard.

This commit is contained in:
David Goldblatt
2020-10-16 13:14:59 -07:00
committed by David Goldblatt
parent ea51e97bb8
commit 6599651aee
15 changed files with 141 additions and 41 deletions

View File

@@ -28,7 +28,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_data_t *bstats, arena_stats_large_t *lstats,
pac_estats_t *estats, hpa_shard_stats_t *hpastats);
pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
@@ -99,6 +99,7 @@ void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

View File

@@ -46,6 +46,7 @@ typedef struct ctl_arena_stats_s {
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
pac_estats_t estats[SC_NPSIZES];
hpa_shard_stats_t hpastats;
sec_stats_t secstats;
} ctl_arena_stats_t;
typedef struct ctl_stats_s {

View File

@@ -90,10 +90,10 @@ void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
/*
* We share the fork ordering with the PA and arena prefork handling; that's why
* these are 2 and 3 rather than 0 or 1.
* these are 3 and 4 rather than 0 and 1.
*/
void hpa_shard_prefork2(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
@@ -103,7 +103,7 @@ void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
* so it needs to be lower in the witness ordering, but it's also logically
* global and not tied to any particular arena.
*/
void hpa_prefork3(tsdn_t *tsdn, hpa_t *hpa);
void hpa_prefork4(tsdn_t *tsdn, hpa_t *hpa);
void hpa_postfork_parent(tsdn_t *tsdn, hpa_t *hpa);
void hpa_postfork_child(tsdn_t *tsdn, hpa_t *hpa);

View File

@@ -17,6 +17,11 @@ extern size_t opt_hpa_slab_goal;
extern size_t opt_hpa_slab_max_alloc;
extern size_t opt_hpa_small_max;
extern size_t opt_hpa_large_min;
extern size_t opt_hpa_sec_max_alloc;
extern size_t opt_hpa_sec_max_bytes;
extern size_t opt_hpa_sec_nshards;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;

View File

@@ -33,7 +33,8 @@ typedef enum {
OP(base) \
OP(tcache_list) \
OP(hpa_shard) \
OP(hpa_shard_grow)
OP(hpa_shard_grow) \
OP(hpa_sec)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,

View File

@@ -10,6 +10,7 @@
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/pac.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/sec.h"
/*
* The page allocator; responsible for acquiring pages of memory for
@@ -85,7 +86,12 @@ struct pa_shard_s {
/* Allocates from a PAC. */
pac_t pac;
/* Allocates from a HPA. */
/*
* We place a small extent cache in front of the HPA, since we intend
* these configurations to use many fewer arenas, and therefore have a
* higher risk of hot locks.
*/
sec_t hpa_sec;
hpa_shard_t hpa_shard;
/* The source of edata_t objects. */
@@ -124,18 +130,20 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
* that we can boot without worrying about the HPA, then turn it on in a0.
*/
bool pa_shard_enable_hpa(pa_shard_t *shard, hpa_t *hpa, size_t ps_goal,
size_t ps_alloc_max, size_t small_max, size_t large_min);
size_t ps_alloc_max, size_t small_max, size_t large_min, size_t sec_nshards,
size_t sec_alloc_max, size_t sec_bytes_max);
/*
* We stop using the HPA when custom extent hooks are installed, but still
* redirect deallocations to it.
*/
void pa_shard_disable_hpa(pa_shard_t *shard);
void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
/*
* This does the PA-specific parts of arena reset (i.e. freeing all active
* allocations).
*/
void pa_shard_reset(pa_shard_t *shard);
void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
/*
* Destroy all the remaining retained extents. Should only be called after
* decaying all active, dirty, and muzzy extents to the retained state, as the
@@ -184,6 +192,7 @@ void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
@@ -192,7 +201,8 @@ void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
hpa_shard_stats_t *hpa_stats_out, size_t *resident);
hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
size_t *resident);
/*
* Reads the PA-owned mutex stats into the output stats array, at the