#ifndef JEMALLOC_INTERNAL_HPA_H
#define JEMALLOC_INTERNAL_HPA_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/hpa_central.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"

typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
struct hpa_shard_nonderived_stats_s {
	/*
	 * The number of times we've purged within a hugepage.
	 *
	 * Guarded by mtx.
	 */
	uint64_t npurge_passes;
	/*
	 * The number of individual purge calls we perform (which should always
	 * be at least as large as npurge_passes, since each pass purges at
	 * least one extent within a hugepage).
	 *
	 * Guarded by mtx.
	 */
	uint64_t npurges;

	/*
	 * The number of times we've hugified a pageslab.
	 *
	 * Guarded by mtx.
	 */
	uint64_t nhugifies;
	/*
	 * The number of times we've dehugified a pageslab.
	 *
	 * Guarded by mtx.
	 */
	uint64_t ndehugifies;
};
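/*
 * Illustrative sketch (not part of the interface): given the comments above,
 * a reader of these counters could sanity-check a snapshot roughly as
 * follows, where "snap" is a hypothetical local copy taken under mtx:
 *
 *	assert(snap.npurges >= snap.npurge_passes);
 */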
/* Completely derived; only used by CTL. */
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
struct hpa_shard_stats_s {
	psset_stats_t psset_stats;
	hpa_shard_nonderived_stats_t nonderived_stats;
};

typedef struct hpa_shard_s hpa_shard_t;
struct hpa_shard_s {
	/*
	 * pai must be the first member; we cast from a pointer to it to a
	 * pointer to the hpa_shard_t.
	 */
	pai_t pai;
	malloc_mutex_t grow_mtx;
	malloc_mutex_t mtx;
	/* The base metadata allocator. */
	base_t *base;
	/*
	 * This edata cache is the one we use when allocating a small extent
	 * from a pageslab. The pageslab itself comes from the centralized
	 * allocator, and so will use its edata_cache.
	 */
	edata_cache_small_t ecs;

	psset_t psset;

	/*
	 * The largest size we'll allocate out of the shard. For those
	 * allocations refused, the caller (in practice, the PA module) will
	 * fall back to the more general (for now) PAC, which can always handle
	 * any allocation request.
	 */
	size_t alloc_max;

	/*
	 * How many grow operations have occurred.
	 *
	 * Guarded by grow_mtx.
	 */
	uint64_t age_counter;

	/*
	 * Either NULL (if empty), or a hugepage-aligned region spanning an
	 * integer number of hugepages. We carve hugepages off one at a time
	 * to satisfy new pageslab requests.
	 *
	 * Guarded by grow_mtx.
	 */
	void *eden;
	size_t eden_len;

	/* The arena ind we're associated with. */
	unsigned ind;
	emap_t *emap;

	/*
	 * Those stats which are copied directly into the CTL-centric hpa shard
	 * stats.
	 */
	hpa_shard_nonderived_stats_t stats;
};
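/*
 * A minimal sketch of the cast that the "pai must be the first member"
 * comment above enables; hpa_shard_from_pai is a hypothetical helper, not
 * something this header declares:
 *
 *	static inline hpa_shard_t *
 *	hpa_shard_from_pai(pai_t *self) {
 *		return (hpa_shard_t *)self;
 *	}
 */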
/*
 * Whether or not the HPA can be used given the current configuration. This is
 * not necessarily a guarantee that it backs its allocations by hugepages,
 * just that it can function properly given the system it's running on.
 */
bool hpa_supported();
bool hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
edata_cache_t *edata_cache, unsigned ind, size_t alloc_max);
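/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * check hpa_supported() first, and treat a true return from hpa_shard_init
 * as failure, per the usual jemalloc convention; the arguments shown are
 * placeholder variables:
 *
 *	if (hpa_supported()) {
 *		bool err = hpa_shard_init(&shard, emap, base, &edata_cache,
 *		    arena_ind, alloc_max);
 *		...
 *	}
 */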
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
hpa_shard_stats_t *dst);
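/*
 * Sketch of how these two might combine in a stats collector (hypothetical
 * caller; "shard_stats" and "total" are placeholder variables): merge a
 * shard's live counters into a per-shard snapshot, then accumulate snapshots
 * across shards:
 *
 *	hpa_shard_stats_merge(tsdn, shard, &shard_stats);
 *	hpa_shard_stats_accum(&total, &shard_stats);
 */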
/*
 * Notify the shard that we won't use it for allocations much longer. Due to
 * the possibility of races, we don't actually prevent allocations; just flush
 * and disable the embedded edata_cache_small.
 */
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
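/*
 * A plausible teardown ordering (a sketch only; the real call sites are not
 * dictated by this header): disable the shard once it should stop serving
 * allocations, then destroy it during final cleanup:
 *
 *	hpa_shard_disable(tsdn, shard);
 *	...
 *	hpa_shard_destroy(tsdn, shard);
 */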
/*
 * We share the fork ordering with the PA and arena prefork handling; that's
 * why these are 3 and 4 rather than 0 and 1.
 */
void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
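/*
 * Sketch of the fork sequencing implied by the comment above (hypothetical
 * caller; the real ordering is driven by the arena/PA prefork machinery):
 * both prefork hooks run, in order, before fork(), and exactly one postfork
 * hook runs on each side afterwards:
 *
 *	hpa_shard_prefork3(tsdn, shard);
 *	hpa_shard_prefork4(tsdn, shard);
 *	(fork happens here)
 *	hpa_shard_postfork_parent(tsdn, shard);    (in the parent)
 *	hpa_shard_postfork_child(tsdn, shard);     (in the child)
 */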
#endif /* JEMALLOC_INTERNAL_HPA_H */