#ifndef JEMALLOC_INTERNAL_PAC_H
#define JEMALLOC_INTERNAL_PAC_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/pai.h"

/*
 * Page allocator classic; an implementation of the PAI interface that:
 * - Can be used for arenas with custom extent hooks.
 * - Can always satisfy any allocation request (including highly fragmented
 *   ones).
 * - Can use efficient OS-level zeroing primitives for demand-filled pages.
 */

/* How "eager" decay/purging should be. */
enum pac_purge_eagerness_e {
	PAC_PURGE_ALWAYS,
	PAC_PURGE_NEVER,
	PAC_PURGE_ON_EPOCH_ADVANCE
};
typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;

typedef struct pac_decay_stats_s pac_decay_stats_t;
struct pac_decay_stats_s {
	/* Total number of purge sweeps. */
	locked_u64_t npurge;
	/* Total number of madvise calls made. */
	locked_u64_t nmadvise;
	/* Total number of pages purged. */
	locked_u64_t purged;
};

typedef struct pac_estats_s pac_estats_t;
struct pac_estats_s {
	/*
	 * Stats for a given index in the range [0, SC_NPSIZES] in the various
	 * ecache_ts.
	 * We track both bytes and # of extents: two extents in the same bucket
	 * may have different sizes if adjacent size classes differ by more
	 * than a page, so bytes cannot always be derived from # of extents.
	 */
	size_t ndirty;
	size_t dirty_bytes;
	size_t nmuzzy;
	size_t muzzy_bytes;
	size_t nretained;
	size_t retained_bytes;
};

typedef struct pac_stats_s pac_stats_t;
struct pac_stats_s {
	pac_decay_stats_t decay_dirty;
	pac_decay_stats_t decay_muzzy;

	/*
	 * Number of unused virtual memory bytes currently retained. Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the pac_mapped statistic (below).
	 */
	size_t retained; /* Derived. */

	/*
	 * Number of bytes currently mapped, excluding retained memory (and any
	 * base-allocated memory, which is tracked by the arena stats).
	 *
	 * We name this "pac_mapped" to avoid confusion with the arena_stats
	 * "mapped".
	 */
	atomic_zu_t pac_mapped;

	/* VM space had to be leaked (undocumented). Normally 0. */
	atomic_zu_t abandoned_vm;
};

typedef struct pac_s pac_t;
struct pac_s {
	/*
	 * Must be the first member (we convert it to a PAC given only a
	 * pointer). The handle to the allocation interface.
	 */
	pai_t pai;
	/*
	 * Collections of extents that were previously allocated. These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	ecache_t ecache_dirty;
	ecache_t ecache_muzzy;
	ecache_t ecache_retained;

	base_t *base;
	emap_t *emap;
	edata_cache_t *edata_cache;

	/* The grow info for the retained ecache. */
	exp_grow_t exp_grow;
	malloc_mutex_t grow_mtx;

	/* How large extents should be before getting auto-purged. */
	atomic_zu_t oversize_threshold;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: via the internal mutex.
	 */
	decay_t decay_dirty; /* dirty --> muzzy */
	decay_t decay_muzzy; /* muzzy --> retained */

	malloc_mutex_t *stats_mtx;
	pac_stats_t *stats;

	/* Extent serial number generator state. */
	atomic_zu_t extent_sn_next;
};

bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
    malloc_mutex_t *stats_mtx);

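/*
 * Illustrative sketch of a pac_init() call (a hedged example, not code from
 * this module): the shard, base, emap, and stats objects are hypothetical
 * caller-owned state, and the opt_* values stand in for whatever decay and
 * oversize settings the caller uses; nstime_init_update() is assumed to fill
 * in the current time. Per the usual jemalloc convention, a true return value
 * is treated as failure.
 *
 *	nstime_t cur_time;
 *	nstime_init_update(&cur_time);
 *	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
 *	    &cur_time, opt_oversize_threshold, opt_dirty_decay_ms,
 *	    opt_muzzy_decay_ms, &shard->stats->pac_stats, stats_mtx)) {
 *		return true;
 *	}
 */
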
static inline size_t
pac_mapped(pac_t *pac) {
	return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
}

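/*
 * Illustrative sketch (hypothetical names): a caller merging statistics might
 * fold the PAC's mapped bytes into a broader total.
 *
 *	size_t total_mapped = base_mapped_bytes + pac_mapped(&shard->pac);
 */
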
/*
 * All purging functions require holding decay->mtx. This is one of the few
 * places external modules are allowed to peek inside pa_shard_t internals.
 */

/*
 * Decays the number of pages currently in the ecache. This might not leave the
 * ecache empty if other threads are inserting dirty objects into it
 * concurrently with the call.
 */
void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);

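/*
 * Illustrative sketch of the locking contract above (a hedged example, not
 * code from this module), purging the dirty ecache with fully_decay set to
 * true; it assumes decay_t exposes its mutex as decay->mtx, as the decay
 * module does elsewhere in jemalloc.
 *
 *	malloc_mutex_lock(tsdn, &pac->decay_dirty.mtx);
 *	pac_decay_all(tsdn, pac, &pac->decay_dirty, &pac->stats->decay_dirty,
 *	    &pac->ecache_dirty, true);
 *	malloc_mutex_unlock(tsdn, &pac->decay_dirty.mtx);
 */
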
/*
 * Updates decay settings for the current time, and conditionally purges in
 * response (depending on the eagerness setting). Returns whether or not the
 * epoch advanced.
 */
bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
    pac_decay_stats_t *decay_stats, ecache_t *ecache,
    pac_purge_eagerness_t eagerness);

/*
 * Gets / sets the maximum amount that we'll grow an arena down the
 * grow-retained pathways (unless forced to by an allocation request).
 *
 * Set new_limit to NULL if it's just a query, or old_limit to NULL if you
 * don't care about the previous value.
 *
 * Returns true on error (if the new limit is not valid).
 */
bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
    size_t *new_limit);

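/*
 * Illustrative sketch of the query / set conventions above (the limit values
 * are hypothetical). Query the current limit only:
 *
 *	size_t old_limit;
 *	pac_retain_grow_limit_get_set(tsdn, pac, &old_limit, NULL);
 *
 * Set a new limit, propagating rejection of an invalid value:
 *
 *	size_t new_limit = 64 * 1024 * 1024;
 *	if (pac_retain_grow_limit_get_set(tsdn, pac, NULL, &new_limit)) {
 *		return true;
 *	}
 */
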
bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);

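/*
 * Illustrative sketch (hypothetical values): switch dirty-page decay to ten
 * seconds, with an eagerness value chosen only for the example, then read it
 * back.
 *
 *	pac_decay_ms_set(tsdn, pac, extent_state_dirty, 10 * 1000,
 *	    PAC_PURGE_ON_EPOCH_ADVANCE);
 *	ssize_t dirty_decay_ms = pac_decay_ms_get(pac, extent_state_dirty);
 */
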
void pac_reset(tsdn_t *tsdn, pac_t *pac);
void pac_destroy(tsdn_t *tsdn, pac_t *pac);

#endif /* JEMALLOC_INTERNAL_PAC_H */