#ifndef JEMALLOC_INTERNAL_ECACHE_H
#define JEMALLOC_INTERNAL_ECACHE_H

#include "jemalloc/internal/eset.h"
#include "jemalloc/internal/mutex.h"

typedef struct ecache_s ecache_t;
struct ecache_s {
	malloc_mutex_t mtx;
	eset_t eset;
	/* All stored extents must be in the same state. */
	extent_state_t state;
	/* The index of the ehooks the ecache is associated with. */
	unsigned ind;
	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool delay_coalesce;
};
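
/*
 * Illustrative usage sketch (not part of this header's API): setting up an
 * ecache for dirty extents and querying it.  The tsdn, arena_ind, and
 * extent_state_dirty symbols are assumed to come from the surrounding arena
 * code and the other internal headers; the final argument is delay_coalesce,
 * and ecache_init() is assumed to follow the usual jemalloc convention of
 * returning true on failure.
 *
 *	ecache_t ecache_dirty;
 *	if (!ecache_init(tsdn, &ecache_dirty, extent_state_dirty, arena_ind,
 *	    true)) {
 *		size_t npages = ecache_npages_get(&ecache_dirty);
 *	}
 */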

typedef struct ecache_grow_s ecache_grow_t;
struct ecache_grow_s {
	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit (the limit field below) is the maximum size index
	 * the series is allowed to reach, unless the required size is greater.
	 * The default is no limit, and it is controlled through mallctl only.
	 *
	 * Synchronization: mtx.
	 */
	pszind_t next;
	pszind_t limit;
	malloc_mutex_t mtx;
};
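
/*
 * Illustrative sketch only; the authoritative consumption of this series
 * lives in the extent allocation path and may differ in detail.  It shows how
 * a caller holding mtx might use next/limit; sz_pind2sz() (mapping a page
 * size index to bytes) is assumed to be available from
 * jemalloc/internal/sz.h.
 *
 *	malloc_mutex_lock(tsdn, &ecache_grow->mtx);
 *	pszind_t ind = ecache_grow->next;
 *	size_t grow_size = sz_pind2sz(ind);
 *	... request grow_size bytes from the extent hooks ...
 *	if (ecache_grow->next < ecache_grow->limit) {
 *		ecache_grow->next++;
 *	}
 *	malloc_mutex_unlock(tsdn, &ecache_grow->mtx);
 */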

static inline size_t
ecache_npages_get(ecache_t *ecache) {
	return eset_npages_get(&ecache->eset);
}

/* Get the number of extents in the given page size index. */
static inline size_t
ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
	return eset_nextents_get(&ecache->eset, ind);
}

/* Get the sum total bytes of the extents in the given page size index. */
static inline size_t
ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
	return eset_nbytes_get(&ecache->eset, ind);
}

static inline unsigned
ecache_ind_get(ecache_t *ecache) {
	return ecache->ind;
}
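
/*
 * Illustrative sketch of reading the per-size-class stats.  SC_NPSIZES (the
 * number of page size classes, from jemalloc/internal/sc.h) is an assumption
 * here, and the underlying eset may keep an extra bin beyond it, so treat
 * this as the access pattern rather than an exact accounting:
 *
 *	size_t nbytes_total = 0;
 *	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
 *		nbytes_total += ecache_nbytes_get(ecache, i);
 *	}
 */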

bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
    unsigned ind, bool delay_coalesce);
void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);

bool ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
void ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
void ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
void ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
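
/*
 * Illustrative sketch of the fork protocol (the authoritative call order is
 * defined by the arena fork machinery, not here): the prefork functions are
 * intended to be called before fork(2) so the embedded mutexes are not held
 * across the fork, with the matching postfork variant called afterwards in
 * the parent and in the child.  The example_* wrappers are hypothetical.
 *
 *	void
 *	example_prefork(tsdn_t *tsdn, ecache_t *ecache, ecache_grow_t *grow) {
 *		ecache_grow_prefork(tsdn, grow);
 *		ecache_prefork(tsdn, ecache);
 *	}
 *
 *	void
 *	example_postfork_child(tsdn_t *tsdn, ecache_t *ecache,
 *	    ecache_grow_t *grow) {
 *		ecache_postfork_child(tsdn, ecache);
 *		ecache_grow_postfork_child(tsdn, grow);
 *	}
 */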

#endif /* JEMALLOC_INTERNAL_ECACHE_H */