Extent refactor: Introduce ecache module.
This will eventually completely wrap the eset, and handle concurrency, allocation, and deallocation. For now, we only pull out the mutex from the eset.
Committed by: David Goldblatt
Parent: 0704516245
Commit: bb70df8e5b
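For now the new ecache_t is just the mutex bundled next to a plain eset_t; callers take ecache->mtx themselves around mutating eset operations (which lose their _locked suffix in this commit). A minimal caller-side sketch; the helper name is hypothetical and not part of this change:

static void
ecache_insert_sketch(tsdn_t *tsdn, ecache_t *ecache, edata_t *edata) {
	malloc_mutex_lock(tsdn, &ecache->mtx);
	/* Formerly eset_insert_locked(); the eset no longer owns a mutex. */
	eset_insert(&ecache->eset, edata);
	malloc_mutex_unlock(tsdn, &ecache->mtx);
}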
@@ -5,8 +5,8 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/edata_cache.h"
-#include "jemalloc/internal/eset.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
@@ -53,7 +53,7 @@ struct arena_decay_s {
 	/*
 	 * Number of unpurged pages at beginning of current epoch.  During epoch
 	 * advancement we use the delta between arena->decay_*.nunpurged and
-	 * eset_npages_get(&arena->extents_*) to determine how many dirty pages,
+	 * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
 	 * if any, were generated.
 	 */
 	size_t nunpurged;
@@ -155,9 +155,9 @@ struct arena_s {
 	 *
 	 * Synchronization: internal.
 	 */
-	eset_t eset_dirty;
-	eset_t eset_muzzy;
-	eset_t eset_retained;
+	ecache_t ecache_dirty;
+	ecache_t ecache_muzzy;
+	ecache_t ecache_retained;
 
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state
@@ -168,22 +168,8 @@ struct arena_s {
 	arena_decay_t decay_dirty; /* dirty --> muzzy */
 	arena_decay_t decay_muzzy; /* muzzy --> retained */
 
-	/*
-	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if opt_retain).  This limits the
-	 * number of disjoint virtual memory ranges so that extent merging can
-	 * be effective even if multiple arenas' extent allocation requests are
-	 * highly interleaved.
-	 *
-	 * retain_grow_limit is the max allowed size ind to expand (unless the
-	 * required size is greater).  Default is no limit, and controlled
-	 * through mallctl only.
-	 *
-	 * Synchronization: extent_grow_mtx
-	 */
-	pszind_t extent_grow_next;
-	pszind_t retain_grow_limit;
-	malloc_mutex_t extent_grow_mtx;
+	/* The grow info for the retained ecache. */
+	ecache_grow_t ecache_grow;
 
 	/* The source of edata_t objects. */
 	edata_cache_t edata_cache;
include/jemalloc/internal/ecache.h (new file, 59 lines)
@@ -0,0 +1,59 @@
+#ifndef JEMALLOC_INTERNAL_ECACHE_H
+#define JEMALLOC_INTERNAL_ECACHE_H
+
+#include "jemalloc/internal/eset.h"
+#include "jemalloc/internal/mutex.h"
+
+typedef struct ecache_s ecache_t;
+struct ecache_s {
+	malloc_mutex_t mtx;
+	eset_t eset;
+};
+
+typedef struct ecache_grow_s ecache_grow_t;
+struct ecache_grow_s {
+	/*
+	 * Next extent size class in a growing series to use when satisfying a
+	 * request via the extent hooks (only if opt_retain).  This limits the
+	 * number of disjoint virtual memory ranges so that extent merging can
+	 * be effective even if multiple arenas' extent allocation requests are
+	 * highly interleaved.
+	 *
+	 * retain_grow_limit is the max allowed size ind to expand (unless the
+	 * required size is greater).  Default is no limit, and controlled
+	 * through mallctl only.
+	 *
+	 * Synchronization: extent_grow_mtx
+	 */
+	pszind_t next;
+	pszind_t limit;
+	malloc_mutex_t mtx;
+};
+
+static inline size_t
+ecache_npages_get(ecache_t *ecache) {
+	return eset_npages_get(&ecache->eset);
+}
+/* Get the number of extents in the given page size index. */
+static inline size_t
+ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
+	return eset_nextents_get(&ecache->eset, ind);
+}
+/* Get the sum total bytes of the extents in the given page size index. */
+static inline size_t
+ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
+	return eset_nbytes_get(&ecache->eset, ind);
+}
+
+bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
+    bool delay_coalesce);
+void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
+void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
+void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
+
+bool ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+void ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+void ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+void ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+
+#endif /* JEMALLOC_INTERNAL_ECACHE_H */
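The inline accessors above read the eset's atomic stats counters, so they are safe without taking ecache->mtx. A hypothetical usage sketch, assuming a valid tsdn and the existing extent_state_dirty state:

ecache_t ecache;
/* ecache_init() returns true on failure (e.g. mutex init error). */
if (ecache_init(tsdn, &ecache, extent_state_dirty,
    /* delay_coalesce */ true)) {
	abort();
}
/* Stats may be read without holding ecache.mtx. */
size_t npages = ecache_npages_get(&ecache);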
include/jemalloc/internal/eset.h
@@ -9,42 +9,25 @@
 /*
  * An eset ("extent set") is a quantized collection of extents, with built-in
  * LRU queue.
+ *
+ * This class is not thread-safe; synchronization must be done externally if
+ * there are mutating operations.  One exception is the stats counters, which
+ * may be read without any locking.
  */
 typedef struct eset_s eset_t;
 struct eset_s {
-	malloc_mutex_t mtx;
-
-	/*
-	 * Quantized per size class heaps of extents.
-	 *
-	 * Synchronization: mtx.
-	 */
+	/* Quantized per size class heaps of extents. */
 	edata_heap_t heaps[SC_NPSIZES + 1];
 	atomic_zu_t nextents[SC_NPSIZES + 1];
 	atomic_zu_t nbytes[SC_NPSIZES + 1];
 
-	/*
-	 * Bitmap for which set bits correspond to non-empty heaps.
-	 *
-	 * Synchronization: mtx.
-	 */
+	/* Bitmap for which set bits correspond to non-empty heaps. */
 	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
 
-	/*
-	 * LRU of all extents in heaps.
-	 *
-	 * Synchronization: mtx.
-	 */
+	/* LRU of all extents in heaps. */
 	edata_list_t lru;
 
-	/*
-	 * Page sum for all extents in heaps.
-	 *
-	 * The synchronization here is a little tricky.  Modifications to npages
-	 * must hold mtx, but reads need not (though, a reader who sees npages
-	 * without holding the mutex can't assume anything about the rest of the
-	 * state of the eset_t).
-	 */
+	/* Page sum for all extents in heaps. */
 	atomic_zu_t npages;
 
 	/* All stored extents must be in the same state. */
@@ -57,8 +40,7 @@ struct eset_s {
 	bool delay_coalesce;
 };
 
-bool eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
-    bool delay_coalesce);
+void eset_init(eset_t *eset, extent_state_t state, bool delay_coalesce);
 extent_state_t eset_state_get(const eset_t *eset);
 
 size_t eset_npages_get(eset_t *eset);
@@ -67,17 +49,12 @@ size_t eset_nextents_get(eset_t *eset, pszind_t ind);
 /* Get the sum total bytes of the extents in the given page size index. */
 size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
 
-void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
-void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
+void eset_insert(eset_t *eset, edata_t *edata);
+void eset_remove(eset_t *eset, edata_t *edata);
 /*
  * Select an extent from this eset of the given size and alignment.  Returns
  * null if no such item could be found.
  */
-edata_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
-    size_t alignment);
-
-void eset_prefork(tsdn_t *tsdn, eset_t *eset);
-void eset_postfork_parent(tsdn_t *tsdn, eset_t *eset);
-void eset_postfork_child(tsdn_t *tsdn, eset_t *eset);
+edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment);
 
 #endif /* JEMALLOC_INTERNAL_ESET_H */
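Per the new header comment, reads of the atomic stats counters need no lock, while mutating calls (eset_insert(), eset_remove(), eset_fit()) rely on external synchronization, typically the owning ecache's mutex. A hypothetical sketch of that split contract:

/* Lock-free: npages is an atomic counter. */
size_t npages = eset_npages_get(&ecache->eset);

/* Mutation: synchronized by the wrapping ecache's mutex. */
malloc_mutex_lock(tsdn, &ecache->mtx);
edata_t *edata = eset_fit(&ecache->eset, esize, alignment);
malloc_mutex_unlock(tsdn, &ecache->mtx);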
include/jemalloc/internal/extent2.h
@@ -1,8 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_EXTENT2_H
 #define JEMALLOC_INTERNAL_EXTENT2_H
 
+#include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/ehooks.h"
-#include "jemalloc/internal/eset.h"
 #include "jemalloc/internal/ph.h"
 #include "jemalloc/internal/rtree.h"
 
@@ -27,12 +27,12 @@ extern size_t opt_lg_extent_max_active_fit;
 extern rtree_t extents_rtree;
 
 edata_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, void *new_addr, size_t size, size_t pad, size_t alignment,
+    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
     bool slab, szind_t szind, bool *zero, bool *commit);
 void extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, edata_t *edata);
+    ecache_t *ecache, edata_t *edata);
 edata_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, size_t npages_min);
+    ecache_t *ecache, size_t npages_min);
 edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
     szind_t szind, bool *zero, bool *commit);
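Call sites update mechanically: where these functions previously took one of the arena's eset_t fields, they now take the corresponding ecache_t. A hypothetical before/after for extents_dalloc():

/* Before: extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, edata); */
extents_dalloc(tsdn, arena, ehooks, &arena->ecache_dirty, edata);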