Extent refactor: Introduce ecache module.

This will eventually completely wrap the eset, and handle concurrency,
allocation, and deallocation.  For now, we only pull out the mutex from the
eset.
Authored by David Goldblatt on 2019-12-12 16:25:24 -08:00; committed by David Goldblatt
parent 0704516245
commit bb70df8e5b
15 changed files with 354 additions and 295 deletions
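In short, as the new ecache.h below shows, the mutex and the fork hooks move out of eset_t; an ecache_t now pairs the (no longer internally synchronized) eset with its lock, and the arena's extent-growth fields (extent_grow_next, retain_grow_limit, extent_grow_mtx) move into a companion ecache_grow_t as next, limit, and mtx:

    typedef struct ecache_s ecache_t;
    struct ecache_s {
        malloc_mutex_t mtx;  /* formerly eset_t's own mtx */
        eset_t eset;         /* quantized extent heaps, now unsynchronized */
    };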


@@ -104,6 +104,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/ckh.c \
 	$(srcroot)src/ctl.c \
 	$(srcroot)src/div.c \
+	$(srcroot)src/ecache.c \
 	$(srcroot)src/edata.c \
 	$(srcroot)src/edata_cache.c \
 	$(srcroot)src/ehooks.c \


@@ -5,8 +5,8 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/edata_cache.h"
-#include "jemalloc/internal/eset.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
@@ -53,7 +53,7 @@ struct arena_decay_s {
 	/*
 	 * Number of unpurged pages at beginning of current epoch. During epoch
 	 * advancement we use the delta between arena->decay_*.nunpurged and
-	 * eset_npages_get(&arena->extents_*) to determine how many dirty pages,
+	 * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
 	 * if any, were generated.
 	 */
 	size_t nunpurged;
@@ -155,9 +155,9 @@ struct arena_s {
 	 *
 	 * Synchronization: internal.
 	 */
-	eset_t eset_dirty;
-	eset_t eset_muzzy;
-	eset_t eset_retained;
+	ecache_t ecache_dirty;
+	ecache_t ecache_muzzy;
+	ecache_t ecache_retained;
 
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state
@@ -168,22 +168,8 @@ struct arena_s {
 	arena_decay_t decay_dirty; /* dirty --> muzzy */
 	arena_decay_t decay_muzzy; /* muzzy --> retained */
 
-	/*
-	 * Next extent size class in a growing series to use when satisfying a
-	 * request via the extent hooks (only if opt_retain). This limits the
-	 * number of disjoint virtual memory ranges so that extent merging can
-	 * be effective even if multiple arenas' extent allocation requests are
-	 * highly interleaved.
-	 *
-	 * retain_grow_limit is the max allowed size ind to expand (unless the
-	 * required size is greater). Default is no limit, and controlled
-	 * through mallctl only.
-	 *
-	 * Synchronization: extent_grow_mtx
-	 */
-	pszind_t extent_grow_next;
-	pszind_t retain_grow_limit;
-	malloc_mutex_t extent_grow_mtx;
+	/* The grow info for the retained ecache. */
+	ecache_grow_t ecache_grow;
 
 	/* The source of edata_t objects. */
 	edata_cache_t edata_cache;


@@ -0,0 +1,59 @@
+#ifndef JEMALLOC_INTERNAL_ECACHE_H
+#define JEMALLOC_INTERNAL_ECACHE_H
+
+#include "jemalloc/internal/eset.h"
+#include "jemalloc/internal/mutex.h"
+
+typedef struct ecache_s ecache_t;
+struct ecache_s {
+	malloc_mutex_t mtx;
+	eset_t eset;
+};
+
+typedef struct ecache_grow_s ecache_grow_t;
+struct ecache_grow_s {
+	/*
+	 * Next extent size class in a growing series to use when satisfying a
+	 * request via the extent hooks (only if opt_retain). This limits the
+	 * number of disjoint virtual memory ranges so that extent merging can
+	 * be effective even if multiple arenas' extent allocation requests are
+	 * highly interleaved.
+	 *
+	 * retain_grow_limit is the max allowed size ind to expand (unless the
+	 * required size is greater). Default is no limit, and controlled
+	 * through mallctl only.
+	 *
+	 * Synchronization: extent_grow_mtx
+	 */
+	pszind_t next;
+	pszind_t limit;
+	malloc_mutex_t mtx;
+};
+
+static inline size_t
+ecache_npages_get(ecache_t *ecache) {
+	return eset_npages_get(&ecache->eset);
+}
+
+/* Get the number of extents in the given page size index. */
+static inline size_t
+ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
+	return eset_nextents_get(&ecache->eset, ind);
+}
+
+/* Get the sum total bytes of the extents in the given page size index. */
+static inline size_t
+ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
+	return eset_nbytes_get(&ecache->eset, ind);
+}
+
+bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
+    bool delay_coalesce);
+void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
+void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
+void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
+
+bool ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+void ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+void ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+void ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow);
+
+#endif /* JEMALLOC_INTERNAL_ECACHE_H */
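
For orientation, a minimal sketch (not part of this commit) of the call pattern the new type implies, mirroring what the src/extent2.c hunks later in this diff do: the caller takes ecache->mtx itself and then uses the now-unlocked eset_* operations. The helper name below is illustrative only.

    static edata_t *
    example_ecache_take(tsdn_t *tsdn, ecache_t *ecache, size_t size) {
        /* The caller, not the eset, owns the lock now. */
        malloc_mutex_lock(tsdn, &ecache->mtx);
        edata_t *edata = eset_fit(&ecache->eset, size, PAGE);
        if (edata != NULL) {
            eset_remove(&ecache->eset, edata);
        }
        malloc_mutex_unlock(tsdn, &ecache->mtx);
        return edata;
    }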


@@ -9,42 +9,25 @@
 /*
  * An eset ("extent set") is a quantized collection of extents, with built-in
  * LRU queue.
+ *
+ * This class is not thread-safe; synchronization must be done externally if
+ * there are mutating operations. One exception is the stats counters, which
+ * may be read without any locking.
  */
 typedef struct eset_s eset_t;
 struct eset_s {
-	malloc_mutex_t mtx;
-	/*
-	 * Quantized per size class heaps of extents.
-	 *
-	 * Synchronization: mtx.
-	 */
+	/* Quantized per size class heaps of extents. */
 	edata_heap_t heaps[SC_NPSIZES + 1];
 	atomic_zu_t nextents[SC_NPSIZES + 1];
 	atomic_zu_t nbytes[SC_NPSIZES + 1];
-	/*
-	 * Bitmap for which set bits correspond to non-empty heaps.
-	 *
-	 * Synchronization: mtx.
-	 */
+	/* Bitmap for which set bits correspond to non-empty heaps. */
 	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
-	/*
-	 * LRU of all extents in heaps.
-	 *
-	 * Synchronization: mtx.
-	 */
+	/* LRU of all extents in heaps. */
 	edata_list_t lru;
-	/*
-	 * Page sum for all extents in heaps.
-	 *
-	 * The synchronization here is a little tricky. Modifications to npages
-	 * must hold mtx, but reads need not (though, a reader who sees npages
-	 * without holding the mutex can't assume anything about the rest of the
-	 * state of the eset_t).
-	 */
+	/* Page sum for all extents in heaps. */
 	atomic_zu_t npages;
 	/* All stored extents must be in the same state. */
@@ -57,8 +40,7 @@ struct eset_s {
 	bool delay_coalesce;
 };
 
-bool eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
-    bool delay_coalesce);
+void eset_init(eset_t *eset, extent_state_t state, bool delay_coalesce);
 extent_state_t eset_state_get(const eset_t *eset);
 size_t eset_npages_get(eset_t *eset);
@@ -67,17 +49,12 @@ size_t eset_nextents_get(eset_t *eset, pszind_t ind);
 /* Get the sum total bytes of the extents in the given page size index. */
 size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
 
-void eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
-void eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata);
+void eset_insert(eset_t *eset, edata_t *edata);
+void eset_remove(eset_t *eset, edata_t *edata);
 
 /*
  * Select an extent from this eset of the given size and alignment. Returns
  * null if no such item could be found.
  */
-edata_t *eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize,
-    size_t alignment);
-
-void eset_prefork(tsdn_t *tsdn, eset_t *eset);
-void eset_postfork_parent(tsdn_t *tsdn, eset_t *eset);
-void eset_postfork_child(tsdn_t *tsdn, eset_t *eset);
+edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment);
 
 #endif /* JEMALLOC_INTERNAL_ESET_H */


@@ -1,8 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_EXTENT2_H
 #define JEMALLOC_INTERNAL_EXTENT2_H
 
+#include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/ehooks.h"
-#include "jemalloc/internal/eset.h"
 #include "jemalloc/internal/ph.h"
 #include "jemalloc/internal/rtree.h"
@@ -27,12 +27,12 @@ extern size_t opt_lg_extent_max_active_fit;
 extern rtree_t extents_rtree;
 
 edata_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, void *new_addr, size_t size, size_t pad, size_t alignment,
+    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
     bool slab, szind_t szind, bool *zero, bool *commit);
 void extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, edata_t *edata);
+    ecache_t *ecache, edata_t *edata);
 edata_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    eset_t *eset, size_t npages_min);
+    ecache_t *ecache, size_t npages_min);
 edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
     szind_t szind, bool *zero, bool *commit);


@@ -44,6 +44,7 @@
     <ClCompile Include="..\..\..\..\src\ckh.c" />
     <ClCompile Include="..\..\..\..\src\ctl.c" />
     <ClCompile Include="..\..\..\..\src\div.c" />
+    <ClCompile Include="..\..\..\..\src\ecache.c" />
     <ClCompile Include="..\..\..\..\src\edata.c" />
     <ClCompile Include="..\..\..\..\src\edata_cache.c" />
     <ClCompile Include="..\..\..\..\src\ehooks.c" />


@@ -44,6 +44,7 @@
     <ClCompile Include="..\..\..\..\src\ckh.c" />
    <ClCompile Include="..\..\..\..\src\ctl.c" />
    <ClCompile Include="..\..\..\..\src\div.c" />
+    <ClCompile Include="..\..\..\..\src\ecache.c" />
    <ClCompile Include="..\..\..\..\src\edata.c" />
    <ClCompile Include="..\..\..\..\src\edata_cache.c" />
    <ClCompile Include="..\..\..\..\src\ehooks.c" />


@@ -56,7 +56,7 @@ static unsigned huge_arena_ind;
  */
 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
-    arena_decay_t *decay, eset_t *eset, bool all, size_t npages_limit,
+    arena_decay_t *decay, ecache_t *ecache, bool all, size_t npages_limit,
     size_t npages_decay_max, bool is_background_thread);
 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
     bool is_background_thread, bool all);
@@ -76,8 +76,8 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
-	*ndirty += eset_npages_get(&arena->eset_dirty);
-	*nmuzzy += eset_npages_get(&arena->eset_muzzy);
+	*ndirty += ecache_npages_get(&arena->ecache_dirty);
+	*nmuzzy += ecache_npages_get(&arena->ecache_muzzy);
 }
 
 void
@@ -100,7 +100,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->mapped, base_mapped
 	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
 	arena_stats_accum_zu(&astats->retained,
-	    eset_npages_get(&arena->eset_retained) << LG_PAGE);
+	    ecache_npages_get(&arena->ecache_retained) << LG_PAGE);
 
 	atomic_store_zu(&astats->edata_avail,
 	    atomic_load_zu(&arena->edata_cache.count, ATOMIC_RELAXED),
@@ -131,8 +131,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
 	arena_stats_accum_zu(&astats->resident, base_resident +
 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
-	    eset_npages_get(&arena->eset_dirty) +
-	    eset_npages_get(&arena->eset_muzzy)) << LG_PAGE)));
+	    ecache_npages_get(&arena->ecache_dirty) +
+	    ecache_npages_get(&arena->ecache_muzzy)) << LG_PAGE)));
 	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
 	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));
@@ -174,12 +174,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
 		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
 		    retained_bytes;
-		dirty = eset_nextents_get(&arena->eset_dirty, i);
-		muzzy = eset_nextents_get(&arena->eset_muzzy, i);
-		retained = eset_nextents_get(&arena->eset_retained, i);
-		dirty_bytes = eset_nbytes_get(&arena->eset_dirty, i);
-		muzzy_bytes = eset_nbytes_get(&arena->eset_muzzy, i);
-		retained_bytes = eset_nbytes_get(&arena->eset_retained, i);
+		dirty = ecache_nextents_get(&arena->ecache_dirty, i);
+		muzzy = ecache_nextents_get(&arena->ecache_muzzy, i);
+		retained = ecache_nextents_get(&arena->ecache_retained, i);
+		dirty_bytes = ecache_nbytes_get(&arena->ecache_dirty, i);
+		muzzy_bytes = ecache_nbytes_get(&arena->ecache_muzzy, i);
+		retained_bytes = ecache_nbytes_get(&arena->ecache_retained, i);
 
 		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
 		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
@@ -226,11 +226,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 		READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
 		READ_ARENA_MUTEX_PROF_DATA(edata_cache.mtx,
 		    arena_prof_mutex_extent_avail)
-		READ_ARENA_MUTEX_PROF_DATA(eset_dirty.mtx,
+		READ_ARENA_MUTEX_PROF_DATA(ecache_dirty.mtx,
 		    arena_prof_mutex_extents_dirty)
-		READ_ARENA_MUTEX_PROF_DATA(eset_muzzy.mtx,
+		READ_ARENA_MUTEX_PROF_DATA(ecache_muzzy.mtx,
 		    arena_prof_mutex_extents_muzzy)
-		READ_ARENA_MUTEX_PROF_DATA(eset_retained.mtx,
+		READ_ARENA_MUTEX_PROF_DATA(ecache_retained.mtx,
 		    arena_prof_mutex_extents_retained)
 		READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
 		    arena_prof_mutex_decay_dirty)
@@ -258,7 +258,7 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	extents_dalloc(tsdn, arena, ehooks, &arena->eset_dirty, edata);
+	extents_dalloc(tsdn, arena, ehooks, &arena->ecache_dirty, edata);
 	if (arena_dirty_decay_ms_get(arena) == 0) {
 		arena_decay_dirty(tsdn, arena, false, true);
 	} else {
@@ -434,10 +434,11 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	szind_t szind = sz_size2index(usize);
 	size_t mapped_add;
 	bool commit = true;
-	edata_t *edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
-	    NULL, usize, sz_large_pad, alignment, false, szind, zero, &commit);
+	edata_t *edata = extents_alloc(tsdn, arena, ehooks,
+	    &arena->ecache_dirty, NULL, usize, sz_large_pad, alignment, false,
+	    szind, zero, &commit);
 	if (edata == NULL && arena_may_have_muzzy(arena)) {
-		edata = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
+		edata = extents_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
 		    NULL, usize, sz_large_pad, alignment, false, szind, zero,
 		    &commit);
 	}
@@ -606,10 +607,10 @@ arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
 static void
 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    eset_t *eset, size_t current_npages, size_t npages_limit,
+    ecache_t *ecache, size_t current_npages, size_t npages_limit,
     bool is_background_thread) {
 	if (current_npages > npages_limit) {
-		arena_decay_to_limit(tsdn, arena, decay, eset, false,
+		arena_decay_to_limit(tsdn, arena, decay, ecache, false,
 		    npages_limit, current_npages - npages_limit,
 		    is_background_thread);
 	}
@@ -641,8 +642,8 @@ arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
 static void
 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    eset_t *eset, const nstime_t *time, bool is_background_thread) {
-	size_t current_npages = eset_npages_get(eset);
+    ecache_t *ecache, const nstime_t *time, bool is_background_thread) {
+	size_t current_npages = ecache_npages_get(ecache);
 	arena_decay_epoch_advance_helper(decay, time, current_npages);
 
 	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
@@ -651,7 +652,7 @@ arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	    current_npages;
 
 	if (!background_thread_enabled() || is_background_thread) {
-		arena_decay_try_purge(tsdn, arena, decay, eset,
+		arena_decay_try_purge(tsdn, arena, decay, ecache,
 		    current_npages, npages_limit, is_background_thread);
 	}
 }
@@ -708,15 +709,15 @@ arena_decay_ms_valid(ssize_t decay_ms) {
 static bool
 arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    eset_t *eset, bool is_background_thread) {
+    ecache_t *ecache, bool is_background_thread) {
 	malloc_mutex_assert_owner(tsdn, &decay->mtx);
 
 	/* Purge all or nothing if the option is disabled. */
 	ssize_t decay_ms = arena_decay_ms_read(decay);
 	if (decay_ms <= 0) {
 		if (decay_ms == 0) {
-			arena_decay_to_limit(tsdn, arena, decay, eset, false,
-			    0, eset_npages_get(eset),
+			arena_decay_to_limit(tsdn, arena, decay, ecache, false,
+			    0, ecache_npages_get(ecache),
 			    is_background_thread);
 		}
 		return false;
@@ -751,11 +752,11 @@ arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	 */
 	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
 	if (advance_epoch) {
-		arena_decay_epoch_advance(tsdn, arena, decay, eset, &time,
+		arena_decay_epoch_advance(tsdn, arena, decay, ecache, &time,
 		    is_background_thread);
 	} else if (is_background_thread) {
-		arena_decay_try_purge(tsdn, arena, decay, eset,
-		    eset_npages_get(eset),
+		arena_decay_try_purge(tsdn, arena, decay, ecache,
+		    ecache_npages_get(ecache),
 		    arena_decay_backlog_npages_limit(decay),
 		    is_background_thread);
 	}
@@ -780,7 +781,7 @@ arena_muzzy_decay_ms_get(arena_t *arena) {
 static bool
 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    eset_t *eset, ssize_t decay_ms) {
+    ecache_t *ecache, ssize_t decay_ms) {
 	if (!arena_decay_ms_valid(decay_ms)) {
 		return true;
 	}
@@ -795,7 +796,7 @@ arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	 * arbitrary change during initial arena configuration.
 	 */
 	arena_decay_reinit(decay, decay_ms);
-	arena_maybe_decay(tsdn, arena, decay, eset, false);
+	arena_maybe_decay(tsdn, arena, decay, ecache, false);
 	malloc_mutex_unlock(tsdn, &decay->mtx);
 
 	return false;
@@ -805,19 +806,19 @@ bool
 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
 	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
-	    &arena->eset_dirty, decay_ms);
+	    &arena->ecache_dirty, decay_ms);
 }
 
 bool
 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
 	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
-	    &arena->eset_muzzy, decay_ms);
+	    &arena->ecache_muzzy, decay_ms);
 }
 
 static size_t
 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
-    ehooks_t *ehooks, eset_t *eset, size_t npages_limit,
+    ehooks_t *ehooks, ecache_t *ecache, size_t npages_limit,
     size_t npages_decay_max, edata_list_t *decay_extents) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
@@ -826,7 +827,7 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
 	size_t nstashed = 0;
 	edata_t *edata;
 	while (nstashed < npages_decay_max &&
-	    (edata = extents_evict(tsdn, arena, ehooks, eset, npages_limit))
+	    (edata = extents_evict(tsdn, arena, ehooks, ecache, npages_limit))
 	    != NULL) {
 		edata_list_append(decay_extents, edata);
 		nstashed += edata_size_get(edata) >> LG_PAGE;
@@ -836,8 +837,8 @@
 static size_t
 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    arena_decay_t *decay, eset_t *eset, bool all, edata_list_t *decay_extents,
-    bool is_background_thread) {
+    arena_decay_t *decay, ecache_t *ecache, bool all,
+    edata_list_t *decay_extents, bool is_background_thread) {
 	size_t nmadvise, nunmapped;
 	size_t npurged;
@@ -856,7 +857,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		size_t npages = edata_size_get(edata) >> LG_PAGE;
 		npurged += npages;
 		edata_list_remove(decay_extents, edata);
-		switch (eset_state_get(eset)) {
+		switch (eset_state_get(&ecache->eset)) {
 		case extent_state_active:
 			not_reached();
 		case extent_state_dirty:
@@ -864,7 +865,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 			    !extent_purge_lazy_wrapper(tsdn, arena,
 			    ehooks, edata, 0, edata_size_get(edata))) {
 				extents_dalloc(tsdn, arena, ehooks,
-				    &arena->eset_muzzy, edata);
+				    &arena->ecache_muzzy, edata);
 				arena_background_thread_inactivity_check(tsdn,
 				    arena, is_background_thread);
 				break;
@@ -900,14 +901,14 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 /*
  * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (eset_npages_get(extents) >= npages_limit). We need an upper
+ * invariant: (ecache_npages_get(ecache) >= npages_limit). We need an upper
  * bound on number of pages in order to prevent unbounded growth (namely in
 * stashed), otherwise unbounded new pages could be added to extents during the
 * current decay run, so that the purging thread never finishes.
 */
 static void
 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    eset_t *eset, bool all, size_t npages_limit, size_t npages_decay_max,
+    ecache_t *ecache, bool all, size_t npages_limit, size_t npages_decay_max,
     bool is_background_thread) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 1);
@@ -924,11 +925,11 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 	edata_list_t decay_extents;
 	edata_list_init(&decay_extents);
 
-	size_t npurge = arena_stash_decayed(tsdn, arena, ehooks, eset,
+	size_t npurge = arena_stash_decayed(tsdn, arena, ehooks, ecache,
 	    npages_limit, npages_decay_max, &decay_extents);
 	if (npurge != 0) {
 		size_t npurged = arena_decay_stashed(tsdn, arena, ehooks, decay,
-		    eset, all, &decay_extents, is_background_thread);
+		    ecache, all, &decay_extents, is_background_thread);
 		assert(npurged == npurge);
 	}
@@ -938,11 +939,11 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 static bool
 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
-    eset_t *eset, bool is_background_thread, bool all) {
+    ecache_t *ecache, bool is_background_thread, bool all) {
 	if (all) {
 		malloc_mutex_lock(tsdn, &decay->mtx);
-		arena_decay_to_limit(tsdn, arena, decay, eset, all, 0,
-		    eset_npages_get(eset), is_background_thread);
+		arena_decay_to_limit(tsdn, arena, decay, ecache, all, 0,
+		    ecache_npages_get(ecache), is_background_thread);
 		malloc_mutex_unlock(tsdn, &decay->mtx);
 		return false;
@@ -953,7 +954,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
 		return true;
 	}
 
-	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, eset,
+	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, ecache,
 	    is_background_thread);
 	size_t npages_new;
 	if (epoch_advanced) {
@@ -975,18 +976,18 @@ static bool
 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
 	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
-	    &arena->eset_dirty, is_background_thread, all);
+	    &arena->ecache_dirty, is_background_thread, all);
 }
 
 static bool
 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
-	if (eset_npages_get(&arena->eset_muzzy) == 0 &&
+	if (ecache_npages_get(&arena->ecache_muzzy) == 0 &&
 	    arena_muzzy_decay_ms_get(arena) <= 0) {
 		return false;
 	}
 	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
-	    &arena->eset_muzzy, is_background_thread, all);
+	    &arena->ecache_muzzy, is_background_thread, all);
 }
 
 void
@@ -1157,7 +1158,7 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
 	ehooks_t *ehooks = arena_get_ehooks(arena);
 	edata_t *edata;
 	while ((edata = extents_evict(tsdn, arena, ehooks,
-	    &arena->eset_retained, 0)) != NULL) {
+	    &arena->ecache_retained, 0)) != NULL) {
 		extent_destroy_wrapper(tsdn, arena, ehooks, edata);
 	}
 }
@@ -1173,8 +1174,8 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
 	 * extents, so only retained extents may remain.
 	 */
-	assert(eset_npages_get(&arena->eset_dirty) == 0);
-	assert(eset_npages_get(&arena->eset_muzzy) == 0);
+	assert(ecache_npages_get(&arena->ecache_dirty) == 0);
+	assert(ecache_npages_get(&arena->ecache_muzzy) == 0);
 
 	/* Deallocate retained memory. */
 	arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -1230,10 +1231,10 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	szind_t szind = sz_size2index(bin_info->reg_size);
 	bool zero = false;
 	bool commit = true;
-	edata_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
+	edata_t *slab = extents_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
 	    NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero, &commit);
 	if (slab == NULL && arena_may_have_muzzy(arena)) {
-		slab = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
+		slab = extents_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
 		    NULL, bin_info->slab_size, 0, PAGE, true, binind, &zero,
 		    &commit);
 	}
@@ -1917,14 +1918,14 @@ arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
 		}
 	}
 
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &arena->ecache_grow.mtx);
 	if (old_limit != NULL) {
-		*old_limit = sz_pind2sz(arena->retain_grow_limit);
+		*old_limit = sz_pind2sz(arena->ecache_grow.limit);
 	}
 	if (new_limit != NULL) {
-		arena->retain_grow_limit = new_ind;
+		arena->ecache_grow.limit = new_ind;
 	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->ecache_grow.mtx);
 
 	return false;
 }
@@ -2016,14 +2017,14 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * are likely to be reused soon after deallocation, and the cost of
 	 * merging/splitting extents is non-trivial.
 	 */
-	if (eset_init(tsdn, &arena->eset_dirty, extent_state_dirty, true)) {
+	if (ecache_init(tsdn, &arena->ecache_dirty, extent_state_dirty, true)) {
 		goto label_error;
 	}
 	/*
 	 * Coalesce muzzy extents immediately, because operations on them are in
 	 * the critical path much less often than for dirty extents.
 	 */
-	if (eset_init(tsdn, &arena->eset_muzzy, extent_state_muzzy, false)) {
+	if (ecache_init(tsdn, &arena->ecache_muzzy, extent_state_muzzy, false)) {
 		goto label_error;
 	}
 	/*
@@ -2032,7 +2033,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * coalescing), but also because operations on retained extents are not
 	 * in the critical path.
 	 */
-	if (eset_init(tsdn, &arena->eset_retained, extent_state_retained,
+	if (ecache_init(tsdn, &arena->ecache_retained, extent_state_retained,
 	    false)) {
 		goto label_error;
 	}
@@ -2046,10 +2047,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		goto label_error;
 	}
 
-	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
-	arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
-	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
-	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+	if (ecache_grow_init(tsdn, &arena->ecache_grow)) {
 		goto label_error;
 	}
@@ -2187,14 +2185,14 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
 void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
-	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+	ecache_grow_prefork(tsdn, &arena->ecache_grow);
 }
 
 void
 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
-	eset_prefork(tsdn, &arena->eset_dirty);
-	eset_prefork(tsdn, &arena->eset_muzzy);
-	eset_prefork(tsdn, &arena->eset_retained);
+	ecache_prefork(tsdn, &arena->ecache_dirty);
+	ecache_prefork(tsdn, &arena->ecache_muzzy);
+	ecache_prefork(tsdn, &arena->ecache_retained);
 }
 
 void
@@ -2234,10 +2232,10 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
 	base_postfork_parent(tsdn, arena->base);
 	edata_cache_postfork_parent(tsdn, &arena->edata_cache);
-	eset_postfork_parent(tsdn, &arena->eset_dirty);
-	eset_postfork_parent(tsdn, &arena->eset_muzzy);
-	eset_postfork_parent(tsdn, &arena->eset_retained);
-	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
+	ecache_postfork_parent(tsdn, &arena->ecache_dirty);
+	ecache_postfork_parent(tsdn, &arena->ecache_muzzy);
+	ecache_postfork_parent(tsdn, &arena->ecache_retained);
+	ecache_grow_postfork_parent(tsdn, &arena->ecache_grow);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
 	if (config_stats) {
@@ -2280,10 +2278,10 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
 	base_postfork_child(tsdn, arena->base);
 	edata_cache_postfork_child(tsdn, &arena->edata_cache);
-	eset_postfork_child(tsdn, &arena->eset_dirty);
-	eset_postfork_child(tsdn, &arena->eset_muzzy);
-	eset_postfork_child(tsdn, &arena->eset_retained);
-	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
+	ecache_postfork_child(tsdn, &arena->ecache_dirty);
+	ecache_postfork_child(tsdn, &arena->ecache_muzzy);
+	ecache_postfork_child(tsdn, &arena->ecache_retained);
+	ecache_grow_postfork_child(tsdn, &arena->ecache_grow);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
 	if (config_stats) {


@@ -114,7 +114,7 @@ decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
 static uint64_t
 arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
-    eset_t *eset) {
+    ecache_t *ecache) {
 	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
 		/* Use minimal interval if decay is contended. */
 		return BACKGROUND_THREAD_MIN_INTERVAL_NS;
@@ -130,7 +130,7 @@ arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
 	uint64_t decay_interval_ns = nstime_ns(&decay->interval);
 	assert(decay_interval_ns > 0);
 
-	size_t npages = eset_npages_get(eset);
+	size_t npages = ecache_npages_get(ecache);
 	if (npages == 0) {
 		unsigned i;
 		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
@@ -202,12 +202,12 @@ static uint64_t
 arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
 	uint64_t i1, i2;
 	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
-	    &arena->eset_dirty);
+	    &arena->ecache_dirty);
 	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
 		return i1;
 	}
 	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
-	    &arena->eset_muzzy);
+	    &arena->ecache_muzzy);
 	return i1 < i2 ? i1 : i2;
 }
@@ -717,8 +717,8 @@ background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
 	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
 		should_signal = true;
 	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
-	    (eset_npages_get(&arena->eset_dirty) > 0 ||
-	    eset_npages_get(&arena->eset_muzzy) > 0 ||
+	    (ecache_npages_get(&arena->ecache_dirty) > 0 ||
+	    ecache_npages_get(&arena->ecache_muzzy) > 0 ||
 	    info->npages_to_purge_new > 0)) {
 		should_signal = true;
 	} else {


@@ -3011,9 +3011,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
 		}
 		MUTEX_PROF_RESET(arena->large_mtx);
 		MUTEX_PROF_RESET(arena->edata_cache.mtx);
-		MUTEX_PROF_RESET(arena->eset_dirty.mtx);
-		MUTEX_PROF_RESET(arena->eset_muzzy.mtx);
-		MUTEX_PROF_RESET(arena->eset_retained.mtx);
+		MUTEX_PROF_RESET(arena->ecache_dirty.mtx);
+		MUTEX_PROF_RESET(arena->ecache_muzzy.mtx);
+		MUTEX_PROF_RESET(arena->ecache_retained.mtx);
 		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
 		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
 		MUTEX_PROF_RESET(arena->tcache_ql_mtx);

src/ecache.c (new file, 54 lines)

@@ -0,0 +1,54 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+bool
+ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
+    bool delay_coalesce) {
+	if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
+	    malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	eset_init(&ecache->eset, state, delay_coalesce);
+	return false;
+}
+
+void
+ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
+	malloc_mutex_prefork(tsdn, &ecache->mtx);
+}
+
+void
+ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
+	malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
+}
+
+void
+ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
+	malloc_mutex_postfork_child(tsdn, &ecache->mtx);
+}
+
+bool
+ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
+	ecache_grow->next = sz_psz2ind(HUGEPAGE);
+	ecache_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
+	if (malloc_mutex_init(&ecache_grow->mtx, "extent_grow",
+	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	return false;
+}
+
+void
+ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
+	malloc_mutex_prefork(tsdn, &ecache_grow->mtx);
+}
+
+void
+ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
+	malloc_mutex_postfork_parent(tsdn, &ecache_grow->mtx);
+}
+
+void
+ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
+	malloc_mutex_postfork_child(tsdn, &ecache_grow->mtx);
+}


@@ -8,13 +8,9 @@
 const bitmap_info_t eset_bitmap_info =
     BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
 
-bool
-eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
+void
+eset_init(eset_t *eset, extent_state_t state,
     bool delay_coalesce) {
-	if (malloc_mutex_init(&eset->mtx, "extents", WITNESS_RANK_EXTENTS,
-	    malloc_mutex_rank_exclusive)) {
-		return true;
-	}
 	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
 		edata_heap_new(&eset->heaps[i]);
 	}
@@ -23,7 +19,6 @@ eset_init(tsdn_t *tsdn, eset_t *eset, extent_state_t state,
 	atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
 	eset->state = state;
 	eset->delay_coalesce = delay_coalesce;
-	return false;
 }
 
 extent_state_t
@@ -63,8 +58,7 @@ eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
 }
 
 void
-eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
-	malloc_mutex_assert_owner(tsdn, &eset->mtx);
+eset_insert(eset_t *eset, edata_t *edata) {
 	assert(edata_state_get(edata) == eset->state);
 
 	size_t size = edata_size_get(edata);
@@ -94,8 +88,7 @@ eset_insert_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
 }
 
 void
-eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
-	malloc_mutex_assert_owner(tsdn, &eset->mtx);
+eset_remove(eset_t *eset, edata_t *edata) {
 	assert(edata_state_get(edata) == eset->state);
 
 	size_t size = edata_size_get(edata);
@@ -114,9 +107,13 @@ eset_remove_locked(tsdn_t *tsdn, eset_t *eset, edata_t *edata) {
 	edata_list_remove(&eset->lru, edata);
 	size_t npages = size >> LG_PAGE;
 	/*
-	 * As in eset_insert_locked, we hold eset->mtx and so don't need atomic
+	 * As in eset_insert, we hold eset->mtx and so don't need atomic
 	 * operations for updating eset->npages.
 	 */
+	/*
+	 * This class is not thread-safe in general; we rely on external
+	 * synchronization for all mutating operations.
+	 */
 	size_t cur_extents_npages =
 	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
 	assert(cur_extents_npages >= npages);
@@ -166,7 +163,7 @@ eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
  * large enough.
  */
 static edata_t *
-eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
+eset_first_fit(eset_t *eset, size_t size) {
 	edata_t *ret = NULL;
 
 	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
@@ -211,16 +208,14 @@ eset_first_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t size) {
 }
 
 edata_t *
-eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
-	malloc_mutex_assert_owner(tsdn, &eset->mtx);
-
+eset_fit(eset_t *eset, size_t esize, size_t alignment) {
 	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
 	/* Beware size_t wrap-around. */
 	if (max_size < esize) {
 		return NULL;
 	}
 
-	edata_t *edata = eset_first_fit_locked(tsdn, eset, max_size);
+	edata_t *edata = eset_first_fit(eset, max_size);
 
 	if (alignment > PAGE && edata == NULL) {
 		/*
@@ -233,18 +228,3 @@ eset_fit_locked(tsdn_t *tsdn, eset_t *eset, size_t esize, size_t alignment) {
 
 	return edata;
 }
-
-void
-eset_prefork(tsdn_t *tsdn, eset_t *eset) {
-	malloc_mutex_prefork(tsdn, &eset->mtx);
-}
-
-void
-eset_postfork_parent(tsdn_t *tsdn, eset_t *eset) {
-	malloc_mutex_postfork_parent(tsdn, &eset->mtx);
-}
-
-void
-eset_postfork_child(tsdn_t *tsdn, eset_t *eset) {
-	malloc_mutex_postfork_child(tsdn, &eset->mtx);
-}


@ -45,13 +45,13 @@ static atomic_zu_t highpages;
static void extent_deregister(tsdn_t *tsdn, edata_t *edata); static void extent_deregister(tsdn_t *tsdn, edata_t *edata);
static edata_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, static edata_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, void *new_addr, size_t usize, size_t pad, size_t alignment, ecache_t *ecache, void *new_addr, size_t usize, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained); bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained);
static edata_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, static edata_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata,
bool *coalesced, bool growing_retained); bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, static void extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
eset_t *eset, edata_t *edata, bool growing_retained); ecache_t *ecache, edata_t *edata, bool growing_retained);
/******************************************************************************/ /******************************************************************************/
@ -165,22 +165,22 @@ extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
static bool static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata) { rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata) {
edata_state_set(edata, extent_state_active); edata_state_set(edata, extent_state_active);
bool coalesced; bool coalesced;
edata = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx, eset, edata = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx, ecache,
edata, &coalesced, false); edata, &coalesced, false);
edata_state_set(edata, eset_state_get(eset)); edata_state_set(edata, eset_state_get(&ecache->eset));
if (!coalesced) { if (!coalesced) {
return true; return true;
} }
eset_insert_locked(tsdn, eset, edata); eset_insert(&ecache->eset, edata);
return false; return false;
} }
edata_t * edata_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset, extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
szind_t szind, bool *zero, bool *commit) { szind_t szind, bool *zero, bool *commit) {
assert(size + pad != 0); assert(size + pad != 0);
@ -188,14 +188,14 @@ extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
edata_t *edata = extent_recycle(tsdn, arena, ehooks, eset, new_addr, edata_t *edata = extent_recycle(tsdn, arena, ehooks, ecache, new_addr,
size, pad, alignment, slab, szind, zero, commit, false); size, pad, alignment, slab, szind, zero, commit, false);
assert(edata == NULL || edata_dumpable_get(edata)); assert(edata == NULL || edata_dumpable_get(edata));
return edata; return edata;
} }
void void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset, extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata) { edata_t *edata) {
assert(edata_base_get(edata) != NULL); assert(edata_base_get(edata) != NULL);
assert(edata_size_get(edata) != 0); assert(edata_size_get(edata) != 0);
@ -206,16 +206,16 @@ extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
edata_addr_set(edata, edata_base_get(edata)); edata_addr_set(edata, edata_base_get(edata));
edata_zeroed_set(edata, false); edata_zeroed_set(edata, false);
extent_record(tsdn, arena, ehooks, eset, edata, false); extent_record(tsdn, arena, ehooks, ecache, edata, false);
} }
edata_t * edata_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset, extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
size_t npages_min) { size_t npages_min) {
rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
malloc_mutex_lock(tsdn, &eset->mtx); malloc_mutex_lock(tsdn, &ecache->mtx);
/* /*
* Get the LRU coalesced extent, if any. If coalescing was delayed, * Get the LRU coalesced extent, if any. If coalescing was delayed,
@ -224,24 +224,23 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
edata_t *edata; edata_t *edata;
while (true) { while (true) {
/* Get the LRU extent, if any. */ /* Get the LRU extent, if any. */
edata = edata_list_first(&eset->lru); edata = edata_list_first(&ecache->eset.lru);
if (edata == NULL) { if (edata == NULL) {
goto label_return; goto label_return;
} }
/* Check the eviction limit. */ /* Check the eviction limit. */
size_t extents_npages = atomic_load_zu(&eset->npages, size_t extents_npages = ecache_npages_get(ecache);
ATOMIC_RELAXED);
if (extents_npages <= npages_min) { if (extents_npages <= npages_min) {
edata = NULL; edata = NULL;
goto label_return; goto label_return;
} }
eset_remove_locked(tsdn, eset, edata); eset_remove(&ecache->eset, edata);
if (!eset->delay_coalesce) { if (!ecache->eset.delay_coalesce) {
break; break;
} }
/* Try to coalesce. */ /* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, arena, ehooks, rtree_ctx, if (extent_try_delayed_coalesce(tsdn, arena, ehooks, rtree_ctx,
eset, edata)) { ecache, edata)) {
break; break;
} }
/* /*
@ -254,7 +253,7 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
* Either mark the extent active or deregister it to protect against * Either mark the extent active or deregister it to protect against
* concurrent operations. * concurrent operations.
*/ */
-    switch (eset_state_get(eset)) {
+    switch (eset_state_get(&ecache->eset)) {
     case extent_state_active:
         not_reached();
     case extent_state_dirty:
@@ -269,7 +268,7 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
     }
 label_return:
-    malloc_mutex_unlock(tsdn, &eset->mtx);
+    malloc_mutex_unlock(tsdn, &ecache->mtx);
     return edata;
 }
@@ -278,8 +277,8 @@ label_return:
  * indicates OOM), e.g. when trying to split an existing extent.
  */
 static void
-extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
-    edata_t *edata, bool growing_retained) {
+extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+    ecache_t *ecache, edata_t *edata, bool growing_retained) {
     size_t sz = edata_size_get(edata);
     if (config_stats) {
         arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
@@ -288,7 +287,7 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
      * Leak extent after making sure its pages have already been purged, so
      * that this is only a virtual memory leak.
      */
-    if (eset_state_get(eset) == extent_state_dirty) {
+    if (eset_state_get(&ecache->eset) == extent_state_dirty) {
         if (extent_purge_lazy_impl(tsdn, arena, ehooks, edata, 0, sz,
             growing_retained)) {
             extent_purge_forced_impl(tsdn, arena, ehooks, edata, 0,
@@ -299,30 +298,30 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
 }
 
 static void
-extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
+extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, ecache_t *ecache,
     edata_t *edata) {
     assert(edata_arena_ind_get(edata) == arena_ind_get(arena));
     assert(edata_state_get(edata) == extent_state_active);
 
-    edata_state_set(edata, eset_state_get(eset));
-    eset_insert_locked(tsdn, eset, edata);
+    edata_state_set(edata, eset_state_get(&ecache->eset));
+    eset_insert(&ecache->eset, edata);
 }
 
 static void
-extent_deactivate(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
+extent_deactivate(tsdn_t *tsdn, arena_t *arena, ecache_t *ecache,
     edata_t *edata) {
-    malloc_mutex_lock(tsdn, &eset->mtx);
-    extent_deactivate_locked(tsdn, arena, eset, edata);
-    malloc_mutex_unlock(tsdn, &eset->mtx);
+    malloc_mutex_lock(tsdn, &ecache->mtx);
+    extent_deactivate_locked(tsdn, arena, ecache, edata);
+    malloc_mutex_unlock(tsdn, &ecache->mtx);
 }
 
 static void
-extent_activate_locked(tsdn_t *tsdn, arena_t *arena, eset_t *eset,
+extent_activate_locked(tsdn_t *tsdn, arena_t *arena, ecache_t *ecache,
     edata_t *edata) {
     assert(edata_arena_ind_get(edata) == arena_ind_get(arena));
-    assert(edata_state_get(edata) == eset_state_get(eset));
+    assert(edata_state_get(edata) == eset_state_get(&ecache->eset));
 
-    eset_remove_locked(tsdn, eset, edata);
+    eset_remove(&ecache->eset, edata);
     edata_state_set(edata, extent_state_active);
 }
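
The pattern above is the core of the refactor: the mutex now lives in the ecache wrapper, the eset it protects is reached through ecache->eset, and the locked activate/deactivate helpers only touch that inner eset. Below is a minimal standalone model of that shape; it uses pthread_mutex_t in place of malloc_mutex_t and a toy singly-linked eset, and every name other than mtx and eset is an assumption for illustration, not the actual ecache definition.

#include <pthread.h>
#include <stddef.h>

/* Toy stand-ins for the real jemalloc types (illustration only). */
typedef enum { extent_state_active, extent_state_dirty } extent_state_t;
typedef struct edata_s { extent_state_t state; struct edata_s *next; } edata_t;
typedef struct eset_s {
    extent_state_t state; /* state shared by every extent in the set */
    edata_t *head;        /* toy LIFO standing in for the real heaps */
} eset_t;

/* The wrapper introduced here: the mutex is pulled out of the eset. */
typedef struct ecache_s {
    pthread_mutex_t mtx;
    eset_t eset;
} ecache_t;

/* Analogue of extent_deactivate_locked(): return an active extent to the cache. */
static void
ecache_deactivate_locked(ecache_t *ecache, edata_t *edata) {
    edata->state = ecache->eset.state;
    edata->next = ecache->eset.head;
    ecache->eset.head = edata;
}

/* Analogue of extent_activate_locked(): take an extent back out for reuse. */
static edata_t *
ecache_activate_locked(ecache_t *ecache) {
    edata_t *edata = ecache->eset.head;
    if (edata != NULL) {
        ecache->eset.head = edata->next;
        edata->state = extent_state_active;
    }
    return edata;
}

The unlocked wrappers in the diff then reduce to: lock ecache->mtx, call the locked helper, unlock.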
@@ -515,12 +514,12 @@ extent_deregister_no_gdump_sub(tsdn_t *tsdn, edata_t *edata) {
 }
 
 /*
- * Tries to find and remove an extent from eset that can be used for the
+ * Tries to find and remove an extent from ecache that can be used for the
  * given allocation request.
  */
 static edata_t *
 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, eset_t *eset, void *new_addr, size_t size,
+    rtree_ctx_t *rtree_ctx, ecache_t *ecache, void *new_addr, size_t size,
     size_t pad, size_t alignment, bool slab, bool growing_retained) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, growing_retained ? 1 : 0);
@@ -543,7 +542,7 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     }
     size_t esize = size + pad;
-    malloc_mutex_lock(tsdn, &eset->mtx);
+    malloc_mutex_lock(tsdn, &ecache->mtx);
     edata_t *edata;
     if (new_addr != NULL) {
         edata = extent_lock_edata_from_addr(tsdn, rtree_ctx, new_addr,
@@ -557,21 +556,22 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
             assert(edata_base_get(edata) == new_addr);
             if (edata_arena_ind_get(edata) != arena_ind_get(arena)
                 || edata_size_get(edata) < esize
-                || edata_state_get(edata) != eset_state_get(eset)) {
+                || edata_state_get(edata)
+                    != eset_state_get(&ecache->eset)) {
                 edata = NULL;
             }
             extent_unlock_edata(tsdn, unlock_edata);
         }
     } else {
-        edata = eset_fit_locked(tsdn, eset, esize, alignment);
+        edata = eset_fit(&ecache->eset, esize, alignment);
     }
     if (edata == NULL) {
-        malloc_mutex_unlock(tsdn, &eset->mtx);
+        malloc_mutex_unlock(tsdn, &ecache->mtx);
         return NULL;
     }
-    extent_activate_locked(tsdn, arena, eset, edata);
-    malloc_mutex_unlock(tsdn, &eset->mtx);
+    extent_activate_locked(tsdn, arena, ecache, edata);
+    malloc_mutex_unlock(tsdn, &ecache->mtx);
     return edata;
 }
@@ -580,7 +580,7 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
  * Given an allocation request and an extent guaranteed to be able to satisfy
  * it, this splits off lead and trail extents, leaving edata pointing to an
  * extent satisfying the allocation.
- * This function doesn't put lead or trail into any eset_t; it's the caller's
+ * This function doesn't put lead or trail into any ecache; it's the caller's
  * job to ensure that they can be reused.
  */
 typedef enum {
@@ -676,11 +676,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
  * This fulfills the indicated allocation request out of the given extent (which
  * the caller should have ensured was big enough). If there's any unused space
  * before or after the resulting allocation, that space is given its own extent
- * and put back into eset.
+ * and put back into ecache.
  */
 static edata_t *
 extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, eset_t *eset, void *new_addr, size_t size,
+    rtree_ctx_t *rtree_ctx, ecache_t *ecache, void *new_addr, size_t size,
     size_t pad, size_t alignment, bool slab, szind_t szind, edata_t *edata,
     bool growing_retained) {
     edata_t *lead;
@@ -697,19 +697,19 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         && !opt_retain) {
         /*
          * Split isn't supported (implies Windows w/o retain). Avoid
-         * leaking the eset.
+         * leaking the extent.
          */
         assert(to_leak != NULL && lead == NULL && trail == NULL);
-        extent_deactivate(tsdn, arena, eset, to_leak);
+        extent_deactivate(tsdn, arena, ecache, to_leak);
         return NULL;
     }
     if (result == extent_split_interior_ok) {
         if (lead != NULL) {
-            extent_deactivate(tsdn, arena, eset, lead);
+            extent_deactivate(tsdn, arena, ecache, lead);
         }
         if (trail != NULL) {
-            extent_deactivate(tsdn, arena, eset, trail);
+            extent_deactivate(tsdn, arena, ecache, trail);
         }
         return edata;
     } else {
@@ -724,7 +724,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         if (to_leak != NULL) {
             void *leak = edata_base_get(to_leak);
             extent_deregister_no_gdump_sub(tsdn, to_leak);
-            extents_abandon_vm(tsdn, arena, ehooks, eset, to_leak,
+            extents_abandon_vm(tsdn, arena, ehooks, ecache, to_leak,
                 growing_retained);
             assert(extent_lock_edata_from_addr(tsdn, rtree_ctx, leak,
                 false) == NULL);
@@ -736,10 +736,10 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 /*
  * Tries to satisfy the given allocation request by reusing one of the extents
- * in the given eset_t.
+ * in the given ecache_t.
  */
 static edata_t *
-extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
+extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
     szind_t szind, bool *zero, bool *commit, bool growing_retained) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -752,13 +752,13 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
     edata_t *edata = extent_recycle_extract(tsdn, arena, ehooks,
-        rtree_ctx, eset, new_addr, size, pad, alignment, slab,
+        rtree_ctx, ecache, new_addr, size, pad, alignment, slab,
         growing_retained);
     if (edata == NULL) {
         return NULL;
     }
-    edata = extent_recycle_split(tsdn, arena, ehooks, rtree_ctx, eset,
+    edata = extent_recycle_split(tsdn, arena, ehooks, rtree_ctx, ecache,
         new_addr, size, pad, alignment, slab, szind, edata,
         growing_retained);
     if (edata == NULL) {
@@ -768,7 +768,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
     if (*commit && !edata_committed_get(edata)) {
         if (extent_commit_impl(tsdn, arena, ehooks, edata, 0,
             edata_size_get(edata), growing_retained)) {
-            extent_record(tsdn, arena, ehooks, eset, edata,
+            extent_record(tsdn, arena, ehooks, ecache, edata,
                 growing_retained);
             return NULL;
         }
@@ -810,7 +810,7 @@ static edata_t *
 extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
     bool *zero, bool *commit) {
-    malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
+    malloc_mutex_assert_owner(tsdn, &arena->ecache_grow.mtx);
     assert(pad == 0 || !slab);
     assert(!*zero || !slab);
@@ -825,15 +825,15 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
      * satisfy this request.
      */
     pszind_t egn_skip = 0;
-    size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+    size_t alloc_size = sz_pind2sz(arena->ecache_grow.next + egn_skip);
     while (alloc_size < alloc_size_min) {
         egn_skip++;
-        if (arena->extent_grow_next + egn_skip >=
+        if (arena->ecache_grow.next + egn_skip >=
             sz_psz2ind(SC_LARGE_MAXCLASS)) {
             /* Outside legal range. */
             goto label_err;
         }
-        alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+        alloc_size = sz_pind2sz(arena->ecache_grow.next + egn_skip);
     }
     edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache,
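
To make the search above concrete: starting from the current grow index, the loop keeps stepping to the next page-size class until one is large enough for the request, bailing out if it runs past the largest legal class. The standalone sketch below models the size classes as powers of two starting at 4 KiB; that mapping and the NPSIZES bound are assumptions for illustration, not jemalloc's real sz_pind2sz()/sz_psz2ind() classes.

#include <stdbool.h>
#include <stddef.h>

#define NPSIZES 40 /* assumed stand-in for sz_psz2ind(SC_LARGE_MAXCLASS) */

/* Toy stand-in for sz_pind2sz(): class i maps to 4KiB << i. */
static size_t
pind2sz(size_t pind) {
    return (size_t)4096 << pind;
}

/*
 * Finds how many classes past grow_next must be skipped so that the class can
 * hold alloc_size_min; returns false if no legal class is large enough.
 */
static bool
grow_size_search(size_t grow_next, size_t alloc_size_min, size_t *egn_skip,
    size_t *alloc_size) {
    *egn_skip = 0;
    *alloc_size = pind2sz(grow_next);
    while (*alloc_size < alloc_size_min) {
        (*egn_skip)++;
        if (grow_next + *egn_skip >= NPSIZES) {
            return false; /* outside legal range */
        }
        *alloc_size = pind2sz(grow_next + *egn_skip);
    }
    return true;
}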
@@ -881,11 +881,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     if (result == extent_split_interior_ok) {
         if (lead != NULL) {
             extent_record(tsdn, arena, ehooks,
-                &arena->eset_retained, lead, true);
+                &arena->ecache_retained, lead, true);
         }
         if (trail != NULL) {
             extent_record(tsdn, arena, ehooks,
-                &arena->eset_retained, trail, true);
+                &arena->ecache_retained, trail, true);
         }
     } else {
         /*
@@ -898,12 +898,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
                 extent_gdump_add(tsdn, to_salvage);
             }
             extent_record(tsdn, arena, ehooks,
-                &arena->eset_retained, to_salvage, true);
+                &arena->ecache_retained, to_salvage, true);
         }
         if (to_leak != NULL) {
             extent_deregister_no_gdump_sub(tsdn, to_leak);
             extents_abandon_vm(tsdn, arena, ehooks,
-                &arena->eset_retained, to_leak, true);
+                &arena->ecache_retained, to_leak, true);
         }
         goto label_err;
     }
@@ -912,7 +912,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         if (extent_commit_impl(tsdn, arena, ehooks, edata, 0,
             edata_size_get(edata), true)) {
             extent_record(tsdn, arena, ehooks,
-                &arena->eset_retained, edata, true);
+                &arena->ecache_retained, edata, true);
             goto label_err;
         }
         /* A successful commit should return zeroed memory. */
@@ -930,14 +930,14 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
      * Increment extent_grow_next if doing so wouldn't exceed the allowed
      * range.
      */
-    if (arena->extent_grow_next + egn_skip + 1 <=
-        arena->retain_grow_limit) {
-        arena->extent_grow_next += egn_skip + 1;
+    if (arena->ecache_grow.next + egn_skip + 1 <=
+        arena->ecache_grow.limit) {
+        arena->ecache_grow.next += egn_skip + 1;
     } else {
-        arena->extent_grow_next = arena->retain_grow_limit;
+        arena->ecache_grow.next = arena->ecache_grow.limit;
     }
     /* All opportunities for failure are past. */
-    malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+    malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
 
     if (config_prof) {
         /* Adjust gdump stats now that extent is final size. */
@@ -962,7 +962,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     return edata;
 label_err:
-    malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+    malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
     return NULL;
 }
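
The arena->ecache_grow accesses above imply the shape of the new grow-info struct that replaces extent_grow_next, retain_grow_limit, and extent_grow_mtx. Below is a sketch of that shape, inferred from the usage in this diff rather than taken from the new header, with pthread_mutex_t standing in for malloc_mutex_t and pszind_t assumed to be an unsigned class index, together with the clamped advance performed on the success path.

#include <pthread.h>

typedef unsigned pszind_t; /* assumed: page-size-class index */

/* Inferred shape of the grow info that moves out of arena_s. */
typedef struct ecache_grow_s {
    /* Protects next/limit; held across extent_grow_retained(). */
    pthread_mutex_t mtx;
    /* Next size class in the growing series to request from the hooks. */
    pszind_t next;
    /* Upper bound on next (the old retain_grow_limit). */
    pszind_t limit;
} ecache_grow_t;

/* Success-path advance: grow geometrically, but never past the limit. */
static void
ecache_grow_advance(ecache_grow_t *grow, pszind_t egn_skip) {
    if (grow->next + egn_skip + 1 <= grow->limit) {
        grow->next += egn_skip + 1;
    } else {
        grow->next = grow->limit;
    }
}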
@@ -973,13 +973,13 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     assert(size != 0);
     assert(alignment != 0);
 
-    malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
+    malloc_mutex_lock(tsdn, &arena->ecache_grow.mtx);
 
     edata_t *edata = extent_recycle(tsdn, arena, ehooks,
-        &arena->eset_retained, new_addr, size, pad, alignment, slab,
+        &arena->ecache_retained, new_addr, size, pad, alignment, slab,
         szind, zero, commit, true);
     if (edata != NULL) {
-        malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+        malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
         if (config_prof) {
             extent_gdump_add(tsdn, edata);
         }
@@ -988,9 +988,9 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
             alignment, slab, szind, zero, commit);
         /* extent_grow_retained() always releases extent_grow_mtx. */
     } else {
-        malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+        malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
     }
-    malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
+    malloc_mutex_assert_not_owner(tsdn, &arena->ecache_grow.mtx);
 
     return edata;
 }
@@ -1054,7 +1054,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 }
 
 static bool
-extent_can_coalesce(arena_t *arena, eset_t *eset, const edata_t *inner,
+extent_can_coalesce(arena_t *arena, ecache_t *ecache, const edata_t *inner,
     const edata_t *outer) {
     assert(edata_arena_ind_get(inner) == arena_ind_get(arena));
     if (edata_arena_ind_get(outer) != arena_ind_get(arena)) {
@@ -1062,7 +1062,7 @@ extent_can_coalesce(arena_t *arena, eset_t *eset, const edata_t *inner,
     }
     assert(edata_state_get(inner) == extent_state_active);
-    if (edata_state_get(outer) != eset->state) {
+    if (edata_state_get(outer) != ecache->eset.state) {
         return false;
     }
@@ -1074,19 +1074,20 @@ extent_can_coalesce(arena_t *arena, eset_t *eset, const edata_t *inner,
 }
 
 static bool
-extent_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
-    edata_t *inner, edata_t *outer, bool forward, bool growing_retained) {
-    assert(extent_can_coalesce(arena, eset, inner, outer));
+extent_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+    ecache_t *ecache, edata_t *inner, edata_t *outer, bool forward,
+    bool growing_retained) {
+    assert(extent_can_coalesce(arena, ecache, inner, outer));
 
-    extent_activate_locked(tsdn, arena, eset, outer);
+    extent_activate_locked(tsdn, arena, ecache, outer);
 
-    malloc_mutex_unlock(tsdn, &eset->mtx);
+    malloc_mutex_unlock(tsdn, &ecache->mtx);
     bool err = extent_merge_impl(tsdn, arena, ehooks,
         forward ? inner : outer, forward ? outer : inner, growing_retained);
-    malloc_mutex_lock(tsdn, &eset->mtx);
+    malloc_mutex_lock(tsdn, &ecache->mtx);
 
     if (err) {
-        extent_deactivate_locked(tsdn, arena, eset, outer);
+        extent_deactivate_locked(tsdn, arena, ecache, outer);
     }
 
     return err;
@@ -1094,7 +1095,7 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
 
 static edata_t *
 extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, bool *coalesced,
+    rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata, bool *coalesced,
     bool growing_retained, bool inactive_only) {
     /*
      * We avoid checking / locking inactive neighbors for large size
@@ -1114,19 +1115,19 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
             edata_past_get(edata), inactive_only);
         if (next != NULL) {
             /*
-             * eset->mtx only protects against races for
-             * like-state eset, so call extent_can_coalesce()
+             * ecache->mtx only protects against races for
+             * like-state extents, so call extent_can_coalesce()
              * before releasing next's pool lock.
              */
-            bool can_coalesce = extent_can_coalesce(arena, eset,
+            bool can_coalesce = extent_can_coalesce(arena, ecache,
                 edata, next);
             extent_unlock_edata(tsdn, next);
 
             if (can_coalesce && !extent_coalesce(tsdn, arena,
-                ehooks, eset, edata, next, true,
+                ehooks, ecache, edata, next, true,
                 growing_retained)) {
-                if (eset->delay_coalesce) {
+                if (ecache->eset.delay_coalesce) {
                     /* Do minimal coalescing. */
                     *coalesced = true;
                     return edata;
@@ -1139,15 +1140,15 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         edata_t *prev = extent_lock_edata_from_addr(tsdn, rtree_ctx,
             edata_before_get(edata), inactive_only);
         if (prev != NULL) {
-            bool can_coalesce = extent_can_coalesce(arena, eset,
+            bool can_coalesce = extent_can_coalesce(arena, ecache,
                 edata, prev);
             extent_unlock_edata(tsdn, prev);
 
             if (can_coalesce && !extent_coalesce(tsdn, arena,
-                ehooks, eset, edata, prev, false,
+                ehooks, ecache, edata, prev, false,
                 growing_retained)) {
                 edata = prev;
-                if (eset->delay_coalesce) {
+                if (ecache->eset.delay_coalesce) {
                     /* Do minimal coalescing. */
                     *coalesced = true;
                     return edata;
@@ -1157,7 +1158,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         }
     } while (again);
 
-    if (eset->delay_coalesce) {
+    if (ecache->eset.delay_coalesce) {
         *coalesced = false;
     }
     return edata;
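
Two details in the coalescing path above are worth calling out: ecache->mtx is deliberately dropped around the merge itself (extent_coalesce activates the neighbor, unlocks, merges, relocks, and puts the neighbor back on failure), and delay_coalesce makes the loop return after a single successful merge. Below is a condensed, compilable model of that lock ordering; the helper names are placeholder stubs introduced for illustration, not real jemalloc APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct { pthread_mutex_t mtx; /* ... wrapped eset ... */ } ecache_t;
typedef struct edata_s edata_t;

/* Placeholder stubs that just mark the steps of the real calls. */
static void activate_locked(ecache_t *e, edata_t *d) { (void)e; (void)d; }
static void deactivate_locked(ecache_t *e, edata_t *d) { (void)e; (void)d; }
static bool merge_extents(edata_t *inner, edata_t *outer) {
    (void)inner; (void)outer;
    return false; /* false == success, mirroring extent_merge_impl */
}

/* Model of extent_coalesce(): caller holds ecache->mtx on entry and exit. */
static bool
coalesce_model(ecache_t *ecache, edata_t *inner, edata_t *outer) {
    /* Take the neighbor out of the cache while still holding the lock. */
    activate_locked(ecache, outer);
    /* Drop the cache lock for the merge, which may acquire other locks. */
    pthread_mutex_unlock(&ecache->mtx);
    bool err = merge_extents(inner, outer);
    pthread_mutex_lock(&ecache->mtx);
    /* On failure, put the neighbor back where it was. */
    if (err) {
        deactivate_locked(ecache, outer);
    }
    return err;
}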
@@ -1165,35 +1166,35 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 
 static edata_t *
 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, bool *coalesced,
+    rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata, bool *coalesced,
     bool growing_retained) {
-    return extent_try_coalesce_impl(tsdn, arena, ehooks, rtree_ctx, eset,
+    return extent_try_coalesce_impl(tsdn, arena, ehooks, rtree_ctx, ecache,
         edata, coalesced, growing_retained, false);
 }
 
 static edata_t *
 extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, eset_t *eset, edata_t *edata, bool *coalesced,
+    rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata, bool *coalesced,
     bool growing_retained) {
-    return extent_try_coalesce_impl(tsdn, arena, ehooks, rtree_ctx, eset,
+    return extent_try_coalesce_impl(tsdn, arena, ehooks, rtree_ctx, ecache,
         edata, coalesced, growing_retained, true);
 }
 
 /*
  * Does the metadata management portions of putting an unused extent into the
- * given eset_t (coalesces, deregisters slab interiors, the heap operations).
+ * given ecache_t (coalesces, deregisters slab interiors, the heap operations).
  */
 static void
-extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
+extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *edata, bool growing_retained) {
     rtree_ctx_t rtree_ctx_fallback;
     rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
-    assert((eset_state_get(eset) != extent_state_dirty &&
-        eset_state_get(eset) != extent_state_muzzy) ||
+    assert((eset_state_get(&ecache->eset) != extent_state_dirty &&
+        eset_state_get(&ecache->eset) != extent_state_muzzy) ||
         !edata_zeroed_get(edata));
 
-    malloc_mutex_lock(tsdn, &eset->mtx);
+    malloc_mutex_lock(tsdn, &ecache->mtx);
 
     edata_szind_set(edata, SC_NSIZES);
     if (edata_slab_get(edata)) {
@@ -1204,29 +1205,29 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, eset_t *eset,
     assert(rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
         (uintptr_t)edata_base_get(edata), true) == edata);
 
-    if (!eset->delay_coalesce) {
+    if (!ecache->eset.delay_coalesce) {
         edata = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx,
-            eset, edata, NULL, growing_retained);
+            ecache, edata, NULL, growing_retained);
     } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
-        assert(eset == &arena->eset_dirty);
-        /* Always coalesce large eset eagerly. */
+        assert(ecache == &arena->ecache_dirty);
+        /* Always coalesce large extents eagerly. */
         bool coalesced;
         do {
             assert(edata_state_get(edata) == extent_state_active);
             edata = extent_try_coalesce_large(tsdn, arena, ehooks,
-                rtree_ctx, eset, edata, &coalesced,
+                rtree_ctx, ecache, edata, &coalesced,
                 growing_retained);
         } while (coalesced);
 
         if (edata_size_get(edata) >= oversize_threshold) {
             /* Shortcut to purge the oversize extent eagerly. */
-            malloc_mutex_unlock(tsdn, &eset->mtx);
+            malloc_mutex_unlock(tsdn, &ecache->mtx);
             arena_decay_extent(tsdn, arena, ehooks, edata);
             return;
         }
     }
-    extent_deactivate_locked(tsdn, arena, eset, edata);
+    extent_deactivate_locked(tsdn, arena, ecache, edata);
 
-    malloc_mutex_unlock(tsdn, &eset->mtx);
+    malloc_mutex_unlock(tsdn, &ecache->mtx);
 }
 
 void
@@ -1312,7 +1313,8 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         extent_gdump_sub(tsdn, edata);
     }
 
-    extent_record(tsdn, arena, ehooks, &arena->eset_retained, edata, false);
+    extent_record(tsdn, arena, ehooks, &arena->ecache_retained, edata,
+        false);
 }
 
 void

View File

@@ -149,10 +149,10 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
     bool commit = true;
     edata_t *trail;
     bool new_mapping;
-    if ((trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_dirty,
+    if ((trail = extents_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
         edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
         &is_zeroed_trail, &commit)) != NULL
-        || (trail = extents_alloc(tsdn, arena, ehooks, &arena->eset_muzzy,
+        || (trail = extents_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
         edata_past_get(edata), trailsize, 0, CACHELINE, false, SC_NSIZES,
         &is_zeroed_trail, &commit)) != NULL) {
         if (config_stats) {

View File

@@ -142,7 +142,7 @@ TEST_BEGIN(test_retained) {
     size_t usable = 0;
     size_t fragmented = 0;
     for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
-        arena->extent_grow_next; pind++) {
+        arena->ecache_grow.next; pind++) {
         size_t psz = sz_pind2sz(pind);
         size_t psz_fragmented = psz % esz;
         size_t psz_usable = psz - psz_fragmented;