PA: Move in the ecache_t objects.
commit a24faed569
parent 585f925055

@@ -12,6 +12,7 @@
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/pa.h"
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/smoothstep.h"
@@ -150,15 +151,8 @@ struct arena_s {
 	/* Synchronizes all large allocation/update/deallocation. */
 	malloc_mutex_t large_mtx;
 
-	/*
-	 * Collections of extents that were previously allocated. These are
-	 * used when allocating extents, in an attempt to re-use address space.
-	 *
-	 * Synchronization: internal.
-	 */
-	ecache_t ecache_dirty;
-	ecache_t ecache_muzzy;
-	ecache_t ecache_retained;
+	/* The page-level allocator shard this arena uses. */
+	pa_shard_t pa_shard;
 
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state

include/jemalloc/internal/pa.h

@@ -6,4 +6,17 @@
  * allocations.
  */
 
+typedef struct pa_shard_s pa_shard_t;
+struct pa_shard_s {
+	/*
+	 * Collections of extents that were previously allocated. These are
+	 * used when allocating extents, in an attempt to re-use address space.
+	 *
+	 * Synchronization: internal.
+	 */
+	ecache_t ecache_dirty;
+	ecache_t ecache_muzzy;
+	ecache_t ecache_retained;
+};
+
 #endif /* JEMALLOC_INTERNAL_PA_H */
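
Note: the two header hunks above move the ecache_t members out of arena_s and
into the new pa_shard_t, so every access in the sources below now goes through
the embedded shard. A minimal sketch of the before/after pattern (illustrative
only; example_dirty_npages is a hypothetical helper, not part of this commit):

/* Illustrative sketch, not part of the diff. */
static size_t
example_dirty_npages(arena_t *arena) {
	/* Before this commit: return ecache_npages_get(&arena->ecache_dirty); */
	/* After: the same cache is a field of the page allocator shard. */
	return ecache_npages_get(&arena->pa_shard.ecache_dirty);
}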

src/arena.c (112 changed lines)

@@ -74,8 +74,8 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
 	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
 	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
-	*ndirty += ecache_npages_get(&arena->ecache_dirty);
-	*nmuzzy += ecache_npages_get(&arena->ecache_muzzy);
+	*ndirty += ecache_npages_get(&arena->pa_shard.ecache_dirty);
+	*nmuzzy += ecache_npages_get(&arena->pa_shard.ecache_muzzy);
 }
 
 void
@@ -98,7 +98,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->mapped, base_mapped
 	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
 	arena_stats_accum_zu(&astats->retained,
-	    ecache_npages_get(&arena->ecache_retained) << LG_PAGE);
+	    ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
 
 	atomic_store_zu(&astats->edata_avail,
 	    atomic_load_zu(&arena->edata_cache.count, ATOMIC_RELAXED),
@@ -129,8 +129,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
 	arena_stats_accum_zu(&astats->resident, base_resident +
 	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
-	    ecache_npages_get(&arena->ecache_dirty) +
-	    ecache_npages_get(&arena->ecache_muzzy)) << LG_PAGE)));
+	    ecache_npages_get(&arena->pa_shard.ecache_dirty) +
+	    ecache_npages_get(&arena->pa_shard.ecache_muzzy)) << LG_PAGE)));
 	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
 	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));
 
@@ -172,12 +172,16 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
 		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
 		    retained_bytes;
-		dirty = ecache_nextents_get(&arena->ecache_dirty, i);
-		muzzy = ecache_nextents_get(&arena->ecache_muzzy, i);
-		retained = ecache_nextents_get(&arena->ecache_retained, i);
-		dirty_bytes = ecache_nbytes_get(&arena->ecache_dirty, i);
-		muzzy_bytes = ecache_nbytes_get(&arena->ecache_muzzy, i);
-		retained_bytes = ecache_nbytes_get(&arena->ecache_retained, i);
+		dirty = ecache_nextents_get(&arena->pa_shard.ecache_dirty, i);
+		muzzy = ecache_nextents_get(&arena->pa_shard.ecache_muzzy, i);
+		retained = ecache_nextents_get(&arena->pa_shard.ecache_retained,
+		    i);
+		dirty_bytes = ecache_nbytes_get(&arena->pa_shard.ecache_dirty,
+		    i);
+		muzzy_bytes = ecache_nbytes_get(&arena->pa_shard.ecache_muzzy,
+		    i);
+		retained_bytes = ecache_nbytes_get(
+		    &arena->pa_shard.ecache_retained, i);
 
 		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
 		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
@@ -226,11 +230,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
 	READ_ARENA_MUTEX_PROF_DATA(edata_cache.mtx,
 	    arena_prof_mutex_extent_avail)
-	READ_ARENA_MUTEX_PROF_DATA(ecache_dirty.mtx,
+	READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_dirty.mtx,
 	    arena_prof_mutex_extents_dirty)
-	READ_ARENA_MUTEX_PROF_DATA(ecache_muzzy.mtx,
+	READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_muzzy.mtx,
 	    arena_prof_mutex_extents_muzzy)
-	READ_ARENA_MUTEX_PROF_DATA(ecache_retained.mtx,
+	READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_retained.mtx,
 	    arena_prof_mutex_extents_retained)
 	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
 	    arena_prof_mutex_decay_dirty)
@@ -258,7 +262,8 @@ arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	ecache_dalloc(tsdn, arena, ehooks, &arena->ecache_dirty, edata);
+	ecache_dalloc(tsdn, arena, ehooks, &arena->pa_shard.ecache_dirty,
+	    edata);
 	if (arena_dirty_decay_ms_get(arena) == 0) {
 		arena_decay_dirty(tsdn, arena, false, true);
 	} else {
@@ -434,16 +439,18 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	szind_t szind = sz_size2index(usize);
 	size_t mapped_add;
 	size_t esize = usize + sz_large_pad;
-	edata_t *edata = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
-	    NULL, esize, alignment, false, szind, zero);
+	edata_t *edata = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_dirty, NULL, esize, alignment, false, szind,
+	    zero);
 	if (edata == NULL && arena_may_have_muzzy(arena)) {
-		edata = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
-		    NULL, esize, alignment, false, szind, zero);
+		edata = ecache_alloc(tsdn, arena, ehooks,
+		    &arena->pa_shard.ecache_muzzy, NULL, esize, alignment,
+		    false, szind, zero);
 	}
 	if (edata == NULL) {
 		edata = ecache_alloc_grow(tsdn, arena, ehooks,
-		    &arena->ecache_retained, NULL, esize, alignment, false,
-		    szind, zero);
+		    &arena->pa_shard.ecache_retained, NULL, esize, alignment,
+		    false, szind, zero);
 		if (config_stats) {
 			/*
 			 * edata may be NULL on OOM, but in that case mapped_add
@@ -808,14 +815,14 @@ bool
 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
 	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
-	    &arena->ecache_dirty, decay_ms);
+	    &arena->pa_shard.ecache_dirty, decay_ms);
 }
 
 bool
 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t decay_ms) {
 	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
-	    &arena->ecache_muzzy, decay_ms);
+	    &arena->pa_shard.ecache_muzzy, decay_ms);
 }
 
 static size_t
@@ -867,7 +874,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 			    !extent_purge_lazy_wrapper(tsdn, arena,
 			    ehooks, edata, 0, edata_size_get(edata))) {
 				ecache_dalloc(tsdn, arena, ehooks,
-				    &arena->ecache_muzzy, edata);
+				    &arena->pa_shard.ecache_muzzy, edata);
 				arena_background_thread_inactivity_check(tsdn,
 				    arena, is_background_thread);
 				break;
@@ -978,18 +985,18 @@ static bool
 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
 	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
-	    &arena->ecache_dirty, is_background_thread, all);
+	    &arena->pa_shard.ecache_dirty, is_background_thread, all);
 }
 
 static bool
 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
     bool all) {
-	if (ecache_npages_get(&arena->ecache_muzzy) == 0 &&
+	if (ecache_npages_get(&arena->pa_shard.ecache_muzzy) == 0 &&
 	    arena_muzzy_decay_ms_get(arena) <= 0) {
 		return false;
 	}
 	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
-	    &arena->ecache_muzzy, is_background_thread, all);
+	    &arena->pa_shard.ecache_muzzy, is_background_thread, all);
 }
 
 void
@@ -1159,7 +1166,7 @@ arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
 	ehooks_t *ehooks = arena_get_ehooks(arena);
 	edata_t *edata;
 	while ((edata = ecache_evict(tsdn, arena, ehooks,
-	    &arena->ecache_retained, 0)) != NULL) {
+	    &arena->pa_shard.ecache_retained, 0)) != NULL) {
 		extent_destroy_wrapper(tsdn, arena, ehooks, edata);
 	}
 }
@@ -1175,8 +1182,8 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
 	 * extents, so only retained extents may remain.
 	 */
-	assert(ecache_npages_get(&arena->ecache_dirty) == 0);
-	assert(ecache_npages_get(&arena->ecache_muzzy) == 0);
+	assert(ecache_npages_get(&arena->pa_shard.ecache_dirty) == 0);
+	assert(ecache_npages_get(&arena->pa_shard.ecache_muzzy) == 0);
 
 	/* Deallocate retained memory. */
 	arena_destroy_retained(tsd_tsdn(tsd), arena);
@@ -1210,8 +1217,9 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	    WITNESS_RANK_CORE, 0);
 
 	zero = false;
-	slab = ecache_alloc_grow(tsdn, arena, ehooks, &arena->ecache_retained,
-	    NULL, bin_info->slab_size, PAGE, true, szind, &zero);
+	slab = ecache_alloc_grow(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_retained, NULL, bin_info->slab_size, PAGE,
+	    true, szind, &zero);
 
 	if (config_stats && slab != NULL) {
 		arena_stats_mapped_add(tsdn, &arena->stats,
@@ -1230,11 +1238,13 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	ehooks_t *ehooks = arena_get_ehooks(arena);
 	szind_t szind = sz_size2index(bin_info->reg_size);
 	bool zero = false;
-	edata_t *slab = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
-	    NULL, bin_info->slab_size, PAGE, true, binind, &zero);
+	edata_t *slab = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_dirty, NULL, bin_info->slab_size, PAGE,
+	    true, binind, &zero);
 	if (slab == NULL && arena_may_have_muzzy(arena)) {
-		slab = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
-		    NULL, bin_info->slab_size, PAGE, true, binind, &zero);
+		slab = ecache_alloc(tsdn, arena, ehooks,
+		    &arena->pa_shard.ecache_muzzy, NULL, bin_info->slab_size,
+		    PAGE, true, binind, &zero);
 	}
 	if (slab == NULL) {
 		slab = arena_slab_alloc_hard(tsdn, arena, ehooks, bin_info,
@@ -2023,16 +2033,16 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * are likely to be reused soon after deallocation, and the cost of
 	 * merging/splitting extents is non-trivial.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_dirty, extent_state_dirty, ind,
-	    true)) {
+	if (ecache_init(tsdn, &arena->pa_shard.ecache_dirty, extent_state_dirty,
+	    ind, true)) {
 		goto label_error;
 	}
 	/*
 	 * Coalesce muzzy extents immediately, because operations on them are in
 	 * the critical path much less often than for dirty extents.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_muzzy, extent_state_muzzy, ind,
-	    false)) {
+	if (ecache_init(tsdn, &arena->pa_shard.ecache_muzzy, extent_state_muzzy,
+	    ind, false)) {
 		goto label_error;
 	}
 	/*
@@ -2041,8 +2051,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * coalescing), but also because operations on retained extents are not
 	 * in the critical path.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_retained, extent_state_retained,
-	    ind, false)) {
+	if (ecache_init(tsdn, &arena->pa_shard.ecache_retained,
+	    extent_state_retained, ind, false)) {
 		goto label_error;
 	}
 
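
Note: arena_new (hunks above) still calls ecache_init three times, now against
the fields of arena->pa_shard. A hypothetical follow-up sketch, assuming a
pa_shard_init helper that this commit does not define; the per-cache boolean
matches the value passed in arena_new above:

/* Hypothetical sketch; pa_shard_init does not exist in this commit. */
static bool
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, unsigned ind) {
	if (ecache_init(tsdn, &shard->ecache_dirty, extent_state_dirty, ind,
	    true)) {
		return true;
	}
	if (ecache_init(tsdn, &shard->ecache_muzzy, extent_state_muzzy, ind,
	    false)) {
		return true;
	}
	if (ecache_init(tsdn, &shard->ecache_retained, extent_state_retained,
	    ind, false)) {
		return true;
	}
	return false;
}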
@@ -2198,9 +2208,9 @@ arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
-	ecache_prefork(tsdn, &arena->ecache_dirty);
-	ecache_prefork(tsdn, &arena->ecache_muzzy);
-	ecache_prefork(tsdn, &arena->ecache_retained);
+	ecache_prefork(tsdn, &arena->pa_shard.ecache_dirty);
+	ecache_prefork(tsdn, &arena->pa_shard.ecache_muzzy);
+	ecache_prefork(tsdn, &arena->pa_shard.ecache_retained);
 }
 
 void
@@ -2240,9 +2250,9 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
 	base_postfork_parent(tsdn, arena->base);
 	edata_cache_postfork_parent(tsdn, &arena->edata_cache);
-	ecache_postfork_parent(tsdn, &arena->ecache_dirty);
-	ecache_postfork_parent(tsdn, &arena->ecache_muzzy);
-	ecache_postfork_parent(tsdn, &arena->ecache_retained);
+	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_dirty);
+	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_muzzy);
+	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_retained);
 	ecache_grow_postfork_parent(tsdn, &arena->ecache_grow);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
@@ -2286,9 +2296,9 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
 	base_postfork_child(tsdn, arena->base);
 	edata_cache_postfork_child(tsdn, &arena->edata_cache);
-	ecache_postfork_child(tsdn, &arena->ecache_dirty);
-	ecache_postfork_child(tsdn, &arena->ecache_muzzy);
-	ecache_postfork_child(tsdn, &arena->ecache_retained);
+	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_dirty);
+	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_muzzy);
+	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_retained);
 	ecache_grow_postfork_child(tsdn, &arena->ecache_grow);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);

src/background_thread.c

@@ -202,12 +202,12 @@ static uint64_t
 arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
 	uint64_t i1, i2;
 	i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
-	    &arena->ecache_dirty);
+	    &arena->pa_shard.ecache_dirty);
 	if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
 		return i1;
 	}
 	i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
-	    &arena->ecache_muzzy);
+	    &arena->pa_shard.ecache_muzzy);
 
 	return i1 < i2 ? i1 : i2;
 }
@@ -717,8 +717,8 @@ background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
 	if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
 		should_signal = true;
 	} else if (unlikely(background_thread_indefinite_sleep(info)) &&
-	    (ecache_npages_get(&arena->ecache_dirty) > 0 ||
-	    ecache_npages_get(&arena->ecache_muzzy) > 0 ||
+	    (ecache_npages_get(&arena->pa_shard.ecache_dirty) > 0 ||
+	    ecache_npages_get(&arena->pa_shard.ecache_muzzy) > 0 ||
 	    info->npages_to_purge_new > 0)) {
 		should_signal = true;
 	} else {

src/ctl.c

@@ -3073,9 +3073,9 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
 		}
 		MUTEX_PROF_RESET(arena->large_mtx);
 		MUTEX_PROF_RESET(arena->edata_cache.mtx);
-		MUTEX_PROF_RESET(arena->ecache_dirty.mtx);
-		MUTEX_PROF_RESET(arena->ecache_muzzy.mtx);
-		MUTEX_PROF_RESET(arena->ecache_retained.mtx);
+		MUTEX_PROF_RESET(arena->pa_shard.ecache_dirty.mtx);
+		MUTEX_PROF_RESET(arena->pa_shard.ecache_muzzy.mtx);
+		MUTEX_PROF_RESET(arena->pa_shard.ecache_retained.mtx);
 		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
 		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
 		MUTEX_PROF_RESET(arena->tcache_ql_mtx);

src/extent.c (20 changed lines)

@@ -686,11 +686,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	if (result == extent_split_interior_ok) {
 		if (lead != NULL) {
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, lead, true);
+			    &arena->pa_shard.ecache_retained, lead, true);
 		}
 		if (trail != NULL) {
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, trail, true);
+			    &arena->pa_shard.ecache_retained, trail, true);
 		}
 	} else {
 		/*
@@ -703,12 +703,12 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 				extent_gdump_add(tsdn, to_salvage);
 			}
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, to_salvage, true);
+			    &arena->pa_shard.ecache_retained, to_salvage, true);
 		}
 		if (to_leak != NULL) {
 			extent_deregister_no_gdump_sub(tsdn, to_leak);
 			extents_abandon_vm(tsdn, arena, ehooks,
-			    &arena->ecache_retained, to_leak, true);
+			    &arena->pa_shard.ecache_retained, to_leak, true);
 		}
 		goto label_err;
 	}
@@ -717,7 +717,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		if (extent_commit_impl(tsdn, ehooks, edata, 0,
 		    edata_size_get(edata), true)) {
 			extent_record(tsdn, arena, ehooks,
-			    &arena->ecache_retained, edata, true);
+			    &arena->pa_shard.ecache_retained, edata, true);
 			goto label_err;
 		}
 		/* A successful commit should return zeroed memory. */
@@ -774,8 +774,8 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	malloc_mutex_lock(tsdn, &arena->ecache_grow.mtx);
 
 	edata_t *edata = extent_recycle(tsdn, arena, ehooks,
-	    &arena->ecache_retained, new_addr, size, alignment, slab, szind,
-	    zero, commit, true);
+	    &arena->pa_shard.ecache_retained, new_addr, size, alignment, slab,
+	    szind, zero, commit, true);
 	if (edata != NULL) {
 		malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
 		if (config_prof) {
@@ -974,7 +974,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 		edata = extent_try_coalesce(tsdn, &arena->edata_cache, ehooks,
 		    ecache, edata, NULL, growing_retained);
 	} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
-		assert(ecache == &arena->ecache_dirty);
+		assert(ecache == &arena->pa_shard.ecache_dirty);
 		/* Always coalesce large extents eagerly. */
 		bool coalesced;
 		do {
@@ -1076,8 +1076,8 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		extent_gdump_sub(tsdn, edata);
 	}
 
-	extent_record(tsdn, arena, ehooks, &arena->ecache_retained, edata,
-	    false);
+	extent_record(tsdn, arena, ehooks, &arena->pa_shard.ecache_retained,
+	    edata, false);
 }
 
 void

src/large.c (16 changed lines)

@@ -119,19 +119,19 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	bool is_zeroed_trail = zero;
 	edata_t *trail;
 	bool new_mapping;
-	if ((trail = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_dirty,
-	    edata_past_get(edata), trailsize, CACHELINE, false, SC_NSIZES,
-	    &is_zeroed_trail)) != NULL
-	    || (trail = ecache_alloc(tsdn, arena, ehooks, &arena->ecache_muzzy,
-	    edata_past_get(edata), trailsize, CACHELINE, false, SC_NSIZES,
-	    &is_zeroed_trail)) != NULL) {
+	if ((trail = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_dirty, edata_past_get(edata), trailsize,
+	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL
+	    || (trail = ecache_alloc(tsdn, arena, ehooks,
+	    &arena->pa_shard.ecache_muzzy, edata_past_get(edata), trailsize,
+	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL) {
 		if (config_stats) {
 			new_mapping = false;
 		}
 	} else {
 		if ((trail = ecache_alloc_grow(tsdn, arena, ehooks,
-		    &arena->ecache_retained, edata_past_get(edata), trailsize,
-		    CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
+		    &arena->pa_shard.ecache_retained, edata_past_get(edata),
+		    trailsize, CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
 		    == NULL) {
 			return true;
 		}