PA: Move in nactive counter.

commit 527dd4cdb8
parent c075fd0bcb
@@ -67,13 +67,6 @@ struct arena_s {
      */
     atomic_u_t dss_prec;
 
-    /*
-     * Number of pages in active extents.
-     *
-     * Synchronization: atomic.
-     */
-    atomic_zu_t nactive;
-
     /*
      * Extant large allocations.
      *
@@ -73,6 +73,13 @@ struct pa_shard_stats_s {
  */
 typedef struct pa_shard_s pa_shard_t;
 struct pa_shard_s {
+    /*
+     * Number of pages in active extents.
+     *
+     * Synchronization: atomic.
+     */
+    atomic_zu_t nactive;
+
     /*
     * Collections of extents that were previously allocated. These are
     * used when allocating extents, in an attempt to re-use address space.
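The "Synchronization: atomic" note above means nactive is a plain relaxed atomic counter: increments, decrements, and reads must be atomic, but no ordering with other memory is implied. As a minimal, self-contained C11 analogue of that pattern (illustrative names only, not jemalloc's atomic_zu_t wrappers):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the shard's nactive field: a statistics counter that is
 * updated and read with relaxed ordering. */
static _Atomic size_t nactive_pages;

static void
nactive_add(size_t add_pages) {
    atomic_fetch_add_explicit(&nactive_pages, add_pages, memory_order_relaxed);
}

static void
nactive_sub(size_t sub_pages) {
    atomic_fetch_sub_explicit(&nactive_pages, sub_pages, memory_order_relaxed);
}

int
main(void) {
    nactive_add(8);     /* e.g. a 32 KiB slab with 4 KiB pages */
    nactive_sub(3);
    printf("active pages: %zu\n",
        atomic_load_explicit(&nactive_pages, memory_order_relaxed));  /* prints 5 */
    return 0;
}

This mirrors the pa_nactive_add()/pa_nactive_sub() helpers introduced in src/pa.c further down.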
src/arena.c
@@ -71,7 +71,7 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     *dss = dss_prec_names[arena_dss_prec_get(arena)];
     *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
     *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
-    *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
+    *nactive += atomic_load_zu(&arena->pa_shard.nactive, ATOMIC_RELAXED);
     *ndirty += ecache_npages_get(&arena->pa_shard.ecache_dirty);
     *nmuzzy += ecache_npages_get(&arena->pa_shard.ecache_muzzy);
 }
@@ -136,7 +136,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
     atomic_load_add_store_zu(&astats->metadata_thp, metadata_thp);
     atomic_load_add_store_zu(&astats->resident, base_resident +
-        (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
+        (((atomic_load_zu(&arena->pa_shard.nactive, ATOMIC_RELAXED) +
         ecache_npages_get(&arena->pa_shard.ecache_dirty) +
         ecache_npages_get(&arena->pa_shard.ecache_muzzy)) << LG_PAGE)));
     atomic_load_add_store_zu(&astats->pa_shard_stats.abandoned_vm,
@@ -386,17 +386,6 @@ arena_slab_reg_dalloc(edata_t *slab, slab_data_t *slab_data, void *ptr) {
     edata_nfree_inc(slab);
 }
 
-static void
-arena_nactive_add(arena_t *arena, size_t add_pages) {
-    atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages) {
-    assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
-    atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
-}
-
 static void
 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
     szind_t index, hindex;
@@ -457,7 +446,6 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
             }
             LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
         }
-        arena_nactive_add(arena, esize >> LG_PAGE);
     }
 
     if (edata != NULL && sz_large_pad != 0) {
@@ -475,35 +463,30 @@ arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
             edata_usize_get(edata));
         LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
-    arena_nactive_sub(arena, edata_size_get(edata) >> LG_PAGE);
 }
 
 void
 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t oldusize) {
     size_t usize = edata_usize_get(edata);
-    size_t udiff = oldusize - usize;
 
     if (config_stats) {
         LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
         arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
         LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
-    arena_nactive_sub(arena, udiff >> LG_PAGE);
 }
 
 void
 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t oldusize) {
     size_t usize = edata_usize_get(edata);
-    size_t udiff = usize - oldusize;
 
     if (config_stats) {
         LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
         arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
         LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
-    arena_nactive_add(arena, udiff >> LG_PAGE);
 }
 
 ssize_t
@@ -658,8 +641,6 @@ arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
 
 void
 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
-    arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);
-
     bool generated_dirty;
     pa_dalloc(tsdn, &arena->pa_shard, slab, &generated_dirty);
     if (generated_dirty) {
@@ -801,7 +782,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
         }
     }
 
-    atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+    atomic_store_zu(&arena->pa_shard.nactive, 0, ATOMIC_RELAXED);
 }
 
 static void
@@ -885,8 +866,6 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
     edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
     bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
 
-    arena_nactive_add(arena, edata_size_get(slab) >> LG_PAGE);
-
     return slab;
 }
 
@@ -1637,8 +1616,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
     atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
         ATOMIC_RELAXED);
 
-    atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
-
     edata_list_init(&arena->large);
     if (malloc_mutex_init(&arena->large_mtx, "arena_large",
         WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
@@ -3516,7 +3516,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
 #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
     defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
     /* Expose the underlying counter for fast read. */
-    pactivep = (size_t *)&(arena->nactive.repr);
+    pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
     READ(pactivep, size_t *);
     ret = 0;
 #else
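The counter this hunk redirects also backs the public `stats.arenas.<i>.pactive` statistic, which is what the pactivep fast path above exposes. A rough usage sketch of reading it through mallctl, assuming jemalloc is built with stats enabled and installed without a symbol prefix (otherwise the calls are je_mallctl, etc.):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
    void *p = malloc(1 << 20);

    /* Refresh jemalloc's cached statistics. */
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    mallctl("epoch", &epoch, &sz, &epoch, sz);

    /* Number of pages in active extents for arena 0; after this commit the
     * value comes from arena->pa_shard.nactive rather than arena->nactive. */
    size_t pactive;
    sz = sizeof(pactive);
    if (mallctl("stats.arenas.0.pactive", &pactive, &sz, NULL, 0) == 0) {
        printf("arena 0 active pages: %zu\n", pactive);
    }

    free(p);
    return 0;
}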
src/pa.c
@@ -1,6 +1,17 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
+static void
+pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
+    atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED);
+}
+
+static void
+pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
+    assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
+    atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
+}
+
 bool
 pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx) {
@@ -43,6 +54,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     }
 
     atomic_store_zu(&shard->extent_sn_next, 0, ATOMIC_RELAXED);
+    atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
 
     shard->stats_mtx = stats_mtx;
     shard->stats = stats;
@@ -83,7 +95,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
         edata = ecache_alloc_grow(tsdn, shard, ehooks,
             &shard->ecache_retained, NULL, size, alignment, slab,
             szind, zero);
-        if (config_stats) {
+        if (config_stats && edata != NULL) {
             /*
             * edata may be NULL on OOM, but in that case mapped_add
             * isn't used below, so there's no need to conditionlly
@@ -92,6 +104,9 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
             *mapped_add = size;
         }
     }
+    if (edata != NULL) {
+        pa_nactive_add(shard, size >> LG_PAGE);
+    }
     return edata;
 }
 
@@ -100,6 +115,7 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
     assert(new_size > old_size);
     assert(edata_size_get(edata) == old_size);
+    assert((new_size & PAGE_MASK) == 0);
 
     ehooks_t *ehooks = pa_shard_ehooks_get(shard);
     void *trail_begin = edata_past_get(edata);
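The added assertion is a page-alignment check: PAGE_MASK is PAGE - 1, so (new_size & PAGE_MASK) == 0 holds exactly when new_size is a whole number of pages. A tiny stand-alone illustration of the arithmetic, assuming a 4 KiB page purely for the example:

#include <assert.h>
#include <stddef.h>

#define PAGE      ((size_t)4096)  /* illustrative; jemalloc derives the real value at build time */
#define PAGE_MASK (PAGE - 1)

int
main(void) {
    assert((8192 & PAGE_MASK) == 0);  /* two full pages: aligned */
    assert((8193 & PAGE_MASK) != 0);  /* one byte past a page boundary: not aligned */
    return 0;
}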
@@ -133,6 +149,7 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
         *mapped_add = 0;
         return true;
     }
+    pa_nactive_add(shard, expand_amount >> LG_PAGE);
     emap_remap(tsdn, &emap_global, edata, szind, slab);
     return false;
 }
@@ -141,6 +158,9 @@ bool
 pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     size_t new_size, szind_t szind, bool slab, bool *generated_dirty) {
     assert(new_size < old_size);
+    assert(edata_size_get(edata) == old_size);
+    assert((new_size & PAGE_MASK) == 0);
+    size_t shrink_amount = old_size - new_size;
 
     ehooks_t *ehooks = pa_shard_ehooks_get(shard);
     *generated_dirty = false;
@@ -150,11 +170,13 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     }
 
     edata_t *trail = extent_split_wrapper(tsdn, &shard->edata_cache, ehooks,
-        edata, new_size, szind, slab, old_size - new_size, SC_NSIZES,
+        edata, new_size, szind, slab, shrink_amount, SC_NSIZES,
         false);
     if (trail == NULL) {
         return true;
     }
+    pa_nactive_sub(shard, shrink_amount >> LG_PAGE);
+
     ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, trail);
     *generated_dirty = true;
     return false;
@@ -163,6 +185,7 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 void
 pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
     bool *generated_dirty) {
+    pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
     ehooks_t *ehooks = pa_shard_ehooks_get(shard);
     ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, edata);
     *generated_dirty = true;
@@ -345,3 +368,5 @@ pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 
     return epoch_advanced;
 }