PA: Move in arena extent_sn counter.

Just another step towards making PA self-contained.

David Goldblatt, 2020-03-09 11:10:43 -07:00 (committed by David Goldblatt)
Parent: 1ada4aef84
Commit: ce8c0d6c09
7 changed files with 19 additions and 22 deletions

@@ -81,7 +81,6 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
 unsigned arena_nthreads_get(arena_t *arena, bool internal);
 void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
 bool arena_is_huge(unsigned arena_ind);

@@ -121,13 +121,6 @@ struct arena_s {
 	/* Synchronization: internal. */
 	counter_accum_t prof_accum;
 
-	/*
-	 * Extent serial number generator state.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t extent_sn_next;
-
 	/*
 	 * Represents a dss_prec_t, but atomically.
 	 *

@@ -46,11 +46,15 @@ struct pa_shard_s {
 	/* The grow info for the retained ecache. */
 	ecache_grow_t ecache_grow;
 
+	/* Extent serial number generator state. */
+	atomic_zu_t extent_sn_next;
+
 	pa_shard_stats_t *stats;
 };
 
 /* Returns true on error. */
 bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats);
+size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
 #endif /* JEMALLOC_INTERNAL_PA_H */

@@ -1979,11 +1979,6 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
 	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
 }
 
-size_t
-arena_extent_sn_next(arena_t *arena) {
-	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
-}
-
 arena_t *
 arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	arena_t *arena;
@@ -2032,8 +2027,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		}
 	}
 
-	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
-
 	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
 	    ATOMIC_RELAXED);

@@ -662,8 +662,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	}
 
 	edata_init(edata, ecache_ind_get(&arena->pa_shard.ecache_retained), ptr,
-	    alloc_size, false, SC_NSIZES, arena_extent_sn_next(arena),
-	    extent_state_active, zeroed, committed, true, EXTENT_IS_HEAD);
+	    alloc_size, false, SC_NSIZES,
+	    pa_shard_extent_sn_next(&arena->pa_shard), extent_state_active,
+	    zeroed, committed, true, EXTENT_IS_HEAD);
 
 	if (extent_register_no_gdump_add(tsdn, edata)) {
 		edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
@@ -816,8 +817,8 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		return NULL;
 	}
 	edata_init(edata, ecache_ind_get(&arena->pa_shard.ecache_dirty), addr,
-	    size, slab, szind, arena_extent_sn_next(arena), extent_state_active,
-	    *zero, *commit, true, EXTENT_NOT_HEAD);
+	    size, slab, szind, pa_shard_extent_sn_next(&arena->pa_shard),
+	    extent_state_active, *zero, *commit, true, EXTENT_NOT_HEAD);
 	if (extent_register(tsdn, edata)) {
 		edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
 		return NULL;

@@ -155,9 +155,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			if (gap_size_page != 0) {
 				edata_init(gap, arena_ind_get(arena),
 				    gap_addr_page, gap_size_page, false,
-				    SC_NSIZES, arena_extent_sn_next(arena),
-				    extent_state_active, false, true, true,
-				    EXTENT_NOT_HEAD);
+				    SC_NSIZES, pa_shard_extent_sn_next(
+				    &arena->pa_shard), extent_state_active,
+				    false, true, true, EXTENT_NOT_HEAD);
 			}
 			/*
 			 * Compute the address just past the end of the desired

@@ -42,8 +42,15 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 		return true;
 	}
 
+	atomic_store_zu(&shard->extent_sn_next, 0, ATOMIC_RELAXED);
+
 	shard->stats = stats;
 	memset(shard->stats, 0, sizeof(*shard->stats));
 
 	return false;
 }
+
+size_t
+pa_shard_extent_sn_next(pa_shard_t *shard) {
+	return atomic_fetch_add_zu(&shard->extent_sn_next, 1, ATOMIC_RELAXED);
+}
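
As a side note on the pattern itself, below is a minimal standalone sketch of what pa_shard_extent_sn_next() does, written against C11 <stdatomic.h> rather than jemalloc's internal atomic_zu_t wrappers; the shard_t and shard_extent_sn_next names are illustrative stand-ins, not jemalloc's. A relaxed fetch-add is enough here because the counter only has to hand out distinct, monotonically increasing serial numbers; it is not used to order any other memory accesses.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for pa_shard_t (not jemalloc's definition). */
typedef struct {
	atomic_size_t extent_sn_next;
} shard_t;

/*
 * Analogue of pa_shard_extent_sn_next(): fetch-add returns the
 * pre-increment value, so successive calls yield 0, 1, 2, ...
 */
static size_t
shard_extent_sn_next(shard_t *shard) {
	return atomic_fetch_add_explicit(&shard->extent_sn_next, 1,
	    memory_order_relaxed);
}

int
main(void) {
	shard_t shard;
	/* Mirrors the atomic_store_zu(..., 0, ATOMIC_RELAXED) in pa_shard_init(). */
	atomic_init(&shard.extent_sn_next, 0);
	size_t a = shard_extent_sn_next(&shard);
	size_t b = shard_extent_sn_next(&shard);
	printf("%zu %zu\n", a, b);	/* prints "0 1" */
	return 0;
}

Since each arena embeds a single pa_shard (every call site above passes &arena->pa_shard), the sequence of serial numbers handed out is unchanged; only the owner of the counter state moves.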