PA: Move in arena extent_sn counter.
Just another step towards making PA self-contained.
Parent: 1ada4aef84
Commit: ce8c0d6c09
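Concretely, the extent serial number generator state moves from arena_t into pa_shard_t, and callers switch from arena_extent_sn_next(arena) to pa_shard_extent_sn_next(&arena->pa_shard). The counter itself is just an atomic size_t that is zero-initialized when the shard is set up and bumped with a relaxed fetch-add each time an extent needs a serial number. Below is a minimal standalone sketch of that pattern using C11 <stdatomic.h> rather than jemalloc's atomic_zu_t wrappers; the names sn_counter_t, sn_counter_init, and sn_next are illustrative only, not part of the jemalloc API.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the extent_sn_next field moved into pa_shard_t. */
typedef struct {
	atomic_size_t next;
} sn_counter_t;

static void
sn_counter_init(sn_counter_t *c) {
	/* Mirrors the atomic_store_zu(..., 0, ATOMIC_RELAXED) in pa_shard_init. */
	atomic_store_explicit(&c->next, 0, memory_order_relaxed);
}

static size_t
sn_next(sn_counter_t *c) {
	/*
	 * fetch_add returns the pre-increment value, so callers see 0, 1, 2, ...
	 * Relaxed ordering suffices here: only the uniqueness and monotonicity
	 * of the returned serial numbers matter, not ordering against other
	 * memory operations.
	 */
	return atomic_fetch_add_explicit(&c->next, 1, memory_order_relaxed);
}

int
main(void) {
	sn_counter_t c;
	sn_counter_init(&c);
	size_t a = sn_next(&c);
	size_t b = sn_next(&c);
	printf("%zu %zu\n", a, b); /* prints "0 1" */
	return 0;
}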
@@ -81,7 +81,6 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
 unsigned arena_nthreads_get(arena_t *arena, bool internal);
 void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
 bool arena_is_huge(unsigned arena_ind);
@@ -121,13 +121,6 @@ struct arena_s {
 	/* Synchronization: internal. */
 	counter_accum_t prof_accum;
 
-	/*
-	 * Extent serial number generator state.
-	 *
-	 * Synchronization: atomic.
-	 */
-	atomic_zu_t extent_sn_next;
-
 	/*
 	 * Represents a dss_prec_t, but atomically.
 	 *
@@ -46,11 +46,15 @@ struct pa_shard_s {
 	/* The grow info for the retained ecache. */
 	ecache_grow_t ecache_grow;
 
+	/* Extent serial number generator state. */
+	atomic_zu_t extent_sn_next;
+
 	pa_shard_stats_t *stats;
 };
 
 /* Returns true on error. */
 bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats);
+size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
 #endif /* JEMALLOC_INTERNAL_PA_H */
@@ -1979,11 +1979,6 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
 	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
 }
 
-size_t
-arena_extent_sn_next(arena_t *arena) {
-	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
-}
-
 arena_t *
 arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	arena_t *arena;
@@ -2032,8 +2027,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		}
 	}
 
-	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
-
 	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
 	    ATOMIC_RELAXED);
 
@@ -662,8 +662,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	}
 
 	edata_init(edata, ecache_ind_get(&arena->pa_shard.ecache_retained), ptr,
-	    alloc_size, false, SC_NSIZES, arena_extent_sn_next(arena),
-	    extent_state_active, zeroed, committed, true, EXTENT_IS_HEAD);
+	    alloc_size, false, SC_NSIZES,
+	    pa_shard_extent_sn_next(&arena->pa_shard), extent_state_active,
+	    zeroed, committed, true, EXTENT_IS_HEAD);
 
 	if (extent_register_no_gdump_add(tsdn, edata)) {
 		edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
@@ -816,8 +817,8 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		return NULL;
 	}
 	edata_init(edata, ecache_ind_get(&arena->pa_shard.ecache_dirty), addr,
-	    size, slab, szind, arena_extent_sn_next(arena), extent_state_active,
-	    *zero, *commit, true, EXTENT_NOT_HEAD);
+	    size, slab, szind, pa_shard_extent_sn_next(&arena->pa_shard),
+	    extent_state_active, *zero, *commit, true, EXTENT_NOT_HEAD);
 	if (extent_register(tsdn, edata)) {
 		edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
 		return NULL;
@@ -155,9 +155,9 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 		if (gap_size_page != 0) {
 			edata_init(gap, arena_ind_get(arena),
 			    gap_addr_page, gap_size_page, false,
-			    SC_NSIZES, arena_extent_sn_next(arena),
-			    extent_state_active, false, true, true,
-			    EXTENT_NOT_HEAD);
+			    SC_NSIZES, pa_shard_extent_sn_next(
+			    &arena->pa_shard), extent_state_active,
+			    false, true, true, EXTENT_NOT_HEAD);
 		}
 		/*
 		 * Compute the address just past the end of the desired
src/pa.c (7 changed lines)
@@ -42,8 +42,15 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 		return true;
 	}
 
+	atomic_store_zu(&shard->extent_sn_next, 0, ATOMIC_RELAXED);
+
 	shard->stats = stats;
 	memset(shard->stats, 0, sizeof(*shard->stats));
 
 	return false;
 }
+
+size_t
+pa_shard_extent_sn_next(pa_shard_t *shard) {
+	return atomic_fetch_add_zu(&shard->extent_sn_next, 1, ATOMIC_RELAXED);
+}