Put extent_state_t into ecache as well as eset.

David Goldblatt, 2019-12-12 16:44:49 -08:00 (committed by David Goldblatt)
parent 98eb40e563
commit d8b0b66c6c
6 changed files with 17 additions and 17 deletions
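For orientation, a minimal sketch of the data layout this change produces. Both structs are abridged: every member shown appears in the hunks below, the elided members are not named, and the snippet is not compilable on its own outside the jemalloc tree.

struct ecache_s {
	malloc_mutex_t mtx;
	eset_t eset;
	/* All stored extents must be in the same state. */
	extent_state_t state;
	/* ... delay_coalesce and other members elided ... */
};

struct eset_s {
	/* ... other members elided ... */
	/*
	 * A duplication of the data in the containing ecache; kept only for
	 * assertions on the states of the passed-in extents.
	 */
	extent_state_t state;
};

Callers that previously read the state through eset_state_get(&ecache->eset) now read ecache->state directly, as the hunks below show.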

View File

@@ -8,6 +8,8 @@ typedef struct ecache_s ecache_t;
 struct ecache_s {
 	malloc_mutex_t mtx;
 	eset_t eset;
+	/* All stored extents must be in the same state. */
+	extent_state_t state;
 	/*
 	 * If true, delay coalescing until eviction; otherwise coalesce during
 	 * deallocation.

View File

@@ -30,12 +30,14 @@ struct eset_s {
 	/* Page sum for all extents in heaps. */
 	atomic_zu_t npages;
 
-	/* All stored extents must be in the same state. */
+	/*
+	 * A duplication of the data in the containing ecache.  We use this only
+	 * for assertions on the states of the passed-in extents.
+	 */
 	extent_state_t state;
 };
 
 void eset_init(eset_t *eset, extent_state_t state);
-extent_state_t eset_state_get(const eset_t *eset);
 
 size_t eset_npages_get(eset_t *eset);
 /* Get the number of extents in the given page size index. */

View File

@@ -857,7 +857,7 @@ arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		size_t npages = edata_size_get(edata) >> LG_PAGE;
 		npurged += npages;
 		edata_list_remove(decay_extents, edata);
-		switch (eset_state_get(&ecache->eset)) {
+		switch (ecache->state) {
 		case extent_state_active:
 			not_reached();
 		case extent_state_dirty:

View File

@@ -8,6 +8,7 @@ ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
 	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
+	ecache->state = state;
 	ecache->delay_coalesce = delay_coalesce;
 	eset_init(&ecache->eset, state);
 	return false;

View File

@@ -19,11 +19,6 @@ eset_init(eset_t *eset, extent_state_t state) {
 	eset->state = state;
 }
 
-extent_state_t
-eset_state_get(const eset_t *eset) {
-	return eset->state;
-}
-
 size_t
 eset_npages_get(eset_t *eset) {
 	return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);

View File

@@ -170,7 +170,7 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	bool coalesced;
 	edata = extent_try_coalesce(tsdn, arena, ehooks, rtree_ctx, ecache,
 	    edata, &coalesced, false);
-	edata_state_set(edata, eset_state_get(&ecache->eset));
+	edata_state_set(edata, ecache->state);
 
 	if (!coalesced) {
 		return true;
@@ -253,7 +253,7 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 	 * Either mark the extent active or deregister it to protect against
 	 * concurrent operations.
 	 */
-	switch (eset_state_get(&ecache->eset)) {
+	switch (ecache->state) {
 	case extent_state_active:
 		not_reached();
 	case extent_state_dirty:
@@ -287,7 +287,7 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	 * Leak extent after making sure its pages have already been purged, so
 	 * that this is only a virtual memory leak.
 	 */
-	if (eset_state_get(&ecache->eset) == extent_state_dirty) {
+	if (ecache->state == extent_state_dirty) {
 		if (extent_purge_lazy_impl(tsdn, arena, ehooks, edata, 0, sz,
 		    growing_retained)) {
 			extent_purge_forced_impl(tsdn, arena, ehooks, edata, 0,
@@ -303,7 +303,7 @@ extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, ecache_t *ecache,
 	assert(edata_arena_ind_get(edata) == arena_ind_get(arena));
 	assert(edata_state_get(edata) == extent_state_active);
 
-	edata_state_set(edata, eset_state_get(&ecache->eset));
+	edata_state_set(edata, ecache->state);
 	eset_insert(&ecache->eset, edata);
 }
 
@@ -319,7 +319,7 @@ static void
 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, ecache_t *ecache,
     edata_t *edata) {
 	assert(edata_arena_ind_get(edata) == arena_ind_get(arena));
-	assert(edata_state_get(edata) == eset_state_get(&ecache->eset));
+	assert(edata_state_get(edata) == ecache->state);
 
 	eset_remove(&ecache->eset, edata);
 	edata_state_set(edata, extent_state_active);
@@ -557,7 +557,7 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		if (edata_arena_ind_get(edata) != arena_ind_get(arena)
 		    || edata_size_get(edata) < esize
 		    || edata_state_get(edata)
-		    != eset_state_get(&ecache->eset)) {
+		    != ecache->state) {
 			edata = NULL;
 		}
 		extent_unlock_edata(tsdn, unlock_edata);
@@ -1063,7 +1063,7 @@ extent_can_coalesce(arena_t *arena, ecache_t *ecache, const edata_t *inner,
 	}
 
 	assert(edata_state_get(inner) == extent_state_active);
-	if (edata_state_get(outer) != ecache->eset.state) {
+	if (edata_state_get(outer) != ecache->state) {
 		return false;
 	}
 
@@ -1191,8 +1191,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
-	assert((eset_state_get(&ecache->eset) != extent_state_dirty &&
-	    eset_state_get(&ecache->eset) != extent_state_muzzy) ||
+	assert((ecache->state != extent_state_dirty &&
+	    ecache->state != extent_state_muzzy) ||
 	    !edata_zeroed_get(edata));
 
 	malloc_mutex_lock(tsdn, &ecache->mtx);