Edata: split up different list linkage uses.

David Goldblatt, 2020-06-11 15:15:51 -07:00 (committed by David Goldblatt)
parent 129b727058
commit 392f645f4d
12 changed files with 55 additions and 47 deletions

View File

@@ -69,7 +69,7 @@ struct arena_s {
*
* Synchronization: large_mtx.
*/
-edata_list_t large;
+edata_list_active_t large;
/* Synchronizes all large allocation/update/deallocation. */
malloc_mutex_t large_mtx;

View File

@@ -32,7 +32,7 @@ struct bin_s {
edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
-edata_list_t slabs_full;
+edata_list_active_t slabs_full;
/* Bin statistics. */
bin_stats_t stats;

View File

@@ -185,22 +185,28 @@ struct edata_s {
size_t e_bsize;
};
-/*
- * List linkage, used by a variety of lists:
- * - bin_t's slabs_full
- * - extents_t's LRU
- * - stashed dirty extents
- * - arena's large allocations
- */
-ql_elm(edata_t) ql_link;
-/*
- * Linkage for per size class sn/address-ordered heaps, and
- * for extent_avail
- */
-phn(edata_t) ph_link;
+union {
+/*
+ * List linkage used when the edata_t is active; either in
+ * arena's large allocations or bin_t's slabs_full.
+ */
+ql_elm(edata_t) ql_link_active;
+/*
+ * Pairing heap linkage. Used whenever the extent is inactive
+ * (in the page allocators), or when it is active and in
+ * slabs_nonfull, or when the edata_t is unassociated with an
+ * extent and sitting in an edata_cache.
+ */
+phn(edata_t) ph_link;
+};
union {
+/*
+ * List linkage used when the extent is inactive:
+ * - Stashed dirty extents
+ * - Ecache LRU functionality.
+ */
+ql_elm(edata_t) ql_link_inactive;
/* Small region slab metadata. */
slab_data_t e_slab_data;
@@ -209,7 +215,8 @@ struct edata_s {
};
};
-TYPED_LIST(edata_list, edata_t, ql_link)
+TYPED_LIST(edata_list_active, edata_t, ql_link_active)
+TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
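
To make the intent of the two typed lists concrete, here is a minimal usage sketch (not part of the commit), assuming the jemalloc-internal edata.h is in scope; the local list names and the helper function are hypothetical. Because ql_link_active shares a union with ph_link, and ql_link_inactive shares one with the slab metadata, an edata_t may use at most one member of each union at any given time.

#include "jemalloc/internal/edata.h"

/* Hypothetical sketch of the active vs. inactive linkages. */
static void
linkage_sketch(edata_t *edata) {
    edata_list_active_t active_list;     /* e.g. a bin's slabs_full */
    edata_list_inactive_t inactive_list; /* e.g. an ecache LRU */

    edata_list_active_init(&active_list);
    edata_list_inactive_init(&inactive_list);

    /* While the extent is active (a full slab, a large allocation). */
    edata_list_active_append(&active_list, edata);
    edata_list_active_remove(&active_list, edata);

    /*
     * While the extent is inactive (stashed dirty extents, ecache LRU).
     * A fresh edata_t may first need ql_elm_new(edata, ql_link_inactive),
     * as edata_cache_small_prepare() does below.
     */
    edata_list_inactive_append(&inactive_list, edata);
    edata_list_inactive_remove(&inactive_list, edata);
}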
static inline unsigned
edata_arena_ind_get(const edata_t *edata) {

View File

@@ -27,7 +27,7 @@ void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
typedef struct edata_cache_small_s edata_cache_small_t;
struct edata_cache_small_s {
-edata_list_t list;
+edata_list_inactive_t list;
size_t count;
edata_cache_t *fallback;
};

View File

@@ -25,7 +25,7 @@ struct eset_s {
bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
/* LRU of all extents in heaps. */
-edata_list_t lru;
+edata_list_inactive_t lru;
/* Page sum for all extents in heaps. */
atomic_zu_t npages;

View File

@@ -594,7 +594,7 @@ arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
-edata_list_append(&bin->slabs_full, slab);
+edata_list_active_append(&bin->slabs_full, slab);
}
static void
@@ -602,7 +602,7 @@ arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
-edata_list_remove(&bin->slabs_full, slab);
+edata_list_active_remove(&bin->slabs_full, slab);
}
static void
@@ -622,8 +622,8 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
-for (slab = edata_list_first(&bin->slabs_full); slab != NULL;
-slab = edata_list_first(&bin->slabs_full)) {
+for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
+slab = edata_list_active_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -655,8 +655,8 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
-for (edata_t *edata = edata_list_first(&arena->large); edata !=
-NULL; edata = edata_list_first(&arena->large)) {
+for (edata_t *edata = edata_list_active_first(&arena->large);
+edata != NULL; edata = edata_list_active_first(&arena->large)) {
void *ptr = edata_base_get(edata);
size_t usize;
@@ -1465,7 +1465,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
ATOMIC_RELAXED);
-edata_list_init(&arena->large);
+edata_list_active_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;

View File

@@ -46,7 +46,7 @@ bin_init(bin_t *bin) {
}
bin->slabcur = NULL;
edata_heap_new(&bin->slabs_nonfull);
-edata_list_init(&bin->slabs_full);
+edata_list_active_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}

View File

@@ -59,7 +59,7 @@ edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
void
edata_cache_small_init(edata_cache_small_t *ecs, edata_cache_t *fallback) {
-edata_list_init(&ecs->list);
+edata_list_inactive_init(&ecs->list);
ecs->count = 0;
ecs->fallback = fallback;
}
@@ -67,9 +67,9 @@ edata_cache_small_init(edata_cache_small_t *ecs, edata_cache_t *fallback) {
edata_t *
edata_cache_small_get(edata_cache_small_t *ecs) {
assert(ecs->count > 0);
-edata_t *edata = edata_list_first(&ecs->list);
+edata_t *edata = edata_list_inactive_first(&ecs->list);
assert(edata != NULL);
-edata_list_remove(&ecs->list, edata);
+edata_list_inactive_remove(&ecs->list, edata);
ecs->count--;
return edata;
}
@@ -77,7 +77,7 @@ edata_cache_small_get(edata_cache_small_t *ecs) {
void
edata_cache_small_put(edata_cache_small_t *ecs, edata_t *edata) {
assert(edata != NULL);
-edata_list_append(&ecs->list, edata);
+edata_list_inactive_append(&ecs->list, edata);
ecs->count++;
}
@@ -93,7 +93,7 @@ bool edata_cache_small_prepare(tsdn_t *tsdn, edata_cache_small_t *ecs,
if (edata == NULL) {
return true;
}
-ql_elm_new(edata, ql_link);
+ql_elm_new(edata, ql_link_inactive);
edata_cache_small_put(ecs, edata);
}
return false;
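
For orientation, a sketch (not from the commit) of how the small cache is used end to end, assuming an already-initialized fallback edata_cache_t; the helper name is hypothetical, and edata_cache_small_prepare() is omitted because its remaining parameters are truncated in the hunk above.

/* Hypothetical round trip through the small cache. */
static edata_t *
small_cache_roundtrip_sketch(edata_cache_t *fallback, edata_t *spare) {
    edata_cache_small_t ecs;
    edata_cache_small_init(&ecs, fallback);

    /* Cached edata_t structs are linked through ql_link_inactive. */
    ql_elm_new(spare, ql_link_inactive);
    edata_cache_small_put(&ecs, spare);

    /* get() asserts that the cache is non-empty. */
    return edata_cache_small_get(&ecs);
}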

View File

@@ -12,7 +12,7 @@ eset_init(eset_t *eset, extent_state_t state) {
edata_heap_new(&eset->heaps[i]);
}
bitmap_init(eset->bitmap, &eset_bitmap_info, true);
-edata_list_init(&eset->lru);
+edata_list_inactive_init(&eset->lru);
atomic_store_zu(&eset->npages, 0, ATOMIC_RELAXED);
eset->state = state;
}
@@ -65,7 +65,7 @@ eset_insert(eset_t *eset, edata_t *edata) {
eset_stats_add(eset, pind, size);
}
-edata_list_append(&eset->lru, edata);
+edata_list_inactive_append(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* All modifications to npages hold the mutex (as asserted above), so we
@@ -95,7 +95,7 @@ eset_remove(eset_t *eset, edata_t *edata) {
bitmap_set(eset->bitmap, &eset_bitmap_info,
(size_t)pind);
}
-edata_list_remove(&eset->lru, edata);
+edata_list_inactive_remove(&eset->lru, edata);
size_t npages = size >> LG_PAGE;
/*
* As in eset_insert, we hold eset->mtx and so don't need atomic

View File

@@ -139,7 +139,7 @@ ecache_evict(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
edata_t *edata;
while (true) {
/* Get the LRU extent, if any. */
-edata = edata_list_first(&ecache->eset.lru);
+edata = edata_list_inactive_first(&ecache->eset.lru);
if (edata == NULL) {
goto label_return;
}

View File

@@ -43,7 +43,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (!arena_is_auto(arena)) {
/* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
-edata_list_append(&arena->large, edata);
+edata_list_active_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
@@ -225,14 +225,14 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
-edata_list_remove(&arena->large, edata);
+edata_list_active_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
-edata_list_remove(&arena->large, edata);
+edata_list_active_remove(&arena->large, edata);
}
}
arena_extent_dalloc_large_prep(tsdn, arena, edata);
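
The net effect in this file is that arena->large is now an active-linkage list whose membership is still guarded by large_mtx. A condensed sketch of that discipline (hypothetical caller, with the arena_is_auto() fast path shown above elided):

/* Sketch only: track a large allocation in arena->large. */
static void
large_tracking_sketch(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
    /* At allocation time (see large_palloc() above). */
    malloc_mutex_lock(tsdn, &arena->large_mtx);
    edata_list_active_append(&arena->large, edata);
    malloc_mutex_unlock(tsdn, &arena->large_mtx);

    /* At deallocation time (see large_dalloc_prep_impl() above). */
    malloc_mutex_lock(tsdn, &arena->large_mtx);
    edata_list_active_remove(&arena->large, edata);
    malloc_mutex_unlock(tsdn, &arena->large_mtx);
}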

View File

@@ -239,7 +239,8 @@ pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
static size_t
pa_stash_decayed(tsdn_t *tsdn, pa_shard_t *shard, ecache_t *ecache,
-size_t npages_limit, size_t npages_decay_max, edata_list_t *result) {
+size_t npages_limit, size_t npages_decay_max,
+edata_list_inactive_t *result) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
ehooks_t *ehooks = pa_shard_ehooks_get(shard);
@@ -252,7 +253,7 @@ pa_stash_decayed(tsdn_t *tsdn, pa_shard_t *shard, ecache_t *ecache,
if (edata == NULL) {
break;
}
-edata_list_append(result, edata);
+edata_list_inactive_append(result, edata);
nstashed += edata_size_get(edata) >> LG_PAGE;
}
return nstashed;
@@ -261,7 +262,7 @@ pa_stash_decayed(tsdn_t *tsdn, pa_shard_t *shard, ecache_t *ecache,
static size_t
pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
-edata_list_t *decay_extents) {
+edata_list_inactive_t *decay_extents) {
bool err;
size_t nmadvise = 0;
@@ -272,9 +273,9 @@ pa_decay_stashed(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
bool try_muzzy = !fully_decay && pa_shard_may_have_muzzy(shard);
-for (edata_t *edata = edata_list_first(decay_extents); edata !=
-NULL; edata = edata_list_first(decay_extents)) {
-edata_list_remove(decay_extents, edata);
+for (edata_t *edata = edata_list_inactive_first(decay_extents);
+edata != NULL; edata = edata_list_inactive_first(decay_extents)) {
+edata_list_inactive_remove(decay_extents, edata);
size_t size = edata_size_get(edata);
size_t npages = size >> LG_PAGE;
@@ -342,8 +343,8 @@ pa_decay_to_limit(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
decay->purging = true;
malloc_mutex_unlock(tsdn, &decay->mtx);
-edata_list_t decay_extents;
-edata_list_init(&decay_extents);
+edata_list_inactive_t decay_extents;
+edata_list_inactive_init(&decay_extents);
size_t npurge = pa_stash_decayed(tsdn, shard, ecache, npages_limit,
npages_decay_max, &decay_extents);
if (npurge != 0) {
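
The decay path above builds a private edata_list_inactive_t, stashes decayed extents onto it, and then drains it. A minimal sketch of that drain loop, with the purge/deallocation work elided (the real logic lives in pa_decay_stashed(); the helper name is hypothetical):

/* Sketch only: drain a stash of inactive extents. */
static void
drain_stash_sketch(edata_list_inactive_t *stash) {
    for (edata_t *edata = edata_list_inactive_first(stash); edata != NULL;
        edata = edata_list_inactive_first(stash)) {
        edata_list_inactive_remove(stash, edata);
        /* ...purge and deallocate the extent here... */
    }
}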