psset: Do first-fit based on slab age.

This functions more like the serial number strategy of the ecache and
hpa_central_t.  Longer-lived slabs are more likely to continue to live for
longer in the future.
This commit is contained in:
David Goldblatt 2020-09-18 16:36:40 -07:00 committed by David Goldblatt
parent 634ec6f50a
commit d16849c91d
3 changed files with 100 additions and 32 deletions

View File

@ -44,7 +44,7 @@ struct psset_s {
* The pageslabs, quantized by the size class of the largest contiguous
* free run of pages in a pageslab.
*/
edata_heap_t pageslabs[PSSET_NPSIZES];
edata_age_heap_t pageslabs[PSSET_NPSIZES];
bitmap_t bitmap[BITMAP_GROUPS(PSSET_NPSIZES)];
/*
* Full slabs don't live in any edata heap. But we still track their
@ -52,6 +52,9 @@ struct psset_s {
*/
psset_bin_stats_t full_slab_stats;
psset_bin_stats_t slab_stats[PSSET_NPSIZES];
/* How many alloc_new calls have happened? */
uint64_t age_counter;
};
void psset_init(psset_t *psset);

View File

@ -11,7 +11,7 @@ static const bitmap_info_t psset_bitmap_info =
void
psset_init(psset_t *psset) {
for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
edata_heap_new(&psset->pageslabs[i]);
edata_age_heap_new(&psset->pageslabs[i]);
}
bitmap_init(psset->bitmap, &psset_bitmap_info, /* fill */ true);
psset->full_slab_stats.npageslabs = 0;
@ -22,6 +22,7 @@ psset_init(psset_t *psset) {
psset->slab_stats[i].nactive = 0;
psset->slab_stats[i].ninactive = 0;
}
psset->age_counter = 0;
}
/*
@ -48,13 +49,13 @@ psset_bin_stats_adjust(psset_bin_stats_t *binstats, edata_t *ps, bool inc) {
/*
 * Remove pageslab ps from the age heap for size-class index pind, and
 * decrement the per-class stats accordingly.  (The diff artifact that left
 * the superseded edata_heap_remove call alongside its replacement is
 * resolved here: the slab must be removed exactly once.)
 */
static void
psset_edata_heap_remove(psset_t *psset, pszind_t pind, edata_t *ps) {
	edata_age_heap_remove(&psset->pageslabs[pind], ps);
	psset_bin_stats_adjust(&psset->slab_stats[pind], ps, /* inc */ false);
}
/*
 * Insert pageslab ps into the age heap for size-class index pind, and
 * increment the per-class stats accordingly.  (Mirror of
 * psset_edata_heap_remove; the stale pre-change edata_heap_insert call is
 * dropped so the slab is inserted exactly once.)
 */
static void
psset_edata_heap_insert(psset_t *psset, pszind_t pind, edata_t *ps) {
	edata_age_heap_insert(&psset->pageslabs[pind], ps);
	psset_bin_stats_adjust(&psset->slab_stats[pind], ps, /* inc */ true);
}
@ -70,32 +71,24 @@ psset_assert_ps_consistent(edata_t *ps) {
*/
/*
 * Extract a pageslab able to satisfy an allocation of `size` bytes,
 * removing it from the psset.  We pick the first (smallest) nonempty size
 * class whose longest free run fits the request, and within that class the
 * oldest slab (first-fit by age, per the age heap's ordering) -- longer-
 * lived slabs are more likely to keep living.  Returns NULL if no slab can
 * serve the request.
 *
 * NOTE(review): the interleaved removed/added lines of the diff (the dead
 * ret/ret_ind linear scan vs. the new single-lookup path) are resolved
 * here to the post-change implementation.
 */
static edata_t *
psset_recycle_extract(psset_t *psset, size_t size) {
	/* Smallest size-class index whose runs can hold `size`. */
	pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
	/* First nonempty class at or above that index. */
	pszind_t pind = (pszind_t)bitmap_ffu(psset->bitmap, &psset_bitmap_info,
	    (size_t)min_pind);
	if (pind == PSSET_NPSIZES) {
		return NULL;
	}
	/* Oldest slab in that class. */
	edata_t *ps = edata_age_heap_first(&psset->pageslabs[pind]);
	if (ps == NULL) {
		return NULL;
	}

	psset_edata_heap_remove(psset, pind, ps);
	/* If we emptied the class, mark it absent in the bitmap. */
	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
		bitmap_set(psset->bitmap, &psset_bitmap_info, pind);
	}

	psset_assert_ps_consistent(ps);
	return ps;
}
static void
@ -107,7 +100,7 @@ psset_insert(psset_t *psset, edata_t *ps, size_t largest_range) {
assert(pind < PSSET_NPSIZES);
if (edata_heap_empty(&psset->pageslabs[pind])) {
if (edata_age_heap_empty(&psset->pageslabs[pind])) {
bitmap_unset(psset->bitmap, &psset_bitmap_info, (size_t)pind);
}
psset_edata_heap_insert(psset, pind, ps);
@ -215,6 +208,8 @@ psset_alloc_new(psset_t *psset, edata_t *ps, edata_t *r_edata, size_t size) {
assert(fb_empty(ps_fb, ps_npages));
assert(ps_npages >= (size >> LG_PAGE));
edata_nfree_set(ps, (uint32_t)ps_npages);
edata_age_set(ps, psset->age_counter);
psset->age_counter++;
psset_ps_alloc_insert(psset, ps, r_edata, size);
}
@ -287,7 +282,7 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
*/
if (ps_old_longest_free_range > 0) {
psset_edata_heap_remove(psset, old_pind, ps);
if (edata_heap_empty(&psset->pageslabs[old_pind])) {
if (edata_age_heap_empty(&psset->pageslabs[old_pind])) {
bitmap_set(psset->bitmap, &psset_bitmap_info,
(size_t)old_pind);
}
@ -299,7 +294,7 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
/* Otherwise, it gets reinserted. */
pszind_t new_pind = sz_psz2ind(sz_psz_quantize_floor(
new_range_len << LG_PAGE));
if (edata_heap_empty(&psset->pageslabs[new_pind])) {
if (edata_age_heap_empty(&psset->pageslabs[new_pind])) {
bitmap_unset(psset->bitmap, &psset_bitmap_info,
(size_t)new_pind);
}

View File

@ -266,8 +266,7 @@ TEST_BEGIN(test_multi_pageslab) {
/*
 * Free up a 2-page hole in the earlier slab, and a 1-page one in the
 * later one. We should still pick the later one.
 */
ps = psset_dalloc(&psset, &alloc[0][0]);
expect_ptr_null(ps, "Unexpected eviction");
@ -276,8 +275,8 @@ TEST_BEGIN(test_multi_pageslab) {
ps = psset_dalloc(&psset, &alloc[1][0]);
expect_ptr_null(ps, "Unexpected eviction");
err = psset_alloc_reuse(&psset, &alloc[0][0], PAGE);
expect_ptr_eq(&pageslab[0], edata_ps_get(&alloc[0][0]),
"Should have picked first pageslab");
expect_ptr_eq(&pageslab[1], edata_ps_get(&alloc[0][0]),
"Should have picked the fuller pageslab");
/*
* Now both slabs have 1-page holes. Free up a second one in the later
@ -370,6 +369,76 @@ TEST_BEGIN(test_stats) {
}
TEST_END
/*
 * Verify age-based first-fit: when two pageslabs each have exactly one free
 * page, an allocation must come from the slab that entered the psset first,
 * even if the other slab would win an edata_comp_snad comparison.
 */
TEST_BEGIN(test_oldest_fit) {
	bool err;
	edata_t alloc[PAGESLAB_PAGES];
	edata_t worse_alloc[PAGESLAB_PAGES];

	edata_t pageslab;
	memset(&pageslab, 0, sizeof(pageslab));
	edata_init(&pageslab, /* arena_ind */ 0, (void *)(10 * PAGESLAB_SIZE),
	    PAGESLAB_SIZE, /* slab */ true, SC_NSIZES, PAGESLAB_SN + 1,
	    extent_state_active, /* zeroed */ false, /* committed */ true,
	    EXTENT_PAI_HPA, EXTENT_IS_HEAD);

	/*
	 * This pageslab is better from an edata_comp_snad POV, but will be
	 * added to the set after the previous one, and so should be less
	 * preferred for allocations.
	 */
	edata_t worse_pageslab;
	/* Was sizeof(pageslab); zero the object we actually initialize. */
	memset(&worse_pageslab, 0, sizeof(worse_pageslab));
	edata_init(&worse_pageslab, /* arena_ind */ 0,
	    (void *)(9 * PAGESLAB_SIZE), PAGESLAB_SIZE, /* slab */ true,
	    SC_NSIZES, PAGESLAB_SN - 1, extent_state_active, /* zeroed */ false,
	    /* committed */ true, EXTENT_PAI_HPA, EXTENT_IS_HEAD);

	psset_t psset;
	psset_init(&psset);

	/* Fill the older pageslab completely. */
	edata_init_test(&alloc[0]);
	psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
	for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
		edata_init_test(&alloc[i]);
		err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
		expect_false(err, "Nonempty psset failed page allocation.");
		expect_ptr_eq(&pageslab, edata_ps_get(&alloc[i]),
		    "Allocated from the wrong pageslab");
	}

	edata_init_test(&worse_alloc[0]);
	psset_alloc_new(&psset, &worse_pageslab, &worse_alloc[0], PAGE);
	expect_ptr_eq(&worse_pageslab, edata_ps_get(&worse_alloc[0]),
	    "Allocated from the wrong pageslab");
	/*
	 * Make the two pssets otherwise indistinguishable; all full except for
	 * a single page.  These allocations land in worse_alloc (the original
	 * passed alloc[i] here, clobbering the records of the live
	 * allocations made above while the initialized worse_alloc entries
	 * went unused).
	 */
	for (size_t i = 1; i < PAGESLAB_PAGES - 1; i++) {
		edata_init_test(&worse_alloc[i]);
		err = psset_alloc_reuse(&psset, &worse_alloc[i], PAGE);
		expect_false(err, "Nonempty psset failed page allocation.");
		expect_ptr_eq(&worse_pageslab, edata_ps_get(&worse_alloc[i]),
		    "Allocated from the wrong pageslab");
	}

	/* Deallocate the last page from the older pageslab. */
	edata_t *evicted = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 1]);
	expect_ptr_null(evicted, "Unexpected eviction");

	/*
	 * This edata is the whole purpose for the test; it should come from the
	 * older pageslab.
	 */
	edata_t test_edata;
	edata_init_test(&test_edata);
	err = psset_alloc_reuse(&psset, &test_edata, PAGE);
	expect_false(err, "Nonempty psset failed page allocation");
	expect_ptr_eq(&pageslab, edata_ps_get(&test_edata),
	    "Allocated from the wrong pageslab");
}
TEST_END
int
main(void) {
return test_no_reentrancy(
@ -378,5 +447,6 @@ main(void) {
test_reuse,
test_evict,
test_multi_pageslab,
test_stats);
test_stats,
test_oldest_fit);
}