server-skynet-source-3rd-je.../test/unit/hpdata.c
Commit 30b9e8162b by David Goldblatt, 2021-02-04 20:58:31 -08:00: HPA: Generalize purging.

Previously, we would purge a hugepage only when it was completely empty.  With
this change, we can purge even when it is only partially empty.  Although the
heuristic here is still fairly primitive, this infrastructure can scale to
become more advanced.

#include "test/jemalloc_test.h"
#define HPDATA_ADDR ((void *)(10 * HUGEPAGE))
#define HPDATA_AGE 123
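
/*
 * A minimal sketch (not from the original test) of how a caller might drive
 * the generalized purging described in the header above, using the same
 * begin/next/end calls the tests below exercise.  example_purge_all_dirty()
 * and its purge_os_pages callback are hypothetical names; the real HPA code
 * may structure this differently.
 */
static inline void
example_purge_all_dirty(hpdata_t *hpdata,
    void (*purge_os_pages)(void *addr, size_t size)) {
	hpdata_purge_state_t purge_state;
	hpdata_purge_begin(hpdata, &purge_state);

	void *purge_addr;
	size_t purge_size;
	/* Each successful call yields one contiguous dirty range to purge. */
	while (hpdata_purge_next(hpdata, &purge_state, &purge_addr,
	    &purge_size)) {
		purge_os_pages(purge_addr, purge_size);
	}

	hpdata_purge_end(hpdata, &purge_state);
}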

TEST_BEGIN(test_reserve_alloc) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	/* Allocating a page at a time, we should do first fit. */
	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
		expect_true(hpdata_consistent(&hpdata), "");
		expect_zu_eq(HUGEPAGE_PAGES - i,
		    hpdata_longest_free_range_get(&hpdata), "");
		void *alloc = hpdata_reserve_alloc(&hpdata, PAGE);
		expect_ptr_eq((char *)HPDATA_ADDR + i * PAGE, alloc, "");
		expect_true(hpdata_consistent(&hpdata), "");
	}
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(0, hpdata_longest_free_range_get(&hpdata), "");

	/*
	 * Build up a bigger free range, 2 pages at a time, until we've got 6
	 * adjacent free pages total.  Pages 8-13 should be unreserved after
	 * this.
	 */
	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 10 * PAGE, 2 * PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(2, hpdata_longest_free_range_get(&hpdata), "");

	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 12 * PAGE, 2 * PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(4, hpdata_longest_free_range_get(&hpdata), "");

	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 8 * PAGE, 2 * PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");

	/*
	 * Leave page 14 reserved, but free page 15 (this tests the case where
	 * unreserving combines two ranges).
	 */
	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 15 * PAGE, PAGE);
	/*
	 * The longest free range shouldn't change; we've got a free range of
	 * size 6, then a reserved page, then another free range.
	 */
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");

	/* After freeing page 14, the two ranges get combined. */
	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 14 * PAGE, PAGE);
	expect_true(hpdata_consistent(&hpdata), "");
	expect_zu_eq(8, hpdata_longest_free_range_get(&hpdata), "");
}
TEST_END

TEST_BEGIN(test_purge_simple) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE_PAGES / 2 * PAGE);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	/* Create HUGEPAGE_PAGES / 4 dirty inactive pages at the beginning. */
	hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);

	expect_zu_eq(hpdata_ndirty_get(&hpdata), HUGEPAGE_PAGES / 2, "");

	expect_false(hpdata_changing_state_get(&hpdata), "");
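
	/*
	 * Purge those dirty, inactive pages: begin a purge pass, walk the
	 * ranges it yields, then end it.
	 */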
	hpdata_purge_state_t purge_state;
	hpdata_purge_begin(&hpdata, &purge_state);

	expect_true(hpdata_changing_state_get(&hpdata), "");

	void *purge_addr;
	size_t purge_size;
	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	expect_true(hpdata_changing_state_get(&hpdata), "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_false(got_result, "Unexpected additional purge range: "
	    "extent at %p of size %zu", purge_addr, purge_size);

	expect_true(hpdata_changing_state_get(&hpdata), "");

	hpdata_purge_end(&hpdata, &purge_state);
	expect_false(hpdata_changing_state_get(&hpdata), "");
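
	/* Only the still-reserved second quarter remains dirty. */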
	expect_zu_eq(hpdata_ndirty_get(&hpdata), HUGEPAGE_PAGES / 4, "");
}
TEST_END

/*
 * We only test intervening dallocs, not intervening allocs; we don't need
 * intervening allocs, and foreseeable optimizations will make them not just
 * unnecessary but incorrect.  In particular, if there are two dirty extents
 * separated only by a retained extent, we can just purge the entire range,
 * saving a purge call.
 */
TEST_BEGIN(test_purge_intervening_dalloc) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	/* Allocate the first 3/4 of the pages. */
	void *alloc = hpdata_reserve_alloc(&hpdata,
	    3 * HUGEPAGE_PAGES / 4 * PAGE);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	/* Free the first 1/4 and the third 1/4 of the pages. */
	hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
	hpdata_unreserve(&hpdata,
	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
	    HUGEPAGE_PAGES / 4 * PAGE);

	expect_zu_eq(hpdata_ndirty_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");

	hpdata_purge_state_t purge_state;
	hpdata_purge_begin(&hpdata, &purge_state);

	void *purge_addr;
	size_t purge_size;
	/* First purge. */
	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	/* Deallocate the second 1/4 before the second purge occurs. */
	hpdata_unreserve(&hpdata,
	    (void *)((uintptr_t)alloc + 1 * HUGEPAGE_PAGES / 4 * PAGE),
	    HUGEPAGE_PAGES / 4 * PAGE);

	/* Now continue purging. */
	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_true(got_result, "");
	expect_ptr_eq(
	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
	    purge_addr, "");
	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");

	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
	    &purge_size);
	expect_false(got_result, "Unexpected additional purge range: "
	    "extent at %p of size %zu", purge_addr, purge_size);

	hpdata_purge_end(&hpdata, &purge_state);
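
	/*
	 * The second quarter, freed only after the purge began, was not
	 * purged, so it stays dirty.
	 */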
	expect_zu_eq(hpdata_ndirty_get(&hpdata), HUGEPAGE_PAGES / 4, "");
}
TEST_END

TEST_BEGIN(test_hugify) {
	hpdata_t hpdata;
	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);

	void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE / 2);
	expect_ptr_eq(alloc, HPDATA_ADDR, "");

	expect_zu_eq(HUGEPAGE_PAGES / 2, hpdata_ndirty_get(&hpdata), "");

	expect_false(hpdata_changing_state_get(&hpdata), "");
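
	/*
	 * Hugification is a begin/end operation; in between, the hpdata is in
	 * a changing state.
	 */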
	hpdata_hugify_begin(&hpdata);
	expect_true(hpdata_changing_state_get(&hpdata), "");

	hpdata_hugify_end(&hpdata);
	expect_false(hpdata_changing_state_get(&hpdata), "");

	/* Hugifying should have increased the dirty page count. */
	expect_zu_eq(HUGEPAGE_PAGES, hpdata_ndirty_get(&hpdata), "");
}
TEST_END

int main(void) {
	return test_no_reentrancy(
	    test_reserve_alloc,
	    test_purge_simple,
	    test_purge_intervening_dalloc,
	    test_hugify);
}