Wake up background threads on demand
This change allows every allocator conforming to the PAI interface to communicate that it has deferred some work for the future. Without it, once a background thread goes into indefinite sleep, there is no way to notify it of newly deferred work.
Committed by: Alexander Lapenkov
Parent: 97da57c13a
Commit: 8229cc77c5
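Concretely, every operation in the page allocator interface (pai_alloc, pai_alloc_batch, pai_expand, pai_shrink, pai_dalloc, pai_dalloc_batch, and pa_alloc/pa_dalloc above it) gains a trailing bool *deferred_work_generated out-parameter, which the diff below threads through the tests and the test-only PAI implementations. The following is a minimal, self-contained sketch of that pattern; the struct layout, the toy allocator, and the wakeup helper are simplified stand-ins for illustration, not jemalloc's actual definitions:

/*
 * Sketch of the deferred-work flag added to the page allocator interface
 * (PAI). Types and helpers here are simplified stand-ins, not jemalloc's
 * real pai_t/edata_t/background-thread code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct edata_s {
    size_t size;
} edata_t;

typedef struct pai_s pai_t;
struct pai_s {
    edata_t *(*alloc)(pai_t *self, size_t size,
        bool *deferred_work_generated);
    void (*dalloc)(pai_t *self, edata_t *edata,
        bool *deferred_work_generated);
};

/* A toy implementation: deallocation leaves dirty pages behind, so it
 * reports that it deferred purging work for a background thread. */
static edata_t *
toy_alloc(pai_t *self, size_t size, bool *deferred_work_generated) {
    (void)self;
    *deferred_work_generated = false;
    edata_t *e = malloc(sizeof(*e));
    if (e != NULL) {
        e->size = size;
    }
    return e;
}

static void
toy_dalloc(pai_t *self, edata_t *edata, bool *deferred_work_generated) {
    (void)self;
    free(edata);
    *deferred_work_generated = true;    /* purging postponed */
}

/* Stand-in for signalling a sleeping background thread. */
static void
wake_background_thread(void) {
    puts("deferred work generated; waking background thread");
}

int
main(void) {
    pai_t pai = { toy_alloc, toy_dalloc };
    bool deferred_work_generated;

    edata_t *e = pai.alloc(&pai, 4096, &deferred_work_generated);
    pai.dalloc(&pai, e, &deferred_work_generated);
    if (deferred_work_generated) {
        wake_background_thread();
    }
    return 0;
}

Reporting the flag through an out-parameter, rather than having each callee signal a thread itself, presumably keeps the wakeup decision at the caller, which is the layer that knows whether a background thread exists and is enabled.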
@@ -49,7 +49,7 @@ TEST_BEGIN(test_decay_npages_purge_in) {
     expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
         "Failed to initialize decay");

-    const size_t new_pages = 100;
+    size_t new_pages = 100;

     nstime_t time;
     nstime_copy(&time, &decay_nstime);
@@ -79,9 +79,12 @@ TEST_BEGIN(test_alloc_max) {
     edata_t *edata;

     /* Small max */
-    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false);
+    bool deferred_work_generated;
+    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false,
+        &deferred_work_generated);
     expect_ptr_not_null(edata, "Allocation of small max failed");
-    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false);
+    edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
+        &deferred_work_generated);
     expect_ptr_null(edata, "Allocation of larger than small max succeeded");

     destroy_test_data(shard);
@@ -166,6 +169,8 @@ TEST_BEGIN(test_stress) {
     mem_tree_t tree;
     mem_tree_new(&tree);

+    bool deferred_work_generated;
+
     for (size_t i = 0; i < 100 * 1000; i++) {
         size_t operation = prng_range_zu(&prng_state, 2);
         if (operation == 0) {
@@ -183,7 +188,8 @@ TEST_BEGIN(test_stress) {
             size_t npages = npages_min + prng_range_zu(&prng_state,
                 npages_max - npages_min);
             edata_t *edata = pai_alloc(tsdn, &shard->pai,
-                npages * PAGE, PAGE, false);
+                npages * PAGE, PAGE, false,
+                &deferred_work_generated);
             assert_ptr_not_null(edata,
                 "Unexpected allocation failure");
             live_edatas[nlive_edatas] = edata;
@@ -199,7 +205,8 @@ TEST_BEGIN(test_stress) {
             live_edatas[victim] = live_edatas[nlive_edatas - 1];
             nlive_edatas--;
             node_remove(&tree, to_free);
-            pai_dalloc(tsdn, &shard->pai, to_free);
+            pai_dalloc(tsdn, &shard->pai, to_free,
+                &deferred_work_generated);
         }
     }

@@ -218,7 +225,8 @@ TEST_BEGIN(test_stress) {
     for (size_t i = 0; i < nlive_edatas; i++) {
         edata_t *to_free = live_edatas[i];
         node_remove(&tree, to_free);
-        pai_dalloc(tsdn, &shard->pai, to_free);
+        pai_dalloc(tsdn, &shard->pai, to_free,
+            &deferred_work_generated);
     }
     hpa_shard_destroy(tsdn, shard);

@@ -244,6 +252,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
         &test_hpa_shard_opts_default);
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

+    bool deferred_work_generated;
+
     enum {NALLOCS = 8};

     edata_t *allocs[NALLOCS];
@@ -253,13 +263,13 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
      */
     for (size_t i = 0; i < NALLOCS / 2; i++) {
         allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
     }
     edata_list_active_t allocs_list;
     edata_list_active_init(&allocs_list);
     size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
-        &allocs_list);
+        &allocs_list, &deferred_work_generated);
     expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
     for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
         allocs[i] = edata_list_active_first(&allocs_list);
@@ -279,15 +289,17 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
     for (size_t i = 0; i < NALLOCS / 2; i++) {
         edata_list_active_append(&allocs_list, allocs[i]);
     }
-    pai_dalloc_batch(tsdn, &shard->pai, &allocs_list);
+    pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
+        &deferred_work_generated);
     for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
-        pai_dalloc(tsdn, &shard->pai, allocs[i]);
+        pai_dalloc(tsdn, &shard->pai, allocs[i],
+            &deferred_work_generated);
     }

     /* Reallocate (individually), and ensure reuse and contiguity. */
     for (size_t i = 0; i < NALLOCS; i++) {
         allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
     }
     void *new_base = edata_base_get(allocs[0]);
@@ -355,11 +367,14 @@ TEST_BEGIN(test_defer_time) {

     hpa_shard_t *shard = create_test_data(&hooks, &opts);

+    bool deferred_work_generated;
+
     nstime_init(&defer_curtime, 0);
     tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
     edata_t *edatas[HUGEPAGE_PAGES];
     for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false);
+        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+            &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -374,7 +389,8 @@ TEST_BEGIN(test_defer_time) {

     /* Purge. Recall that dirty_mult is .25. */
     for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
-        pai_dalloc(tsdn, &shard->pai, edatas[i]);
+        pai_dalloc(tsdn, &shard->pai, edatas[i],
+            &deferred_work_generated);
     }

     hpa_shard_do_deferred_work(tsdn, shard);
@@ -391,14 +407,16 @@ TEST_BEGIN(test_defer_time) {
      * be marked for pending hugify.
      */
     for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
-        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false);
+        edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+            &deferred_work_generated);
         expect_ptr_not_null(edatas[i], "Unexpected null edata");
     }
     /*
      * We would be ineligible for hugification, had we not already met the
      * threshold before dipping below it.
      */
-    pai_dalloc(tsdn, &shard->pai, edatas[0]);
+    pai_dalloc(tsdn, &shard->pai, edatas[0],
+        &deferred_work_generated);
     /* Wait for the threshold again. */
     nstime_init2(&defer_curtime, 22, 0);
     hpa_shard_do_deferred_work(tsdn, shard);
@@ -65,6 +65,23 @@ set_background_thread_enabled(bool enabled) {
     expect_d_eq(0, err, "Unexpected mallctl failure");
 }

+static void
+wait_until_thread_is_enabled(unsigned arena_id) {
+    tsd_t* tsd = tsd_fetch();
+
+    bool sleeping = false;
+    int iterations = 0;
+    do {
+        background_thread_info_t *info =
+            background_thread_info_get(arena_id);
+        malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+        malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+        sleeping = background_thread_indefinite_sleep(info);
+        assert_d_lt(iterations, (int)1e6,
+            "Waiting for a thread to start for too long");
+    } while (!sleeping);
+}
+
 static void
 expect_purging(unsigned arena_ind, bool expect_deferred) {
     size_t empty_ndirty;
@@ -132,6 +149,7 @@ TEST_BEGIN(test_hpa_background_thread_enable_disable) {
     expect_purging(arena_ind, false);

     set_background_thread_enabled(true);
+    wait_until_thread_is_enabled(arena_ind);
     expect_purging(arena_ind, true);
 }
 TEST_END
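For context, "indefinite sleep" here means the background thread is blocked on its condition variable with no timeout, so only an explicit signal can get it running again; wait_until_thread_is_enabled spins until the newly enabled thread has parked itself in that state. The sketch below shows the general wake-on-demand pattern with POSIX primitives; it is an illustration of the mechanism only, not jemalloc's actual background_thread code, and the names are hypothetical:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative only: a worker that sleeps indefinitely until someone
 * reports deferred work, mirroring the wakeup this commit enables. */
typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t cv;
    bool work_pending;
    bool shutdown;
} worker_t;

static void *
worker_run(void *arg) {
    worker_t *w = (worker_t *)arg;
    pthread_mutex_lock(&w->mtx);
    while (!w->shutdown) {
        while (!w->work_pending && !w->shutdown) {
            /* Indefinite sleep: no timeout, only a signal wakes us. */
            pthread_cond_wait(&w->cv, &w->mtx);
        }
        w->work_pending = false;
        pthread_mutex_unlock(&w->mtx);
        /* ... purge / hugify / decay here ... */
        pthread_mutex_lock(&w->mtx);
    }
    pthread_mutex_unlock(&w->mtx);
    return NULL;
}

/* Called by the allocating/deallocating thread when a PAI call sets
 * *deferred_work_generated. */
static void
worker_notify_deferred_work(worker_t *w) {
    pthread_mutex_lock(&w->mtx);
    w->work_pending = true;
    pthread_cond_signal(&w->cv);
    pthread_mutex_unlock(&w->mtx);
}

int
main(void) {
    worker_t w;
    pthread_mutex_init(&w.mtx, NULL);
    pthread_cond_init(&w.cv, NULL);
    w.work_pending = false;
    w.shutdown = false;

    pthread_t tid;
    pthread_create(&tid, NULL, worker_run, &w);

    worker_notify_deferred_work(&w);    /* wake on demand */

    pthread_mutex_lock(&w.mtx);
    w.shutdown = true;
    pthread_cond_signal(&w.cv);
    pthread_mutex_unlock(&w.mtx);
    pthread_join(tid, NULL);
    return 0;
}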
@@ -1,4 +1,4 @@
 #!/bin/sh

-export MALLOC_CONF="hpa_dirty_mult:0,background_thread_hpa_interval_max_ms:50,hpa_sec_nshards:0"
+export MALLOC_CONF="hpa_dirty_mult:0,hpa_min_purge_interval_ms:50,hpa_sec_nshards:0"

@@ -87,12 +87,13 @@ static void *
 do_alloc_free_purge(void *arg) {
     test_data_t *test_data = (test_data_t *)arg;
     for (int i = 0; i < 10 * 1000; i++) {
+        bool deferred_work_generated;
         edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
-            PAGE, /* slab */ false, /* szind */ 0, /* zero */ false);
+            PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
+            &deferred_work_generated);
         assert_ptr_not_null(edata, "");
-        bool generated_dirty;
         pa_dalloc(TSDN_NULL, &test_data->shard, edata,
-            &generated_dirty);
+            &deferred_work_generated);
         malloc_mutex_lock(TSDN_NULL,
             &test_data->shard.pac.decay_dirty.mtx);
         pac_decay_all(TSDN_NULL, &test_data->shard.pac,
test/unit/sec.c (115 changed lines)
@@ -50,8 +50,9 @@ test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,

 static inline edata_t *
 pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero) {
+    size_t alignment, bool zero, bool *deferred_work_generated) {
     pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+    *deferred_work_generated = false;
     if (ta->alloc_fail) {
         return NULL;
     }
@@ -70,8 +71,10 @@ pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,

 static inline size_t
 pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t nallocs, edata_list_active_t *results) {
+    size_t nallocs, edata_list_active_t *results,
+    bool *deferred_work_generated) {
     pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+    *deferred_work_generated = false;
     if (ta->alloc_fail) {
         return 0;
     }
@@ -92,31 +95,37 @@ pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,

 static bool
 pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-    size_t old_size, size_t new_size, bool zero) {
+    size_t old_size, size_t new_size, bool zero,
+    bool *deferred_work_generated) {
     pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+    *deferred_work_generated = false;
     ta->expand_count++;
     return ta->expand_return_value;
 }

 static bool
 pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
-    size_t old_size, size_t new_size) {
+    size_t old_size, size_t new_size, bool *deferred_work_generated) {
     pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+    *deferred_work_generated = false;
     ta->shrink_count++;
     return ta->shrink_return_value;
 }

 static void
-pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
+pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+    bool *deferred_work_generated) {
     pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+    *deferred_work_generated = false;
     ta->dalloc_count++;
     free(edata);
 }

 static void
 pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
-    edata_list_active_t *list) {
+    edata_list_active_t *list, bool *deferred_work_generated) {
     pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+    *deferred_work_generated = false;

     edata_t *edata;
     while ((edata = edata_list_active_first(list)) != NULL) {
@@ -168,14 +177,15 @@ TEST_BEGIN(test_reuse) {
     enum { NALLOCS = 11 };
     edata_t *one_page[NALLOCS];
     edata_t *two_page[NALLOCS];
+    bool deferred_work_generated;
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
         /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
     for (int i = 0; i < NALLOCS; i++) {
         one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
         two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
     }
     expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
@@ -189,10 +199,12 @@ TEST_BEGIN(test_reuse) {
      * separation works correctly.
      */
     for (int i = NALLOCS - 1; i >= 0; i--) {
-        pai_dalloc(tsdn, &sec.pai, one_page[i]);
+        pai_dalloc(tsdn, &sec.pai, one_page[i],
+            &deferred_work_generated);
     }
     for (int i = NALLOCS - 1; i >= 0; i--) {
-        pai_dalloc(tsdn, &sec.pai, two_page[i]);
+        pai_dalloc(tsdn, &sec.pai, two_page[i],
+            &deferred_work_generated);
     }
     expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
         "Incorrect number of allocations");
@@ -204,9 +216,9 @@ TEST_BEGIN(test_reuse) {
      */
     for (int i = 0; i < NALLOCS; i++) {
         edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_eq(one_page[i], alloc1,
             "Got unexpected allocation");
         expect_ptr_eq(two_page[i], alloc2,
@@ -238,14 +250,16 @@ TEST_BEGIN(test_auto_flush) {
     enum { NALLOCS = 10 };
     edata_t *extra_alloc;
     edata_t *allocs[NALLOCS];
+    bool deferred_work_generated;
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
         /* max_bytes */ NALLOCS * PAGE);
     for (int i = 0; i < NALLOCS; i++) {
         allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
     }
-    extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false);
+    extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+        &deferred_work_generated);
     expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
     size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
     expect_zu_le(NALLOCS + 1, max_allocs,
@@ -254,7 +268,7 @@ TEST_BEGIN(test_auto_flush) {
         "Incorrect number of allocations");
     /* Free until the SEC is full, but should not have flushed yet. */
     for (int i = 0; i < NALLOCS; i++) {
-        pai_dalloc(tsdn, &sec.pai, allocs[i]);
+        pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
     }
     expect_zu_le(NALLOCS + 1, max_allocs,
         "Incorrect number of allocations");
@@ -267,7 +281,7 @@ TEST_BEGIN(test_auto_flush) {
      * entirety when it decides to do so, and it has only one bin active
      * right now.
      */
-    pai_dalloc(tsdn, &sec.pai, extra_alloc);
+    pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
     expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
         "Incorrect number of allocations");
     expect_zu_eq(0, ta.dalloc_count,
@@ -291,16 +305,17 @@ do_disable_flush_test(bool is_disable) {

     enum { NALLOCS = 11 };
     edata_t *allocs[NALLOCS];
+    bool deferred_work_generated;
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
         /* max_bytes */ NALLOCS * PAGE);
     for (int i = 0; i < NALLOCS; i++) {
         allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
     }
     /* Free all but the last aloc. */
     for (int i = 0; i < NALLOCS - 1; i++) {
-        pai_dalloc(tsdn, &sec.pai, allocs[i]);
+        pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
     }
     size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;

@@ -326,7 +341,8 @@ do_disable_flush_test(bool is_disable) {
      * If we free into a disabled SEC, it should forward to the fallback.
      * Otherwise, the SEC should accept the allocation.
      */
-    pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1]);
+    pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
+        &deferred_work_generated);

     expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
         "Incorrect number of allocations");
@@ -356,6 +372,8 @@ TEST_BEGIN(test_max_alloc_respected) {
     size_t max_alloc = 2 * PAGE;
     size_t attempted_alloc = 3 * PAGE;

+    bool deferred_work_generated;
+
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
         /* max_bytes */ 1000 * PAGE);

@@ -365,13 +383,13 @@ TEST_BEGIN(test_max_alloc_respected) {
         expect_zu_eq(i, ta.dalloc_count,
             "Incorrect number of deallocations");
         edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
-            PAGE, /* zero */ false);
+            PAGE, /* zero */ false, &deferred_work_generated);
         expect_ptr_not_null(edata, "Unexpected alloc failure");
         expect_zu_eq(i + 1, ta.alloc_count,
             "Incorrect number of allocations");
         expect_zu_eq(i, ta.dalloc_count,
             "Incorrect number of deallocations");
-        pai_dalloc(tsdn, &sec.pai, edata);
+        pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
     }
 }
 TEST_END
@@ -387,27 +405,31 @@ TEST_BEGIN(test_expand_shrink_delegate) {
     /* See the note above -- we can't use the real tsd. */
     tsdn_t *tsdn = TSDN_NULL;

+    bool deferred_work_generated;
+
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
         /* max_bytes */ 1000 * PAGE);
     edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-        /* zero */ false);
+        /* zero */ false, &deferred_work_generated);
     expect_ptr_not_null(edata, "Unexpected alloc failure");

     bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
-        /* zero */ false);
+        /* zero */ false, &deferred_work_generated);
     expect_false(err, "Unexpected expand failure");
     expect_zu_eq(1, ta.expand_count, "");
     ta.expand_return_value = true;
     err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
-        /* zero */ false);
+        /* zero */ false, &deferred_work_generated);
     expect_true(err, "Unexpected expand success");
     expect_zu_eq(2, ta.expand_count, "");

-    err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE);
+    err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
+        &deferred_work_generated);
     expect_false(err, "Unexpected shrink failure");
     expect_zu_eq(1, ta.shrink_count, "");
     ta.shrink_return_value = true;
-    err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE);
+    err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
+        &deferred_work_generated);
     expect_true(err, "Unexpected shrink success");
     expect_zu_eq(2, ta.shrink_count, "");
 }
@@ -426,9 +448,10 @@ TEST_BEGIN(test_nshards_0) {
     opts.nshards = 0;
     sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);

+    bool deferred_work_generated;
     edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-        /* zero */ false);
-    pai_dalloc(tsdn, &sec.pai, edata);
+        /* zero */ false, &deferred_work_generated);
+    pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

     /* Both operations should have gone directly to the fallback. */
     expect_zu_eq(1, ta.alloc_count, "");
@@ -461,25 +484,28 @@ TEST_BEGIN(test_stats_simple) {
         FLUSH_PAGES = 20,
     };

+    bool deferred_work_generated;
+
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
         /* max_bytes */ FLUSH_PAGES * PAGE);

     edata_t *allocs[FLUSH_PAGES];
     for (size_t i = 0; i < FLUSH_PAGES; i++) {
         allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_stats_pages(tsdn, &sec, 0);
     }

     /* Increase and decrease, without flushing. */
     for (size_t i = 0; i < NITERS; i++) {
         for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
-            pai_dalloc(tsdn, &sec.pai, allocs[j]);
+            pai_dalloc(tsdn, &sec.pai, allocs[j],
+                &deferred_work_generated);
             expect_stats_pages(tsdn, &sec, j + 1);
         }
         for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
             allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-                /* zero */ false);
+                /* zero */ false, &deferred_work_generated);
             expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
         }
     }
@@ -505,25 +531,30 @@ TEST_BEGIN(test_stats_auto_flush) {
     edata_t *extra_alloc1;
     edata_t *allocs[2 * FLUSH_PAGES];

-    extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false);
-    extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false);
+    bool deferred_work_generated;
+
+    extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+        &deferred_work_generated);
+    extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+        &deferred_work_generated);

     for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
         allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
     }

     for (size_t i = 0; i < FLUSH_PAGES; i++) {
-        pai_dalloc(tsdn, &sec.pai, allocs[i]);
+        pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
     }
-    pai_dalloc(tsdn, &sec.pai, extra_alloc0);
+    pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);

     /* Flush the remaining pages; stats should still work. */
     for (size_t i = 0; i < FLUSH_PAGES; i++) {
-        pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i]);
+        pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
+            &deferred_work_generated);
     }

-    pai_dalloc(tsdn, &sec.pai, extra_alloc1);
+    pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);

     expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
         - ta.dalloc_count - ta.dalloc_batch_count);
@@ -545,16 +576,17 @@ TEST_BEGIN(test_stats_manual_flush) {
     test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
         /* max_bytes */ FLUSH_PAGES * PAGE);

+    bool deferred_work_generated;
     edata_t *allocs[FLUSH_PAGES];
     for (size_t i = 0; i < FLUSH_PAGES; i++) {
         allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-            /* zero */ false);
+            /* zero */ false, &deferred_work_generated);
         expect_stats_pages(tsdn, &sec, 0);
     }

     /* Dalloc the first half of the allocations. */
     for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
-        pai_dalloc(tsdn, &sec.pai, allocs[i]);
+        pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
         expect_stats_pages(tsdn, &sec, i + 1);
     }

@@ -563,7 +595,8 @@ TEST_BEGIN(test_stats_manual_flush) {

     /* Flush the remaining pages. */
     for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
-        pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i]);
+        pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
+            &deferred_work_generated);
         expect_stats_pages(tsdn, &sec, i + 1);
     }
     sec_disable(tsdn, &sec);