Pass 'frequent_reuse' hint to PAI
The hint tells the allocator whether the allocation is expected to be frequently reused. It is currently consulted only for guarding decisions, but it could also, for example, urge the allocator to keep the allocation cached.
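For callers, the visible change is one extra boolean in the pai_alloc argument list, between the guarded flag and the deferred-work out-parameter. A minimal sketch of the new call shape, mirroring the test updates in this diff (it assumes a tsdn and an initialized sec are in scope, as in the tests; all argument values are placeholders):

bool deferred_work_generated = false;
edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
    &deferred_work_generated);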
commit f56f5b9930
parent 2c70e8d351
committed by Alexander Lapenkov
@@ -81,10 +81,10 @@ TEST_BEGIN(test_alloc_max) {
 /* Small max */
 bool deferred_work_generated = false;
 edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
-    &deferred_work_generated);
+    false, &deferred_work_generated);
 expect_ptr_not_null(edata, "Allocation of small max failed");
 edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
-    false, &deferred_work_generated);
+    false, false, &deferred_work_generated);
 expect_ptr_null(edata, "Allocation of larger than small max succeeded");

 destroy_test_data(shard);
@@ -188,7 +188,7 @@ TEST_BEGIN(test_stress) {
 size_t npages = npages_min + prng_range_zu(&prng_state,
     npages_max - npages_min);
 edata_t *edata = pai_alloc(tsdn, &shard->pai,
-    npages * PAGE, PAGE, false, false,
+    npages * PAGE, PAGE, false, false, false,
     &deferred_work_generated);
 assert_ptr_not_null(edata,
     "Unexpected allocation failure");
@@ -264,7 +264,7 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
 for (size_t i = 0; i < NALLOCS / 2; i++) {
 allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
     /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* frequent_reuse */ false, &deferred_work_generated);
 expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 }
 edata_list_active_t allocs_list;
@@ -300,8 +300,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
 /* Reallocate (individually), and ensure reuse and contiguity. */
 for (size_t i = 0; i < NALLOCS; i++) {
 allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
 }
 void *new_base = edata_base_get(allocs[0]);
@@ -376,7 +376,7 @@ TEST_BEGIN(test_defer_time) {
 edata_t *edatas[HUGEPAGE_PAGES];
 for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
 edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
-    false, &deferred_work_generated);
+    false, false, &deferred_work_generated);
 expect_ptr_not_null(edatas[i], "Unexpected null edata");
 }
 hpa_shard_do_deferred_work(tsdn, shard);
@@ -410,7 +410,7 @@ TEST_BEGIN(test_defer_time) {
 */
 for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
 edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
-    false, &deferred_work_generated);
+    false, false, &deferred_work_generated);
 expect_ptr_not_null(edatas[i], "Unexpected null edata");
 }
 /*
@@ -50,7 +50,7 @@ test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,

 static inline edata_t *
 pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded,
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
     bool *deferred_work_generated) {
 assert(!guarded);
 pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
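The hunk above extends the test allocator's alloc hook with the new frequent_reuse parameter. As a hedged illustration of how a PAI backend could act on the hint, a sketch follows; both helper functions and the policy are invented for this sketch, and the real pai_test_allocator_alloc shown above is only a test stub:

static edata_t *
hinted_alloc_sketch(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
    bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
    /*
     * Hypothetical policy: route allocations expected to be reused
     * often to a path that keeps their backing pages cached.
     */
    if (frequent_reuse) {
        return alloc_keep_cached(tsdn, self, size, alignment, zero,
            guarded, deferred_work_generated); /* invented helper */
    }
    return alloc_plain(tsdn, self, size, alignment, zero, guarded,
        deferred_work_generated); /* invented helper */
}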
@@ -178,12 +178,12 @@ TEST_BEGIN(test_reuse) {
     /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
 for (int i = 0; i < NALLOCS; i++) {
 one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
 two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
 }
 expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
@@ -214,11 +214,11 @@ TEST_BEGIN(test_reuse) {
 */
 for (int i = 0; i < NALLOCS; i++) {
 edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_ptr_eq(one_page[i], alloc1,
     "Got unexpected allocation");
 expect_ptr_eq(two_page[i], alloc2,
@@ -255,12 +255,13 @@ TEST_BEGIN(test_auto_flush) {
     /* max_bytes */ NALLOCS * PAGE);
 for (int i = 0; i < NALLOCS; i++) {
 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 }
 extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-    /* guarded */ false, &deferred_work_generated);
+    /* guarded */ false, /* frequent_reuse */ false,
+    &deferred_work_generated);
 expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
 size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
 expect_zu_le(NALLOCS + 1, max_allocs,
@@ -311,8 +312,8 @@ do_disable_flush_test(bool is_disable) {
     /* max_bytes */ NALLOCS * PAGE);
 for (int i = 0; i < NALLOCS; i++) {
 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 }
 /* Free all but the last aloc. */
@@ -386,7 +387,7 @@ TEST_BEGIN(test_max_alloc_respected) {
     "Incorrect number of deallocations");
 edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
     PAGE, /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* frequent_reuse */ false, &deferred_work_generated);
 expect_ptr_not_null(edata, "Unexpected alloc failure");
 expect_zu_eq(i + 1, ta.alloc_count,
     "Incorrect number of allocations");
@@ -413,7 +414,7 @@ TEST_BEGIN(test_expand_shrink_delegate) {
 test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
     /* max_bytes */ 1000 * PAGE);
 edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
     &deferred_work_generated);
 expect_ptr_not_null(edata, "Unexpected alloc failure");

@@ -454,7 +455,7 @@ TEST_BEGIN(test_nshards_0) {

 bool deferred_work_generated = false;
 edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
     &deferred_work_generated);
 pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

@@ -497,8 +498,8 @@ TEST_BEGIN(test_stats_simple) {
 edata_t *allocs[FLUSH_PAGES];
 for (size_t i = 0; i < FLUSH_PAGES; i++) {
 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_stats_pages(tsdn, &sec, 0);
 }

@@ -512,6 +513,7 @@ TEST_BEGIN(test_stats_simple) {
 for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
 allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
     /* zero */ false, /* guarded */ false,
+    /* frequent_reuse */ false,
     &deferred_work_generated);
 expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
 }
@@ -541,14 +543,16 @@ TEST_BEGIN(test_stats_auto_flush) {
 bool deferred_work_generated = false;

 extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-    /* guarded */ false, &deferred_work_generated);
+    /* guarded */ false, /* frequent_reuse */ false,
+    &deferred_work_generated);
 extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-    /* guarded */ false, &deferred_work_generated);
+    /* guarded */ false, /* frequent_reuse */ false,
+    &deferred_work_generated);

 for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 }

 for (size_t i = 0; i < FLUSH_PAGES; i++) {
@@ -588,8 +592,8 @@ TEST_BEGIN(test_stats_manual_flush) {
 edata_t *allocs[FLUSH_PAGES];
 for (size_t i = 0; i < FLUSH_PAGES; i++) {
 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-    /* zero */ false, /* guarded */ false,
-    &deferred_work_generated);
+    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+    false, &deferred_work_generated);
 expect_stats_pages(tsdn, &sec, 0);
 }
