Pass 'frequent_reuse' hint to PAI
Currently consulted only for guarding purposes, the hint tells the allocator whether the allocation is expected to be frequently reused. For example, it might urge the allocator to ensure the allocation is cached.
parent 2c70e8d351
commit f56f5b9930
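As context for the diff below, here is the extended allocation hook together with a minimal sketch of a call site passing the new hint. The hook signature is taken from the pai.h hunk in this commit; the call itself is hypothetical (the tsdn/shard variables and the `true` hint value are illustrative, not from the diff):

	/* The PAI allocation hook after this change (from pai.h below). */
	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
	    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
	    bool *deferred_work_generated);

	/* Hypothetical caller: hint that this extent will be reused often. */
	bool deferred_work_generated = false;
	edata_t *edata = pai_alloc(tsdn, &shard->pac.pai, size, PAGE,
	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ true,
	    &deferred_work_generated);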
include/jemalloc/internal/pai.h

@@ -7,7 +7,7 @@ typedef struct pai_s pai_t;
 struct pai_s {
 	/* Returns NULL on failure. */
 	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
-	    size_t alignment, bool zero, bool guarded,
+	    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
 	    bool *deferred_work_generated);
 	/*
 	 * Returns the number of extents added to the list (which may be fewer
@@ -37,10 +37,11 @@ struct pai_s {
  */

 static inline edata_t *
-pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated) {
 	return self->alloc(tsdn, self, size, alignment, zero, guarded,
-	    deferred_work_generated);
+	    frequent_reuse, deferred_work_generated);
 }

 static inline size_t
src/hpa.c

@@ -9,7 +9,8 @@
 #define HPA_EDEN_SIZE (128 * HUGEPAGE)

 static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
 static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -760,7 +761,7 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,

 static edata_t *
 hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
 	assert(!guarded);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
src/pa.c

@@ -128,7 +128,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	edata_t *edata = NULL;
 	if (!guarded && pa_shard_uses_hpa(shard)) {
 		edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
-		    zero, /* guarded */ false, deferred_work_generated);
+		    zero, /* guarded */ false, slab, deferred_work_generated);
 	}
 	/*
 	 * Fall back to the PAC if the HPA is off or couldn't serve the given
@@ -136,7 +136,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	 */
 	if (edata == NULL) {
 		edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
-		    guarded, deferred_work_generated);
+		    guarded, slab, deferred_work_generated);
 	}
 	if (edata != NULL) {
 		assert(edata_size_get(edata) == size);
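Note that pa_alloc forwards its existing `slab` flag as the frequent_reuse hint in both calls above; presumably slab extents, which back small size classes, are the allocations expected to see the heaviest reuse, while large (non-slab) extents end up passing the hint as false.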
src/pac.c

@@ -5,7 +5,8 @@
 #include "jemalloc/internal/san.h"

 static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -152,7 +153,8 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,

 static edata_t *
 pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
-    bool zero, bool guarded, bool *deferred_work_generated) {
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated) {
 	pac_t *pac = (pac_t *)self;
 	ehooks_t *ehooks = pac_ehooks_get(pac);

src/pai.c

@@ -7,7 +7,8 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 	for (size_t i = 0; i < nallocs; i++) {
 		bool deferred_by_alloc = false;
 		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
-		    /* zero */ false, /* guarded */ false, &deferred_by_alloc);
+		    /* zero */ false, /* guarded */ false,
+		    /* frequent_reuse */ false, &deferred_by_alloc);
 		*deferred_work_generated |= deferred_by_alloc;
 		if (edata == NULL) {
 			return i;
src/sec.c

@@ -4,7 +4,8 @@
 #include "jemalloc/internal/sec.h"

 static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -218,7 +219,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,

 static edata_t *
 sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
 	assert(!guarded);

@@ -227,7 +228,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 	if (zero || alignment > PAGE || sec->opts.nshards == 0
 	    || size > sec->opts.max_alloc) {
 		return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
-		    /* guarded */ false, deferred_work_generated);
+		    /* guarded */ false, frequent_reuse,
+		    deferred_work_generated);
 	}
 	pszind_t pszind = sz_psz2ind(size);
 	sec_shard_t *shard = sec_shard_pick(tsdn, sec);
@@ -250,7 +252,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 			    size);
 		} else {
 			edata = pai_alloc(tsdn, sec->fallback, size, alignment,
-			    zero, /* guarded */ false, deferred_work_generated);
+			    zero, /* guarded */ false, frequent_reuse,
+			    deferred_work_generated);
 		}
 	}
 	return edata;
test/unit/hpa.c

@@ -81,10 +81,10 @@ TEST_BEGIN(test_alloc_max) {
 	/* Small max */
 	bool deferred_work_generated = false;
 	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
-	    &deferred_work_generated);
+	    false, &deferred_work_generated);
 	expect_ptr_not_null(edata, "Allocation of small max failed");
 	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
-	    false, &deferred_work_generated);
+	    false, false, &deferred_work_generated);
 	expect_ptr_null(edata, "Allocation of larger than small max succeeded");

 	destroy_test_data(shard);
@@ -188,7 +188,7 @@ TEST_BEGIN(test_stress) {
 		size_t npages = npages_min + prng_range_zu(&prng_state,
 		    npages_max - npages_min);
 		edata_t *edata = pai_alloc(tsdn, &shard->pai,
-		    npages * PAGE, PAGE, false, false,
+		    npages * PAGE, PAGE, false, false, false,
 		    &deferred_work_generated);
 		assert_ptr_not_null(edata,
 		    "Unexpected allocation failure");
@@ -264,7 +264,7 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
 	for (size_t i = 0; i < NALLOCS / 2; i++) {
 		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
 		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* frequent_reuse */ false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 	}
 	edata_list_active_t allocs_list;
@@ -300,8 +300,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
 	/* Reallocate (individually), and ensure reuse and contiguity. */
 	for (size_t i = 0; i < NALLOCS; i++) {
 		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
 	}
 	void *new_base = edata_base_get(allocs[0]);
@@ -376,7 +376,7 @@ TEST_BEGIN(test_defer_time) {
 	edata_t *edatas[HUGEPAGE_PAGES];
 	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
 		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
-		    false, &deferred_work_generated);
+		    false, false, &deferred_work_generated);
 		expect_ptr_not_null(edatas[i], "Unexpected null edata");
 	}
 	hpa_shard_do_deferred_work(tsdn, shard);
@@ -410,7 +410,7 @@ TEST_BEGIN(test_defer_time) {
 	 */
 	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
 		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
-		    false, &deferred_work_generated);
+		    false, false, &deferred_work_generated);
 		expect_ptr_not_null(edatas[i], "Unexpected null edata");
 	}
 	/*
test/unit/sec.c

@@ -50,7 +50,7 @@ test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,

 static inline edata_t *
 pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded,
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
     bool *deferred_work_generated) {
 	assert(!guarded);
 	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
@@ -178,12 +178,12 @@ TEST_BEGIN(test_reuse) {
 	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
 	for (int i = 0; i < NALLOCS; i++) {
 		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
 		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
 	}
 	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
@@ -214,11 +214,11 @@ TEST_BEGIN(test_reuse) {
 	 */
 	for (int i = 0; i < NALLOCS; i++) {
 		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_eq(one_page[i], alloc1,
 		    "Got unexpected allocation");
 		expect_ptr_eq(two_page[i], alloc2,
@@ -255,12 +255,13 @@ TEST_BEGIN(test_auto_flush) {
 	    /* max_bytes */ NALLOCS * PAGE);
 	for (int i = 0; i < NALLOCS; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 	}
 	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-	    /* guarded */ false, &deferred_work_generated);
+	    /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);
 	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
 	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
 	expect_zu_le(NALLOCS + 1, max_allocs,
@@ -311,8 +312,8 @@ do_disable_flush_test(bool is_disable) {
 	    /* max_bytes */ NALLOCS * PAGE);
 	for (int i = 0; i < NALLOCS; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
 	}
 	/* Free all but the last aloc. */
@@ -386,7 +387,7 @@ TEST_BEGIN(test_max_alloc_respected) {
 		    "Incorrect number of deallocations");
 		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
 		    PAGE, /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* frequent_reuse */ false, &deferred_work_generated);
 		expect_ptr_not_null(edata, "Unexpected alloc failure");
 		expect_zu_eq(i + 1, ta.alloc_count,
 		    "Incorrect number of allocations");
@@ -413,7 +414,7 @@ TEST_BEGIN(test_expand_shrink_delegate) {
 	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
 	    /* max_bytes */ 1000 * PAGE);
 	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-	    /* zero */ false, /* guarded */ false,
+	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
 	    &deferred_work_generated);
 	expect_ptr_not_null(edata, "Unexpected alloc failure");

@@ -454,7 +455,7 @@ TEST_BEGIN(test_nshards_0) {

 	bool deferred_work_generated = false;
 	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-	    /* zero */ false, /* guarded */ false,
+	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
 	    &deferred_work_generated);
 	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

@@ -497,8 +498,8 @@ TEST_BEGIN(test_stats_simple) {
 	edata_t *allocs[FLUSH_PAGES];
 	for (size_t i = 0; i < FLUSH_PAGES; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_stats_pages(tsdn, &sec, 0);
 	}

@@ -512,6 +513,7 @@ TEST_BEGIN(test_stats_simple) {
 	for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
 		allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
 		    /* zero */ false, /* guarded */ false,
+		    /* frequent_reuse */ false,
 		    &deferred_work_generated);
 		expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
 	}
@@ -541,14 +543,16 @@ TEST_BEGIN(test_stats_auto_flush) {
 	bool deferred_work_generated = false;

 	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-	    /* guarded */ false, &deferred_work_generated);
+	    /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);
 	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
-	    /* guarded */ false, &deferred_work_generated);
+	    /* guarded */ false, /* frequent_reuse */ false,
+	    &deferred_work_generated);

 	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 	}

 	for (size_t i = 0; i < FLUSH_PAGES; i++) {
@@ -588,8 +592,8 @@ TEST_BEGIN(test_stats_manual_flush) {
 	edata_t *allocs[FLUSH_PAGES];
 	for (size_t i = 0; i < FLUSH_PAGES; i++) {
 		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
-		    /* zero */ false, /* guarded */ false,
-		    &deferred_work_generated);
+		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
+		    false, &deferred_work_generated);
 		expect_stats_pages(tsdn, &sec, 0);
 	}
