Implement guard pages.
Add guarded extents: regular extents surrounded by guard pages that are mprotect-ed to PROT_NONE, so accesses that run off either end of the extent fault immediately. To reduce syscalls, small guarded extents are cached as a separate eset in ecache and decay through the dirty / muzzy / retained pipeline as usual.
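The mechanism itself is simple: reserve a mapping with one extra page on each side of the usable range and mprotect() those pages to PROT_NONE, so an overflow or underflow off the extent faults instead of silently corrupting a neighbor. The sketch below is not jemalloc's internal code (which routes through its own pages/extent machinery and the new guard APIs); it is a minimal, self-contained illustration of the idea, and guarded_map plus the fixed 4 KiB PAGE are assumptions made only for this example.

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

#define PAGE ((size_t)4096)	/* assumed page size, for the sketch only */

/*
 * Map `usable` bytes (a multiple of PAGE) with a PROT_NONE guard page
 * before and after the usable range; touching either guard page faults.
 */
static void *
guarded_map(size_t usable) {
	size_t total = usable + 2 * PAGE;
	char *raw = mmap(NULL, total, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) {
		return NULL;
	}
	/* Retire the first and last page as guards. */
	if (mprotect(raw, PAGE, PROT_NONE) != 0 ||
	    mprotect(raw + PAGE + usable, PAGE, PROT_NONE) != 0) {
		munmap(raw, total);
		return NULL;
	}
	return raw + PAGE;	/* the caller only ever sees the usable range */
}

int
main(void) {
	char *p = guarded_map(2 * PAGE);
	if (p != NULL) {
		p[0] = 1;		/* in bounds: fine */
		p[2 * PAGE - 1] = 1;	/* last usable byte: fine */
		/* p[-1] or p[2 * PAGE] would fault on a guard page. */
		munmap(p - PAGE, 2 * PAGE + 2 * PAGE);
		puts("guarded mapping exercised");
	}
	return 0;
}

Paying an mmap/mprotect round trip per allocation would be expensive, which is what the caching note above addresses: small guarded extents stay in their own eset so the syscalls are amortized across reuse.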
@@ -1,4 +1,5 @@
#include "test/jemalloc_test.h"
#include "test/arena_decay.h"

#include "jemalloc/internal/ticker.h"

@@ -22,155 +23,6 @@ nstime_update_mock(nstime_t *time) {
	}
}

static unsigned
do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
	    0, "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
	    "Unexpected mallctlbymib() failure");

	expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
	    0, "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
	    "Unexpected mallctlbymib() failure");

	return arena_ind;
}

static void
do_arena_destroy(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

void
do_epoch(void) {
	uint64_t epoch = 1;
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
	    0, "Unexpected mallctl() failure");
}

void
do_purge(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

void
do_decay(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

static uint64_t
get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	uint64_t npurge = 0;
	size_t sz = sizeof(npurge);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
	    config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
	return npurge;
}

static uint64_t
get_arena_dirty_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}

static uint64_t
get_arena_dirty_purged(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
}

static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static uint64_t
get_arena_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
	    get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static size_t
get_arena_pdirty(unsigned arena_ind) {
	do_epoch();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	size_t pdirty;
	size_t sz = sizeof(pdirty);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
	return pdirty;
}

static size_t
get_arena_pmuzzy(unsigned arena_ind) {
	do_epoch();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	size_t pmuzzy;
	size_t sz = sizeof(pmuzzy);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
	return pmuzzy;
}

static void *
do_mallocx(size_t size, int flags) {
	void *p = mallocx(size, flags);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	return p;
}

static void
generate_dirty(unsigned arena_ind, size_t size) {
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	void *p = do_mallocx(size, flags);
	dallocx(p, flags);
}

TEST_BEGIN(test_decay_ticks) {
	test_skip_if(is_background_thread_enabled());
	test_skip_if(opt_hpa);

@@ -1,4 +1,5 @@
#include "test/jemalloc_test.h"
#include "test/guard.h"

#include "jemalloc/internal/safety_check.h"

@@ -30,8 +31,18 @@ TEST_BEGIN(test_large_double_free_tcache) {

	test_large_double_free_pre();
	char *ptr = malloc(SC_LARGE_MINCLASS);
	bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
	free(ptr);
	free(ptr);
	if (!guarded) {
		free(ptr);
	} else {
		/*
		 * Skip because guarded extents may unguard immediately on
		 * deallocation, in which case the second free will crash before
		 * reaching the intended safety check.
		 */
		fake_abort_called = true;
	}
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
	test_large_double_free_post();
}
@@ -43,8 +54,18 @@ TEST_BEGIN(test_large_double_free_no_tcache) {

	test_large_double_free_pre();
	char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
	bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
	if (!guarded) {
		dallocx(ptr, MALLOCX_TCACHE_NONE);
	} else {
		/*
		 * Skip because guarded extents may unguard immediately on
		 * deallocation, in which case the second free will crash before
		 * reaching the intended safety check.
		 */
		fake_abort_called = true;
	}
	test_large_double_free_post();
}
TEST_END

test/unit/guard.c (new file, 201 lines)
@@ -0,0 +1,201 @@
#include "test/jemalloc_test.h"
#include "test/arena_decay.h"
#include "test/guard.h"

#include "jemalloc/internal/guard.h"

static void
verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
	expect_true(extent_is_guarded(tsdn, ptr),
	    "All extents should be guarded.");
}

#define MAX_SMALL_ALLOCATIONS 4096
void *small_alloc[MAX_SMALL_ALLOCATIONS];

TEST_BEGIN(test_guarded_small) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned npages = 16, pages_found = 0, ends_found = 0;
	VARIABLE_ARRAY(uintptr_t, pages, npages);

	/* Allocate to get sanitized pointers. */
	size_t sz = PAGE / 8;
	unsigned n_alloc = 0;
	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
		void *ptr = malloc(sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		small_alloc[n_alloc] = ptr;
		verify_extent_guarded(tsdn, ptr);
		if ((uintptr_t)ptr % PAGE == 0) {
			pages[pages_found++] = (uintptr_t)ptr;
		}
		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
			ends_found++;
		}
		n_alloc++;
		if (pages_found == npages && ends_found == npages) {
			break;
		}
	}
	/* Should have found the ptrs to check for overflow and underflow. */
	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
	expect_u_eq(ends_found, npages, "Could not find the expected pages.");

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < npages - 1; i++) {
		for (unsigned j = i + 1; j < npages; j++) {
			uintptr_t ptr_diff = pages[i] > pages[j] ?
			    pages[i] - pages[j] : pages[j] - pages[i];
			expect_zu_gt((size_t)ptr_diff, 2 * PAGE,
			    "Pages should not be next to each other.");
		}
	}

	for (unsigned i = 0; i < n_alloc; i++) {
		free(small_alloc[i]);
	}
}
TEST_END

TEST_BEGIN(test_guarded_large) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned nlarge = 32;
	VARIABLE_ARRAY(uintptr_t, large, nlarge);

	/* Allocate to get sanitized pointers. */
	size_t large_sz = SC_LARGE_MINCLASS;
	for (unsigned i = 0; i < nlarge; i++) {
		void *ptr = malloc(large_sz);
		verify_extent_guarded(tsdn, ptr);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		large[i] = (uintptr_t)ptr;
	}

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	uintptr_t min_diff = (uintptr_t)-1;
	for (unsigned i = 0; i < nlarge; i++) {
		for (unsigned j = i + 1; j < nlarge; j++) {
			uintptr_t ptr_diff = large[i] > large[j] ?
			    large[i] - large[j] : large[j] - large[i];
			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
			    "Pages should not be next to each other.");
			if (ptr_diff < min_diff) {
				min_diff = ptr_diff;
			}
		}
	}
	expect_zu_ge((size_t)min_diff, large_sz + 2 * PAGE,
	    "Pages should not be next to each other.");

	for (unsigned i = 0; i < nlarge; i++) {
		free((void *)large[i]);
	}
}
TEST_END

static void
verify_pdirty(unsigned arena_ind, uint64_t expected) {
	uint64_t pdirty = get_arena_pdirty(arena_ind);
	expect_u64_eq(pdirty, expected / PAGE,
	    "Unexpected dirty page amount.");
}

static void
verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
	uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
	expect_u64_eq(pmuzzy, expected / PAGE,
	    "Unexpected muzzy page amount.");
}

TEST_BEGIN(test_guarded_decay) {
	unsigned arena_ind = do_arena_create(-1, -1);
	do_decay(arena_ind);
	do_purge(arena_ind);

	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	/* Verify that guarded extents decay as dirty. */
	size_t sz1 = PAGE, sz2 = PAGE * 2;
	/* W/o maps_coalesce, guarded extents are unguarded eagerly. */
	size_t add_guard_size = maps_coalesce ? 0 : PAGE_GUARDS_SIZE;
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should reuse the first extent. */
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should not reuse; expect new dirty pages. */
	generate_dirty(arena_ind, sz2);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	/* Should reuse dirty extents for the two mallocx. */
	void *p1 = do_mallocx(sz1, flags);
	verify_extent_guarded(tsdn, p1);
	verify_pdirty(arena_ind, sz2 + add_guard_size);

	void *p2 = do_mallocx(sz2, flags);
	verify_extent_guarded(tsdn, p2);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	dallocx(p1, flags);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	dallocx(p2, flags);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	do_purge(arena_ind);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	if (config_stats) {
		expect_u64_eq(get_arena_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_purged(arena_ind),
		    (sz1 + sz2 + 2 * add_guard_size) / PAGE,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
		    "Expected no muzzy purging");
	}

	if (opt_retain) {
		/*
		 * With retain, guarded extents are not mergeable and will be
		 * cached in ecache_retained. They should be reused.
		 */
		void *new_p1 = do_mallocx(sz1, flags);
		verify_extent_guarded(tsdn, p1);
		expect_ptr_eq(p1, new_p1, "Expect to reuse p1");

		void *new_p2 = do_mallocx(sz2, flags);
		verify_extent_guarded(tsdn, p2);
		expect_ptr_eq(p2, new_p2, "Expect to reuse p2");

		dallocx(new_p1, flags);
		verify_pdirty(arena_ind, sz1 + add_guard_size);
		dallocx(new_p2, flags);
		verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
		verify_pmuzzy(arena_ind, 0);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_guarded_small,
	    test_guarded_large,
	    test_guarded_decay);
}

test/unit/guard.sh (new file, 3 lines)
@@ -0,0 +1,3 @@
#!/bin/sh

export MALLOC_CONF="san_guard_large:1,san_guard_small:1"
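For reference, the options the test script sets through the MALLOC_CONF environment variable can also be baked into a program through jemalloc's malloc_conf symbol, which the allocator reads during initialization. A minimal sketch, assuming the option names from guard.sh above (they may differ in other versions):

#include <stdlib.h>

/* Read by jemalloc at startup, before the first allocation. */
const char *malloc_conf = "san_guard_large:1,san_guard_small:1";

int
main(void) {
	void *p = malloc(64);	/* expected to come from a guarded extent */
	free(p);
	return 0;
}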
@@ -80,11 +80,11 @@ TEST_BEGIN(test_alloc_max) {

	/* Small max */
	bool deferred_work_generated;
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false,
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Allocation of small max failed");
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
	    &deferred_work_generated);
	    false, &deferred_work_generated);
	expect_ptr_null(edata, "Allocation of larger than small max succeeded");

	destroy_test_data(shard);
@@ -188,7 +188,7 @@ TEST_BEGIN(test_stress) {
			size_t npages = npages_min + prng_range_zu(&prng_state,
			    npages_max - npages_min);
			edata_t *edata = pai_alloc(tsdn, &shard->pai,
			    npages * PAGE, PAGE, false,
			    npages * PAGE, PAGE, false, false,
			    &deferred_work_generated);
			assert_ptr_not_null(edata,
			    "Unexpected allocation failure");
@@ -263,7 +263,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	edata_list_active_t allocs_list;
@@ -299,7 +300,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
	/* Reallocate (individually), and ensure reuse and contiguity. */
	for (size_t i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
	}
	void *new_base = edata_base_get(allocs[0]);
@@ -374,7 +376,7 @@ TEST_BEGIN(test_defer_time) {
	edata_t *edatas[HUGEPAGE_PAGES];
	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    &deferred_work_generated);
		    false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	hpa_shard_do_deferred_work(tsdn, shard);
@@ -408,7 +410,7 @@ TEST_BEGIN(test_defer_time) {
	 */
	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    &deferred_work_generated);
		    false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	/*

@@ -128,6 +128,8 @@ TEST_BEGIN(test_hpa_background_thread_purges) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_enabled());

	unsigned arena_ind = create_arena();
	/*
@@ -142,6 +144,8 @@ TEST_BEGIN(test_hpa_background_thread_enable_disable) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_enabled());

	unsigned arena_ind = create_arena();

@@ -91,7 +91,7 @@ do_alloc_free_purge(void *arg) {
		bool deferred_work_generated;
		edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
		    PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
		    &deferred_work_generated);
		    /* guarded */ false, &deferred_work_generated);
		assert_ptr_not_null(edata, "");
		pa_dalloc(TSDN_NULL, &test_data->shard, edata,
		    &deferred_work_generated);

@@ -1,5 +1,6 @@
#include "test/jemalloc_test.h"

#include "jemalloc/internal/guard.h"
#include "jemalloc/internal/spin.h"

static unsigned arena_ind;
@@ -103,7 +104,8 @@ TEST_BEGIN(test_retained) {

	arena_ind = do_arena_create(NULL);
	sz = nallocx(HUGEPAGE, 0);
	esz = sz + sz_large_pad;
	size_t guard_sz = san_enabled() ? PAGE_GUARDS_SIZE : 0;
	esz = sz + sz_large_pad + guard_sz;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

@@ -133,7 +135,8 @@ TEST_BEGIN(test_retained) {
		 */
		do_refresh();

		size_t allocated = esz * nthreads * PER_THD_NALLOCS;
		size_t allocated = (esz - guard_sz) * nthreads *
		    PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		expect_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);

@@ -50,7 +50,9 @@ test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,

static inline edata_t *
pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero, bool *deferred_work_generated) {
    size_t alignment, bool zero, bool guarded,
    bool *deferred_work_generated) {
	assert(!guarded);
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	*deferred_work_generated = false;
	if (ta->alloc_fail) {
@@ -182,10 +184,12 @@ TEST_BEGIN(test_reuse) {
	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
	for (int i = 0; i < NALLOCS; i++) {
		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
	}
	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
@@ -216,9 +220,11 @@ TEST_BEGIN(test_reuse) {
	 */
	for (int i = 0; i < NALLOCS; i++) {
		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_eq(one_page[i], alloc1,
		    "Got unexpected allocation");
		expect_ptr_eq(two_page[i], alloc2,
@@ -255,11 +261,12 @@ TEST_BEGIN(test_auto_flush) {
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    &deferred_work_generated);
	    /* guarded */ false, &deferred_work_generated);
	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(NALLOCS + 1, max_allocs,
@@ -310,7 +317,8 @@ do_disable_flush_test(bool is_disable) {
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	/* Free all but the last alloc. */
@@ -383,7 +391,8 @@ TEST_BEGIN(test_max_alloc_respected) {
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
		    PAGE, /* zero */ false, &deferred_work_generated);
		    PAGE, /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(edata, "Unexpected alloc failure");
		expect_zu_eq(i + 1, ta.alloc_count,
		    "Incorrect number of allocations");
@@ -410,7 +419,8 @@ TEST_BEGIN(test_expand_shrink_delegate) {
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, &deferred_work_generated);
	    /* zero */ false, /* guarded */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
@@ -450,7 +460,8 @@ TEST_BEGIN(test_nshards_0) {

	bool deferred_work_generated;
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, &deferred_work_generated);
	    /* zero */ false, /* guarded */ false,
	    &deferred_work_generated);
	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

	/* Both operations should have gone directly to the fallback. */
@@ -492,7 +503,8 @@ TEST_BEGIN(test_stats_simple) {
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

@@ -505,7 +517,8 @@ TEST_BEGIN(test_stats_simple) {
	}
	for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
		allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
	}
}
@@ -534,13 +547,14 @@ TEST_BEGIN(test_stats_auto_flush) {
	bool deferred_work_generated;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    &deferred_work_generated);
	    /* guarded */ false, &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    &deferred_work_generated);
	    /* guarded */ false, &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
	}

	for (size_t i = 0; i < FLUSH_PAGES; i++) {
@@ -580,7 +594,8 @@ TEST_BEGIN(test_stats_manual_flush) {
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}