Implement dynamic per arena control over dirty page purging.

Add mallctls:
- arenas.lg_dirty_mult is initialized via opt.lg_dirty_mult, and can be
  modified to change the initial lg_dirty_mult setting for newly created
  arenas.
- arena.<i>.lg_dirty_mult controls an individual arena's dirty page
  purging threshold, and synchronously triggers any purging that may be
  necessary to maintain the constraint.
- arena.<i>.chunk.purge allows the per arena dirty page purging function
  to be replaced.

This resolves #93.
commit 8d6a3e8321
parent c9db461ffb
Author: Jason Evans
Date: 2015-03-18 18:55:33 -07:00

13 changed files with 460 additions and 99 deletions
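As a rough usage sketch (not part of the commit itself), the new knobs are driven through the usual mallctl() read/write interface; the arena index 0 and the value 1 below are illustrative assumptions:

#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/*
 * Sketch: query the default threshold applied to newly created arenas,
 * then tighten purging for arena 0.  Writing arena.<i>.lg_dirty_mult
 * also synchronously triggers any purging needed to satisfy the new
 * active/dirty page ratio.
 */
static void
tune_purging(void)
{
	ssize_t lg_dirty_mult;
	size_t sz = sizeof(lg_dirty_mult);

	/* Default inherited by arenas created after this point. */
	mallctl("arenas.lg_dirty_mult", &lg_dirty_mult, &sz, NULL, 0);

	/* Keep arena 0's dirty pages at or below active/2^1. */
	lg_dirty_mult = 1;
	mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
	    sizeof(lg_dirty_mult));
}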

View File

@@ -2,13 +2,8 @@
chunk_alloc_t *old_alloc;
chunk_dalloc_t *old_dalloc;
bool
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
return (old_dalloc(chunk, size, arena_ind));
}
chunk_purge_t *old_purge;
bool purged;
void *
chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
@@ -18,36 +13,79 @@ chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
return (old_alloc(new_addr, size, alignment, zero, arena_ind));
}
bool
chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
return (old_dalloc(chunk, size, arena_ind));
}
bool
chunk_purge(void *chunk, size_t offset, size_t length, unsigned arena_ind)
{
purged = true;
return (old_purge(chunk, offset, length, arena_ind));
}
TEST_BEGIN(test_chunk)
{
void *p;
chunk_alloc_t *new_alloc;
chunk_dalloc_t *new_dalloc;
size_t old_size, new_size;
chunk_purge_t *new_purge;
size_t old_size, new_size, huge0, huge1, huge2, sz;
new_alloc = chunk_alloc;
new_dalloc = chunk_dalloc;
new_purge = chunk_purge;
old_size = sizeof(chunk_alloc_t *);
new_size = sizeof(chunk_alloc_t *);
assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc,
&old_size, &new_alloc, new_size), 0,
"Unexpected alloc error");
assert_ptr_ne(old_alloc, new_alloc,
"Unexpected alloc error");
assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc, &old_size,
&new_alloc, new_size), 0, "Unexpected alloc error");
assert_ptr_ne(old_alloc, new_alloc, "Unexpected alloc error");
assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
&new_dalloc, new_size), 0, "Unexpected dalloc error");
assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
assert_d_eq(mallctl("arena.0.chunk.purge", &old_purge, &old_size,
&new_purge, new_size), 0, "Unexpected purge error");
assert_ptr_ne(old_purge, new_purge, "Unexpected purge error");
sz = sizeof(size_t);
assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
"Unexpected arenas.hchunk.0.size failure");
assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
"Unexpected arenas.hchunk.1.size failure");
assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
"Unexpected arenas.hchunk.2.size failure");
if (huge0 * 2 > huge2) {
/*
* There are at least four size classes per doubling, so
* xallocx() from size=huge2 to size=huge1 is guaranteed to
* leave trailing purgeable memory.
*/
p = mallocx(huge2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
purged = false;
assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
"Unexpected xallocx() failure");
assert_true(purged, "Unexpected purge");
dallocx(p, 0);
}
p = mallocx(42, 0);
assert_ptr_ne(p, NULL, "Unexpected alloc error");
assert_ptr_not_null(p, "Unexpected mallocx() error");
free(p);
assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
NULL, &old_alloc, old_size), 0,
"Unexpected alloc error");
assert_d_eq(mallctl("arena.0.chunk.alloc", NULL, NULL, &old_alloc,
old_size), 0, "Unexpected alloc error");
assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
old_size), 0, "Unexpected dalloc error");
assert_d_eq(mallctl("arena.0.chunk.purge", NULL, NULL, &old_purge,
old_size), 0, "Unexpected purge error");
}
TEST_END
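The test above swaps in a wrapper purge hook only to observe that shrinking via xallocx() triggers purging. Outside the test harness, a caller could install its own hook the same way; the sketch below assumes the chunk_purge_t typedef is exposed by the public header (as the test's usage suggests), and the counting wrapper and names are purely illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static chunk_purge_t *default_purge;
static size_t purge_calls;

/*
 * Illustrative hook: count invocations, then defer to the previously
 * installed hook (returning true would mean "could not purge").
 */
static bool
counting_purge(void *chunk, size_t offset, size_t length, unsigned arena_ind)
{
	purge_calls++;
	return (default_purge(chunk, offset, length, arena_ind));
}

static void
install_purge_hook(void)
{
	chunk_purge_t *new_purge = counting_purge;
	size_t sz = sizeof(chunk_purge_t *);

	/* Read the current hook and install the replacement in one call,
	 * mirroring the pattern used by the integration test above. */
	mallctl("arena.0.chunk.purge", &default_purge, &sz, &new_purge,
	    sizeof(chunk_purge_t *));
}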

View File

@@ -348,6 +348,38 @@ TEST_BEGIN(test_thread_arena)
}
TEST_END
TEST_BEGIN(test_arena_i_lg_dirty_mult)
{
ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
lg_dirty_mult = -2;
assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
lg_dirty_mult = (sizeof(size_t) << 3);
assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
lg_dirty_mult < (sizeof(ssize_t) << 3); prev_lg_dirty_mult =
lg_dirty_mult, lg_dirty_mult++) {
ssize_t old_lg_dirty_mult;
assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
&sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
"Unexpected old arena.0.lg_dirty_mult");
}
}
TEST_END
TEST_BEGIN(test_arena_i_purge)
{
unsigned narenas;
@@ -427,6 +459,38 @@ TEST_BEGIN(test_arenas_initialized)
}
TEST_END
TEST_BEGIN(test_arenas_lg_dirty_mult)
{
ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
size_t sz = sizeof(ssize_t);
assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
lg_dirty_mult = -2;
assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
lg_dirty_mult = (sizeof(size_t) << 3);
assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
lg_dirty_mult < (sizeof(ssize_t) << 3); prev_lg_dirty_mult =
lg_dirty_mult, lg_dirty_mult++) {
ssize_t old_lg_dirty_mult;
assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
&sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
"Unexpected old arenas.lg_dirty_mult");
}
}
TEST_END
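Both lg_dirty_mult tests exercise the same read-modify-write behavior: the previous setting is returned through oldp while newp installs the replacement, and values below -1 or at/above (sizeof(size_t) << 3) are rejected with EFAULT. A minimal sketch of that pattern, with the new value left as a caller-supplied assumption:

#include <errno.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/*
 * Sketch: change the default lg_dirty_mult for future arenas while
 * capturing the previous default.
 */
static void
set_default_lg_dirty_mult(ssize_t new_mult)
{
	ssize_t old_mult;
	size_t sz = sizeof(old_mult);

	if (mallctl("arenas.lg_dirty_mult", &old_mult, &sz, &new_mult,
	    sizeof(new_mult)) == 0) {
		/* old_mult now holds the previous default; arenas created
		 * from here on start at new_mult. */
	} else {
		/* EFAULT: new_mult < -1 or >= (sizeof(size_t) << 3). */
	}
}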
TEST_BEGIN(test_arenas_constants)
{
@@ -554,9 +618,11 @@ main(void)
test_tcache_none,
test_tcache,
test_thread_arena,
test_arena_i_lg_dirty_mult,
test_arena_i_purge,
test_arena_i_dss,
test_arenas_initialized,
test_arenas_lg_dirty_mult,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lrun_constants,

View File

@@ -22,7 +22,7 @@ TEST_BEGIN(test_rtree_get_empty)
rtree_t rtree;
assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
"Unexpected rtree_new() failure");
assert_ptr_eq(rtree_get(&rtree, 0), NULL,
assert_ptr_null(rtree_get(&rtree, 0),
"rtree_get() should return NULL for empty tree");
rtree_delete(&rtree);
}
@@ -75,8 +75,8 @@ TEST_BEGIN(test_rtree_bits)
"get key=%#"PRIxPTR, i, j, k, keys[j],
keys[k]);
}
assert_ptr_eq(rtree_get(&rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), NULL,
assert_ptr_null(rtree_get(&rtree,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i))),
"Only leftmost rtree leaf should be set; "
"i=%u, j=%u", i, j);
rtree_set(&rtree, keys[j], NULL);
@@ -117,11 +117,11 @@ TEST_BEGIN(test_rtree_random)
for (j = 0; j < NSET; j++) {
rtree_set(&rtree, keys[j], NULL);
assert_ptr_eq(rtree_get(&rtree, keys[j]), NULL,
assert_ptr_null(rtree_get(&rtree, keys[j]),
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
assert_ptr_eq(rtree_get(&rtree, keys[j]), NULL,
assert_ptr_null(rtree_get(&rtree, keys[j]),
"rtree_get() should return previously set value");
}