Immediately purge cached extents if decay_time is 0.

This fixes a regression caused by 54269dc0ed (Remove obsolete
arena_maybe_purge() call.), as well as providing a general fix.

This resolves #665.
Author: Jason Evans
Date:   2017-03-01 15:25:48 -08:00
Parent: d61a5f76b2
Commit: fd058f572b
5 changed files with 138 additions and 44 deletions
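
The behavioral change is easy to exercise from application code. The following standalone sketch is not part of the commit: it creates an arena, sets its decay_time to 0, and checks that a freed allocation leaves no dirty pages behind. It reuses the mallctl names exercised by the unit test added below ("arenas.create", "arena.<i>.decay_time", "epoch", "stats.arenas.<i>.pdirty"), assumes a jemalloc build with statistics enabled (the default), and links with -ljemalloc; the 1 MiB allocation size is arbitrary.

    /* Standalone sketch, not part of the commit. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        /* Create a fresh arena. */
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        if (mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0) != 0) {
            return 1;
        }

        /* A decay_time of 0 requests immediate purging of cached extents. */
        char ctl[64];
        snprintf(ctl, sizeof(ctl), "arena.%u.decay_time", arena_ind);
        ssize_t decay_time = 0;
        if (mallctl(ctl, NULL, NULL, (void *)&decay_time,
            sizeof(decay_time)) != 0) {
            return 1;
        }

        /* Allocate and free from that arena, bypassing the thread cache. */
        int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
        void *p = mallocx((size_t)1 << 20, flags);
        if (p == NULL) {
            return 1;
        }
        dallocx(p, flags);

        /* Refresh the stats epoch, then read the arena's dirty page count. */
        uint64_t epoch = 1;
        mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch));
        snprintf(ctl, sizeof(ctl), "stats.arenas.%u.pdirty", arena_ind);
        size_t pdirty;
        sz = sizeof(pdirty);
        mallctl(ctl, (void *)&pdirty, &sz, NULL, 0);
        /* With this fix, pdirty is expected to be 0 here. */
        printf("pdirty after free: %zu\n", pdirty);
        return 0;
    }

Before this commit, the regression described above would typically leave that final pdirty read nonzero until some other event triggered a purge.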

File 1 of 5 (header: arena function declarations)

@@ -33,8 +33,6 @@ extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
 void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent);
-void arena_extent_dalloc_large_finish(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent);
 void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
 void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
@@ -42,7 +40,6 @@ void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
 ssize_t arena_decay_time_get(arena_t *arena);
 bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
 void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
-void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
 void arena_reset(tsd_t *tsd, arena_t *arena);
 void arena_destroy(tsd_t *tsd, arena_t *arena);
 void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,

File 2 of 5 (private symbols list)

@@ -26,7 +26,6 @@ arena_dss_prec_get
 arena_dss_prec_set
 arena_extent_alloc_large
 arena_extent_cache_dalloc
-arena_extent_dalloc_large_finish
 arena_extent_dalloc_large_prep
 arena_extent_ralloc_large_expand
 arena_extent_ralloc_large_shrink
@@ -40,7 +39,6 @@ arena_internal_get
 arena_internal_sub
 arena_malloc
 arena_malloc_hard
-arena_maybe_purge
 arena_migrate
 arena_new
 arena_nthreads_dec

File 3 of 5 (arena implementation)

@@ -259,7 +259,9 @@ arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
     witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
     extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
-    arena_purge(tsdn, arena, false);
+    if (arena_decay_time_get(arena) == 0) {
+        arena_purge(tsdn, arena, true);
+    }
 }
 
 JEMALLOC_INLINE_C void *
@@ -456,13 +458,6 @@ arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
     arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
 }
 
-void
-arena_extent_dalloc_large_finish(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent) {
-    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-    extent_dalloc_cache(tsdn, arena, &extent_hooks, extent);
-}
-
 void
 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     size_t oldusize) {
@@ -663,34 +658,7 @@ arena_decay_time_valid(ssize_t decay_time) {
     return false;
 }
 
-ssize_t
-arena_decay_time_get(arena_t *arena) {
-    return arena_decay_time_read(arena);
-}
-
-bool
-arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
-    if (!arena_decay_time_valid(decay_time)) {
-        return true;
-    }
-
-    malloc_mutex_lock(tsdn, &arena->decay.mtx);
-    /*
-     * Restart decay backlog from scratch, which may cause many dirty pages
-     * to be immediately purged.  It would conceptually be possible to map
-     * the old backlog onto the new backlog, but there is no justification
-     * for such complexity since decay_time changes are intended to be
-     * infrequent, either between the {-1, 0, >0} states, or a one-time
-     * arbitrary change during initial arena configuration.
-     */
-    arena_decay_reinit(arena, decay_time);
-    arena_maybe_purge(tsdn, arena);
-    malloc_mutex_unlock(tsdn, &arena->decay.mtx);
-
-    return false;
-}
-
-void
+static void
 arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
     malloc_mutex_assert_owner(tsdn, &arena->decay.mtx);
 
@@ -735,6 +703,33 @@ arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) {
     }
 }
 
+ssize_t
+arena_decay_time_get(arena_t *arena) {
+    return arena_decay_time_read(arena);
+}
+
+bool
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
+    if (!arena_decay_time_valid(decay_time)) {
+        return true;
+    }
+
+    malloc_mutex_lock(tsdn, &arena->decay.mtx);
+    /*
+     * Restart decay backlog from scratch, which may cause many dirty pages
+     * to be immediately purged.  It would conceptually be possible to map
+     * the old backlog onto the new backlog, but there is no justification
+     * for such complexity since decay_time changes are intended to be
+     * infrequent, either between the {-1, 0, >0} states, or a one-time
+     * arbitrary change during initial arena configuration.
+     */
+    arena_decay_reinit(arena, decay_time);
+    arena_maybe_purge(tsdn, arena);
+    malloc_mutex_unlock(tsdn, &arena->decay.mtx);
+
+    return false;
+}
+
 static size_t
 arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
     size_t ndirty_limit, extent_list_t *purge_extents) {
@@ -846,7 +841,7 @@ arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
     arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
 
     extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-    extent_dalloc_cache(tsdn, arena, &extent_hooks, slab);
+    arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, slab);
 }
 
 static void

File 4 of 5 (large allocation implementation)

@@ -319,7 +319,8 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 
 static void
 large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
-    arena_extent_dalloc_large_finish(tsdn, arena, extent);
+    extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+    arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, extent);
 }
 
 void
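
Read together, the hunks above funnel both slab deallocation and large deallocation through arena_extent_cache_dalloc(), which is now the single place that decides whether purging happens immediately. The following is a condensed, non-verbatim sketch of the resulting call path, assembled from the hunks in this commit; the witness assertion and surrounding code are omitted, and the second line of the arena_extent_cache_dalloc() signature is inferred from its call sites.

    /* Condensed sketch of the post-commit behavior; not verbatim source. */
    void
    arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
        extent_hooks_t **r_extent_hooks, extent_t *extent) {
        /* Return the extent to the arena's cache of unused extents... */
        extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
        /* ...and purge right away if decay is configured to be immediate. */
        if (arena_decay_time_get(arena) == 0) {
            arena_purge(tsdn, arena, true);
        }
    }

    /* Large deallocation now goes through the same path... */
    static void
    large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
        extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
        arena_extent_cache_dalloc(tsdn, arena, &extent_hooks, extent);
    }

    /* ...as does slab deallocation (the tail of arena_slab_dalloc()). */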

File 5 of 5 (decay unit test)

@@ -348,10 +348,113 @@ TEST_BEGIN(test_decay_nonmonotonic) {
 }
 TEST_END
 
+static unsigned
+do_arena_create(ssize_t decay_time) {
+    unsigned arena_ind;
+    size_t sz = sizeof(unsigned);
+    assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+        0, "Unexpected mallctl() failure");
+    size_t mib[3];
+    size_t miblen = sizeof(mib)/sizeof(size_t);
+    assert_d_eq(mallctlnametomib("arena.0.decay_time", mib, &miblen), 0,
+        "Unexpected mallctlnametomib() failure");
+    mib[1] = (size_t)arena_ind;
+    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&decay_time,
+        sizeof(decay_time)), 0, "Unexpected mallctlbymib() failure");
+    return arena_ind;
+}
+
+static void
+do_arena_destroy(unsigned arena_ind) {
+    size_t mib[3];
+    size_t miblen = sizeof(mib)/sizeof(size_t);
+    assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
+        "Unexpected mallctlnametomib() failure");
+    mib[1] = (size_t)arena_ind;
+    assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+        "Unexpected mallctlbymib() failure");
+}
+
+void
+do_epoch(void) {
+    uint64_t epoch = 1;
+    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+        0, "Unexpected mallctl() failure");
+}
+
+static size_t
+get_arena_pdirty(unsigned arena_ind) {
+    do_epoch();
+    size_t mib[4];
+    size_t miblen = sizeof(mib)/sizeof(size_t);
+    assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
+        "Unexpected mallctlnametomib() failure");
+    mib[2] = (size_t)arena_ind;
+    size_t pdirty;
+    size_t sz = sizeof(pdirty);
+    assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
+        "Unexpected mallctlbymib() failure");
+    return pdirty;
+}
+
+static void *
+do_mallocx(size_t size, int flags) {
+    void *p = mallocx(size, flags);
+    assert_ptr_not_null(p, "Unexpected mallocx() failure");
+    return p;
+}
+
+static void
+generate_dirty(unsigned arena_ind, size_t size) {
+    int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+    void *p = do_mallocx(size, flags);
+    dallocx(p, flags);
+}
+
+TEST_BEGIN(test_decay_now) {
+    unsigned arena_ind = do_arena_create(0);
+    assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+    size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+    /* Verify that dirty pages never linger after deallocation. */
+    for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+        size_t size = sizes[i];
+        generate_dirty(arena_ind, size);
+        assert_zu_eq(get_arena_pdirty(arena_ind), 0,
+            "Unexpected dirty pages");
+    }
+    do_arena_destroy(arena_ind);
+}
+TEST_END
+
+TEST_BEGIN(test_decay_never) {
+    unsigned arena_ind = do_arena_create(-1);
+    int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+    assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+    size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+    void *ptrs[sizeof(sizes)/sizeof(size_t)];
+    for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+        ptrs[i] = do_mallocx(sizes[i], flags);
+    }
+    /* Verify that each deallocation generates additional dirty pages. */
+    size_t pdirty_prev = get_arena_pdirty(arena_ind);
+    assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
+    for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+        dallocx(ptrs[i], flags);
+        size_t pdirty = get_arena_pdirty(arena_ind);
+        assert_zu_gt(pdirty, pdirty_prev,
+            "Expected dirty pages to increase.");
+        pdirty_prev = pdirty;
+    }
+    do_arena_destroy(arena_ind);
+}
+TEST_END
+
 int
 main(void) {
     return test(
         test_decay_ticks,
         test_decay_ticker,
-        test_decay_nonmonotonic);
+        test_decay_nonmonotonic,
+        test_decay_now,
+        test_decay_never);
 }