Protect extents_dirty access with extents_mtx.

This fixes race conditions during purging.
This commit is contained in:
Jason Evans 2016-09-22 11:57:28 -07:00
parent bc49157d21
commit f6d01ff4b7
5 changed files with 112 additions and 58 deletions

View File

@ -176,12 +176,6 @@ struct arena_s {
*/
size_t ndirty;
/*
* Ring sentinel used to track unused dirty memory. Dirty memory is
* managed as an LRU of cached extents.
*/
extent_t extents_dirty;
/*
* Approximate time in seconds from the creation of a set of unused
* dirty pages until an equivalent set of unused dirty pages is purged
@ -240,7 +234,12 @@ struct arena_s {
*/
extent_heap_t extents_cached[NPSIZES];
extent_heap_t extents_retained[NPSIZES];
/* Protects extents_cached and extents_retained. */
/*
* Ring sentinel used to track unused dirty memory. Dirty memory is
* managed as an LRU of cached extents.
*/
extent_t extents_dirty;
/* Protects extents_{cached,retained,dirty}. */
malloc_mutex_t extents_mtx;
/* User-configurable extent hook functions. */
@ -287,10 +286,10 @@ extent_t *arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
size_t alignment, bool *zero);
void arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, extent_t *extent);
void arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent,
bool cache);
void arena_extent_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
void arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, bool cache);
void arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, bool cache);
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large(tsdn_t *tsdn, arena_t *arena,

View File

@ -99,6 +99,9 @@ size_t extent_size_quantize_ceil(size_t size);
ph_proto(, extent_heap_, extent_heap_t, extent_t)
extent_t *extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab);
extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab);

View File

@ -145,6 +145,7 @@ extent_addr_randomize
extent_addr_set
extent_alloc
extent_alloc_cache
extent_alloc_cache_locked
extent_alloc_dss
extent_alloc_mmap
extent_alloc_wrapper

View File

@ -101,9 +101,12 @@ arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
}
void
arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool cache)
{
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
if (cache) {
extent_ring_insert(&arena->extents_dirty, extent);
arena->ndirty += arena_extent_dirty_npages(extent);
@ -111,9 +114,12 @@ arena_extent_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
}
void
arena_extent_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool dirty)
{
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
if (dirty) {
extent_ring_remove(extent);
assert(arena->ndirty >= arena_extent_dirty_npages(extent));
@ -727,6 +733,8 @@ arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
extent_t *extent;
size_t ndirty = 0;
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
&arena->extents_dirty; extent = qr_next(extent, qr_link))
ndirty += extent_size_get(extent) >> LG_PAGE;
@ -741,6 +749,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_t *extent, *next;
size_t nstashed = 0;
malloc_mutex_lock(tsdn, &arena->extents_mtx);
/* Stash extents according to ndirty_limit. */
for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
&arena->extents_dirty; extent = next) {
@ -756,9 +766,9 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
next = qr_next(extent, qr_link);
/* Allocate. */
zero = false;
textent = arena_extent_cache_alloc_locked(tsdn, arena,
r_extent_hooks, extent_base_get(extent),
extent_size_get(extent), 0, CACHELINE, &zero, false);
textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
CACHELINE, &zero, false);
assert(textent == extent);
assert(zero == extent_zeroed_get(extent));
extent_ring_remove(extent);
@ -770,6 +780,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
break;
}
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return (nstashed);
}
@ -1788,9 +1799,6 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->nactive = 0;
arena->ndirty = 0;
extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
false, false);
if (opt_purge == purge_mode_decay)
arena_decay_init(arena, arena_decay_time_default_get());
@ -1804,12 +1812,15 @@ arena_new(tsdn_t *tsdn, unsigned ind)
extent_heap_new(&arena->extents_retained[i]);
}
arena->extent_hooks = (extent_hooks_t *)&extent_hooks_default;
extent_init(&arena->extents_dirty, arena, NULL, 0, 0, false, false,
false, false);
if (malloc_mutex_init(&arena->extents_mtx, "arena_extents",
WITNESS_RANK_ARENA_EXTENTS))
return (NULL);
arena->extent_hooks = (extent_hooks_t *)&extent_hooks_default;
ql_new(&arena->extent_cache);
if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
WITNESS_RANK_ARENA_EXTENT_CACHE))

View File

@ -191,18 +191,26 @@ extent_ad_comp(const extent_t *a, const extent_t *b)
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
static void
extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
extent_heap_insert(&extent_heaps[pind], extent);
}
static void
extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
extent_heap_remove(&extent_heaps[pind], extent);
}
@ -381,9 +389,9 @@ extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
size_t usize, size_t pad, size_t alignment, bool *zero, bool *commit,
bool slab)
extent_heap_t extent_heaps[NPSIZES], bool locked, bool cache,
void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
bool *commit, bool slab)
{
extent_t *extent;
rtree_ctx_t rtree_ctx_fallback;
@ -398,6 +406,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
/* Beware size_t wrap-around. */
if (alloc_size < usize)
return (NULL);
if (!locked)
malloc_mutex_lock(tsdn, &arena->extents_mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
if (new_addr != NULL) {
@ -419,11 +428,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent = extent_first_best_fit(arena, extent_heaps, alloc_size);
if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
size)) {
if (!locked)
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return (NULL);
}
extent_heaps_remove(extent_heaps, extent);
arena_extent_cache_maybe_remove(arena, extent, cache);
extent_heaps_remove(tsdn, extent_heaps, extent);
arena_extent_cache_maybe_remove(tsdn, arena, extent, cache);
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
@ -444,11 +454,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (extent == NULL) {
extent_deregister(tsdn, lead);
extent_leak(tsdn, arena, r_extent_hooks, cache, lead);
if (!locked)
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return (NULL);
}
extent_heaps_insert(extent_heaps, lead);
arena_extent_cache_maybe_insert(arena, lead, cache);
extent_heaps_insert(tsdn, extent_heaps, lead);
arena_extent_cache_maybe_insert(tsdn, arena, lead, cache);
}
/* Split the trail. */
@ -459,11 +470,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_deregister(tsdn, extent);
extent_leak(tsdn, arena, r_extent_hooks, cache,
extent);
if (!locked)
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return (NULL);
}
extent_heaps_insert(extent_heaps, trail);
arena_extent_cache_maybe_insert(arena, trail, cache);
extent_heaps_insert(tsdn, extent_heaps, trail);
arena_extent_cache_maybe_insert(tsdn, arena, trail, cache);
} else if (leadsize == 0) {
/*
* Splitting causes usize to be set as a side effect, but no
@ -474,6 +486,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!extent_committed_get(extent) && extent_commit_wrapper(tsdn, arena,
r_extent_hooks, extent, 0, extent_size_get(extent))) {
if (!locked)
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
extent_record(tsdn, arena, r_extent_hooks, extent_heaps, cache,
extent);
@ -488,6 +501,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_interior_register(tsdn, rtree_ctx, extent);
}
if (!locked)
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
if (*zero) {
@ -540,27 +554,51 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return (NULL);
}
extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab)
/*
 * Common implementation for allocating an extent from the arena's cache of
 * previously used extents (arena->extents_cached).  When "locked" is true the
 * caller already owns arena->extents_mtx (asserted below), and "locked" is
 * forwarded to extent_recycle() so that it does not re-acquire the mutex.
 */
static extent_t *
extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize,
size_t pad, size_t alignment, bool *zero, bool slab)
{
extent_t *extent;
bool commit;
assert(usize + pad != 0);
assert(alignment != 0);
if (locked)
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
/* Cached extents are expected to be committed; asserted after recycle. */
commit = true;
extent = extent_recycle(tsdn, arena, r_extent_hooks,
arena->extents_cached, locked, true, new_addr, usize, pad,
alignment, zero, &commit, slab);
if (extent == NULL)
return (NULL);
assert(commit);
return (extent);
}
/*
 * Allocate an extent from the cache while the caller already holds
 * arena->extents_mtx (asserted here before delegating with locked=true).
 */
extent_t *
extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab)
{
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
new_addr, usize, pad, alignment, zero, slab));
}
/*
 * Allocate an extent from the cache without holding arena->extents_mtx;
 * the impl (with locked=false) lets extent_recycle() acquire and release
 * the mutex internally.
 */
extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab)
{
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
new_addr, usize, pad, alignment, zero, slab));
}
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit)
@ -607,8 +645,8 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
assert(alignment != 0);
extent = extent_recycle(tsdn, arena, r_extent_hooks,
arena->extents_retained, false, new_addr, usize, pad, alignment,
zero, commit, slab);
arena->extents_retained, false, false, new_addr, usize, pad,
alignment, zero, commit, slab);
if (extent != NULL && config_stats) {
size_t size = usize + pad;
arena->stats.retained -= size;
@ -697,22 +735,24 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
if (!extent_can_coalesce(a, b))
return;
extent_heaps_remove(extent_heaps, a);
extent_heaps_remove(extent_heaps, b);
extent_heaps_remove(tsdn, extent_heaps, a);
extent_heaps_remove(tsdn, extent_heaps, b);
arena_extent_cache_maybe_remove(extent_arena_get(a), a, cache);
arena_extent_cache_maybe_remove(extent_arena_get(b), b, cache);
arena_extent_cache_maybe_remove(tsdn, extent_arena_get(a), a, cache);
arena_extent_cache_maybe_remove(tsdn, extent_arena_get(b), b, cache);
if (extent_merge_wrapper(tsdn, arena, r_extent_hooks, a, b)) {
extent_heaps_insert(extent_heaps, a);
extent_heaps_insert(extent_heaps, b);
arena_extent_cache_maybe_insert(extent_arena_get(a), a, cache);
arena_extent_cache_maybe_insert(extent_arena_get(b), b, cache);
extent_heaps_insert(tsdn, extent_heaps, a);
extent_heaps_insert(tsdn, extent_heaps, b);
arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a,
cache);
arena_extent_cache_maybe_insert(tsdn, extent_arena_get(b), b,
cache);
return;
}
extent_heaps_insert(extent_heaps, a);
arena_extent_cache_maybe_insert(extent_arena_get(a), a, cache);
extent_heaps_insert(tsdn, extent_heaps, a);
arena_extent_cache_maybe_insert(tsdn, extent_arena_get(a), a, cache);
}
static void
@ -737,8 +777,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
}
assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
extent_heaps_insert(extent_heaps, extent);
arena_extent_cache_maybe_insert(arena, extent, cache);
extent_heaps_insert(tsdn, extent_heaps, extent);
arena_extent_cache_maybe_insert(tsdn, arena, extent, cache);
/* Try to coalesce forward. */
next = rtree_read(tsdn, &extents_rtree, rtree_ctx,