Disable coalescing of cached extents.

Extent splitting and coalescing is a major component of large allocation
overhead, and disabling coalescing of cached extents provides a simple
and effective hysteresis mechanism.  Once two-phase purging is
implemented, it will probably make sense to leave coalescing disabled
for the first phase, but coalesce during the second phase.
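
For orientation, a minimal standalone sketch of the hysteresis mechanism this commit introduces (simplified, illustrative types and names; the real change threads a try_coalesce flag through extents_init, as the diffs below show):

#include <stdbool.h>

/* Simplified stand-in for jemalloc's extents_t; illustrative only. */
typedef struct extents_s {
	bool try_coalesce;	/* Fixed at init time, never toggled. */
	/* ... heaps, LRU list, page count elided ... */
} extents_t;

static void
extents_init(extents_t *extents, bool try_coalesce) {
	extents->try_coalesce = try_coalesce;
}

static void
extent_try_coalesce(extents_t *extents) {
	/* Merge with neighboring extents until no merge succeeds. */
	(void)extents;
}

static void
extent_record(extents_t *extents) {
	/*
	 * Cached (dirty) extents skip coalescing entirely, so a
	 * free/alloc cycle of the same large size no longer oscillates
	 * between merge and re-split -- a simple hysteresis.  Retained
	 * extents still coalesce.
	 */
	if (extents->try_coalesce) {
		extent_try_coalesce(extents);
	}
	/* ... insert the extent into the container ... */
}

int
main(void) {
	extents_t cached, retained;
	extents_init(&cached, false);	/* extents_cached: no coalescing */
	extents_init(&retained, true);	/* extents_retained: coalesce */
	extent_record(&cached);
	extent_record(&retained);
	return 0;
}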
commit 2dfc5b5aac
parent c1ebfaa673
Author: Jason Evans
Date:   2017-02-12 23:18:57 -08:00

4 changed files with 43 additions and 24 deletions

include/jemalloc/internal/extent_externs.h
@@ -21,7 +21,8 @@ size_t extent_size_quantize_ceil(size_t size);
 ph_proto(, extent_heap_, extent_heap_t, extent_t)
 
-bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state);
+bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+    bool try_coalesce);
 extent_state_t extents_state_get(const extents_t *extents);
 size_t extents_npages_get(extents_t *extents);
 extent_t *extents_evict(tsdn_t *tsdn, extents_t *extents, size_t npages_min);

include/jemalloc/internal/extent_structs.h
@@ -115,6 +115,9 @@ struct extents_s {
 
 	/* All stored extents must be in the same state. */
 	extent_state_t state;
+
+	/* If true, try to coalesce during extent deallocation. */
+	bool try_coalesce;
 };
 
 #endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */

src/arena.c
@@ -1718,11 +1718,12 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		goto label_error;
 	}
-	if (extents_init(tsdn, &arena->extents_cached, extent_state_dirty)) {
+	if (extents_init(tsdn, &arena->extents_cached, extent_state_dirty,
+	    false)) {
 		goto label_error;
 	}
 	if (extents_init(tsdn, &arena->extents_retained,
-	    extent_state_retained)) {
+	    extent_state_retained, true)) {
 		goto label_error;
 	}

src/extent.c
@@ -191,7 +191,8 @@ extent_size_quantize_t *extent_size_quantize_ceil =
 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
 
 bool
-extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state) {
+extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+    bool try_coalesce) {
 	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS)) {
 		return true;
 	}
@@ -201,6 +202,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state) {
 	extent_list_init(&extents->lru);
 	extents->npages = 0;
 	extents->state = state;
+	extents->try_coalesce = try_coalesce;
 	return false;
 }
@@ -1058,26 +1060,10 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 	return err;
 }
 
-static void
-extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extents_t *extents, extent_t *extent) {
-	rtree_ctx_t rtree_ctx_fallback;
-	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-	assert(extents_state_get(extents) != extent_state_dirty ||
-	    !extent_zeroed_get(extent));
-
-	malloc_mutex_lock(tsdn, &extents->mtx);
-	extent_hooks_assure_initialized(arena, r_extent_hooks);
-
-	extent_usize_set(extent, 0);
-	if (extent_slab_get(extent)) {
-		extent_interior_deregister(tsdn, rtree_ctx, extent);
-		extent_slab_set(extent, false);
-	}
-
-	assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
+static extent_t *
+extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+    extent_t *extent) {
 	/*
 	 * Continue attempting to coalesce until failure, to protect against
 	 * races with other threads that are thwarted by this one.
@@ -1125,6 +1111,34 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 		}
 	} while (coalesced);
+
+	return extent;
+}
+
+static void
+extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+    extents_t *extents, extent_t *extent) {
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	assert(extents_state_get(extents) != extent_state_dirty ||
+	    !extent_zeroed_get(extent));
+
+	malloc_mutex_lock(tsdn, &extents->mtx);
+	extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+	extent_usize_set(extent, 0);
+	if (extent_slab_get(extent)) {
+		extent_interior_deregister(tsdn, rtree_ctx, extent);
+		extent_slab_set(extent, false);
+	}
+
+	assert(extent_lookup(tsdn, extent_base_get(extent), true) == extent);
+
+	if (extents->try_coalesce) {
+		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
+		    rtree_ctx, extents, extent);
+	}
+
 	extent_deactivate_locked(tsdn, arena, extents, extent);
 	malloc_mutex_unlock(tsdn, &extents->mtx);