Convert rtree from per chunk to per page.

Refactor [de]registration to maintain interior rtree entries for slabs.
Jason Evans 2016-05-24 18:22:10 -07:00
parent 5c6be2bdd3
commit 47613afc34
5 changed files with 94 additions and 52 deletions
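
Note on the invariant the hunks below establish: chunks_rtree is now keyed by page rather than by chunk, so for a slab every interior page must resolve to the head extent, not only the first and last pages that ordinary [de]registration already covers. The following standalone C sketch illustrates the resulting lookup behavior. It is not code from this commit; the direct-mapped table, the LG_PAGE value, and the two-field extent_t are simplified stand-ins for the real rtree and extent structures.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define LG_PAGE 12                      /* assumed: 4 KiB pages */
#define NSLOTS  (1u << 16)              /* toy table size */

typedef struct { char *addr; size_t size; } extent_t;

/* Toy stand-in for rtree_write()/rtree_read(): one slot per page key. */
static extent_t *table[NSLOTS];

static size_t
page_key(uintptr_t a)
{
    return ((size_t)(a >> LG_PAGE) % NSLOTS);
}

/*
 * Register every page of a slab.  The real code writes the first and last
 * pages via extent_rtree_acquire() and the interior ones via
 * chunk_interior_register(); the net effect is the same: all pages point at
 * the head extent.
 */
static void
slab_register(extent_t *ext)
{
    size_t i;

    for (i = 0; i < ext->size >> LG_PAGE; i++)
        table[page_key((uintptr_t)ext->addr + (i << LG_PAGE))] = ext;
}

int
main(void)
{
    static char slab[8 << LG_PAGE];     /* an 8-page "slab" */
    extent_t ext = { slab, sizeof(slab) };

    slab_register(&ext);
    /* A pointer into any interior page resolves to the head extent. */
    assert(table[page_key((uintptr_t)slab + (3 << LG_PAGE) + 100)] == &ext);
    return (0);
}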

include/jemalloc/internal/chunk.h

@@ -54,10 +54,10 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
 extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero);
+    bool *zero, bool slab);
 extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
+    bool *zero, bool *commit, bool slab);
 void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_t *extent);
 void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,

include/jemalloc/internal/extent.h

@@ -146,7 +146,6 @@ JEMALLOC_INLINE bool
 extent_retained_get(const extent_t *extent)
 {

-    assert(!extent->e_slab);
     return (qr_next(&extent->rd, rd_link) == &extent->rd);
 }

@@ -161,7 +160,6 @@ JEMALLOC_INLINE bool
 extent_committed_get(const extent_t *extent)
 {

-    assert(!extent->e_slab);
     return (extent->e_committed);
 }

src/arena.c

@@ -223,13 +223,13 @@ arena_chunk_dirty_npages(const extent_t *extent)
 static extent_t *
 arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero)
+    bool *zero, bool slab)
 {

     malloc_mutex_assert_owner(tsdn, &arena->lock);

     return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, size,
-        alignment, zero));
+        alignment, zero, slab));
 }

 extent_t *
@@ -241,7 +241,7 @@ arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
     malloc_mutex_lock(tsdn, &arena->lock);
     extent = arena_chunk_cache_alloc_locked(tsdn, arena, chunk_hooks,
-        new_addr, size, alignment, zero);
+        new_addr, size, alignment, zero, false);
     malloc_mutex_unlock(tsdn, &arena->lock);

     return (extent);
 }
@@ -575,7 +575,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     malloc_mutex_unlock(tsdn, &arena->lock);

     extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
-        chunksize, zero, commit);
+        chunksize, zero, commit, true);
     if (extent != NULL && !*commit) {
         /* Commit header. */
         if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, extent, 0,
@@ -585,9 +585,6 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
         }
     }

-    if (extent != NULL)
-        extent_slab_set(extent, true);
-
     malloc_mutex_lock(tsdn, &arena->lock);
     return (extent);
 }
@@ -601,11 +598,9 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        chunksize, chunksize, zero);
-    if (extent != NULL) {
-        extent_slab_set(extent, true);
+        chunksize, chunksize, zero, true);
+    if (extent != NULL)
         *commit = true;
-    }
     if (extent == NULL) {
         extent = arena_chunk_alloc_internal_hard(tsdn, arena,
             &chunk_hooks, zero, commit);
@@ -699,7 +694,6 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
     extent_committed_set(extent,
         (arena_mapbits_decommitted_get((arena_chunk_t *)
         extent_addr_get(extent), map_bias) == 0));
-    extent_slab_set(extent, false);
     if (!extent_committed_get(extent)) {
         /*
          * Decommit the header.  Mark the chunk as decommitted even if
@@ -828,7 +822,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
     bool commit = true;

     extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
-        alignment, zero, &commit);
+        alignment, zero, &commit, false);
     if (extent == NULL) {
         /* Revert optimistic stats updates. */
         malloc_mutex_lock(tsdn, &arena->lock);
@@ -861,7 +855,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     arena_nactive_add(arena, usize >> LG_PAGE);
     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        csize, alignment, zero);
+        csize, alignment, zero, false);
     malloc_mutex_unlock(tsdn, &arena->lock);
     if (extent == NULL) {
         extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
@@ -1429,7 +1423,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             zero = false;
             extent = arena_chunk_cache_alloc_locked(tsdn, arena,
                 chunk_hooks, extent_addr_get(chunkselm),
-                extent_size_get(chunkselm), chunksize, &zero);
+                extent_size_get(chunkselm), chunksize, &zero,
+                false);
             assert(extent == chunkselm);
             assert(zero == extent_zeroed_get(chunkselm));
             extent_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -2561,10 +2556,9 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
                 zero);
         } else if (likely(alignment <= chunksize))
             ret = huge_malloc(tsdn, arena, usize, zero);
-        else {
+        else
             ret = huge_palloc(tsdn, arena, usize, alignment, zero);
-        }
     }
     return (ret);
 }

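After the arena.c hunks above, arena code no longer toggles the slab flag itself: chunk_recycle() and chunk_alloc_wrapper() (next file) set it and register the slab's interior pages before the extent becomes visible. Note that chunk_interior_register() walks only pages 1 through n-2, because extent_rtree_acquire() already produces the entries for the first and last pages. A standalone sketch of that bookkeeping follows; the 2 MiB chunk size and 4 KiB page size are illustrative assumptions, not values fixed by this commit.

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    const unsigned lg_page = 12;            /* assumed LG_PAGE: 4 KiB */
    const size_t size = (size_t)2 << 20;    /* assumed 2 MiB slab chunk */
    const size_t npages = size >> lg_page;  /* 512 pages */
    size_t i, ninterior = 0;

    /* Same bounds as the chunk_interior_register() loop in src/chunk.c. */
    for (i = 1; i < npages - 1; i++)
        ninterior++;

    printf("pages %zu, interior entries %zu, boundary entries 2\n",
        npages, ninterior);                 /* 512, 510 */
    return (0);
}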
src/chunk.c

@@ -168,10 +168,9 @@ extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
         return (true);
     assert(*r_elm_a != NULL);

-    if (extent_size_get(extent) > chunksize) {
+    if (extent_size_get(extent) > PAGE) {
         uintptr_t last =
-            (CHUNK_CEILING((uintptr_t)extent_past_get(extent) -
-            chunksize));
+            (CHUNK_CEILING((uintptr_t)extent_past_get(extent)) - PAGE);

         *r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last,
             dependent, init_missing);
@@ -203,6 +202,20 @@ extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
         rtree_elm_release(tsdn, &chunks_rtree, elm_b);
 }

+static void
+chunk_interior_register(tsdn_t *tsdn, const extent_t *extent)
+{
+    size_t i;
+
+    assert(extent_slab_get(extent));
+
+    for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+        rtree_write(tsdn, &chunks_rtree,
+            (uintptr_t)extent_addr_get(extent) + (uintptr_t)(i <<
+            LG_PAGE), extent);
+    }
+}
+
 static bool
 chunk_register(tsdn_t *tsdn, const extent_t *extent)
 {
@@ -211,6 +224,8 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
     if (extent_rtree_acquire(tsdn, extent, false, true, &elm_a, &elm_b))
         return (true);
     extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
+    if (extent_slab_get(extent))
+        chunk_interior_register(tsdn, extent);
     extent_rtree_release(tsdn, elm_a, elm_b);

     if (config_prof && opt_prof && extent_active_get(extent)) {
@@ -232,6 +247,20 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
     return (false);
 }

+static void
+chunk_interior_deregister(tsdn_t *tsdn, const extent_t *extent)
+{
+    size_t i;
+
+    assert(extent_slab_get(extent));
+
+    for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+        rtree_clear(tsdn, &chunks_rtree,
+            (uintptr_t)extent_addr_get(extent) + (uintptr_t)(i <<
+            LG_PAGE));
+    }
+}
+
 static void
 chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
 {
@@ -239,6 +268,8 @@ chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
     extent_rtree_acquire(tsdn, extent, true, false, &elm_a, &elm_b);
     extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
+    if (extent_slab_get(extent))
+        chunk_interior_deregister(tsdn, extent);
     extent_rtree_release(tsdn, elm_a, elm_b);

     if (config_prof && opt_prof && extent_active_get(extent)) {
@@ -290,7 +321,7 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
 static extent_t *
 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit)
+    size_t size, size_t alignment, bool *zero, bool *commit, bool slab)
 {
     extent_t *extent;
     size_t alloc_size, leadsize, trailsize;
@@ -374,6 +405,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     }

     extent_active_set(extent, true);
+    if (slab) {
+        extent_slab_set(extent, slab);
+        chunk_interior_register(tsdn, extent);
+    }

     malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -431,7 +466,7 @@ chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 extent_t *
 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool slab)
 {
     extent_t *extent;
     bool commit;
@@ -443,7 +478,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     commit = true;
     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
-        true, new_addr, size, alignment, zero, &commit);
+        true, new_addr, size, alignment, zero, &commit, slab);
     if (extent == NULL)
         return (NULL);
     assert(commit);
@@ -484,7 +519,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 static extent_t *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool slab)
 {
     extent_t *extent;
@@ -494,7 +530,7 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     assert((alignment & chunksize_mask) == 0);

     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
-        false, new_addr, size, alignment, zero, commit);
+        false, new_addr, size, alignment, zero, commit, slab);
     if (config_stats && extent != NULL)
         arena->stats.retained -= size;
@@ -502,33 +538,44 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     return (extent);
 }

+static extent_t *
+chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit, bool slab)
+{
+    extent_t *extent;
+    void *addr;
+
+    extent = extent_alloc(tsdn, arena);
+    if (extent == NULL)
+        return (NULL);
+    addr = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+        arena->ind);
+    if (addr == NULL)
+        return (NULL);
+    extent_init(extent, arena, addr, size, true, zero, commit, slab);
+    if (chunk_register(tsdn, extent)) {
+        chunk_leak(tsdn, arena, chunk_hooks, false, extent);
+        return (NULL);
+    }
+
+    return (extent);
+}
+
 extent_t *
 chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool slab)
 {
     extent_t *extent;

     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

     extent = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
-        alignment, zero, commit);
+        alignment, zero, commit, slab);
     if (extent == NULL) {
-        void *chunk;
-
-        extent = extent_alloc(tsdn, arena);
-        if (extent == NULL)
-            return (NULL);
-        chunk = chunk_hooks->alloc(new_addr, size, alignment,
-            zero, commit, arena->ind);
-        if (chunk == NULL)
-            return (NULL);
-        extent_init(extent, arena, chunk, size, true, zero, commit,
-            false);
-    }
-
-    if (chunk_register(tsdn, extent)) {
-        chunk_leak(tsdn, arena, chunk_hooks, false, extent);
-        return (NULL);
+        extent = chunk_alloc_wrapper_hard(tsdn, arena, chunk_hooks,
+            new_addr, size, alignment, zero, commit, slab);
     }

     return (extent);
@@ -593,7 +640,10 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     assert((extent_size_get(extent) & chunksize_mask) == 0);

     extent_active_set(extent, false);
     extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
-
+    if (extent_slab_get(extent)) {
+        chunk_interior_deregister(tsdn, extent);
+        extent_slab_set(extent, false);
+    }
     assert(chunk_lookup(tsdn, extent_addr_get(extent), true) == extent);
     extent_heaps_insert(extent_heaps, extent);
@@ -609,7 +659,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

     /* Try to coalesce backward. */
     prev = rtree_read(tsdn, &chunks_rtree,
-        (uintptr_t)extent_addr_get(extent) - chunksize, false);
+        (uintptr_t)extent_addr_get(extent) - PAGE, false);
     if (prev != NULL) {
         chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
             extent_heaps, cache);
@@ -914,7 +964,7 @@ chunk_boot(void)
     if (have_dss && chunk_dss_boot())
         return (true);
     if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
-        opt_lg_chunk)))
+        LG_PAGE)))
         return (true);

     return (false);

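The chunk_boot() hunk above resizes the rtree key space to match the finer granularity: keys now retain address bits down to LG_PAGE instead of opt_lg_chunk, which deepens the tree but lets any page address be looked up directly. A standalone sketch of the arithmetic, assuming 64-bit pointers, 4 KiB pages, and the former 2 MiB default chunk size (all illustrative; the real values come from LG_SIZEOF_PTR, LG_PAGE, and opt_lg_chunk):

#include <stdio.h>

int
main(void)
{
    const unsigned ptr_bits = 64;   /* assumed: 1 << (LG_SIZEOF_PTR + 3) */
    const unsigned lg_chunk = 21;   /* assumed opt_lg_chunk: 2 MiB chunks */
    const unsigned lg_page = 12;    /* assumed LG_PAGE: 4 KiB pages */

    /* Before: one rtree key per chunk.  After: one key per page. */
    printf("per-chunk key bits: %u\n", ptr_bits - lg_chunk);    /* 43 */
    printf("per-page key bits:  %u\n", ptr_bits - lg_page);     /* 52 */
    return (0);
}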
src/huge.c

@@ -216,8 +216,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
         cdiff, chunksize, &is_zeroed_chunk)) == NULL) {
         bool commit = true;

         if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
-            nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit)) ==
-            NULL)
+            nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit, false))
+            == NULL)
             return (true);
     }