Convert rtree from per chunk to per page.
Refactor [de]registration to maintain interior rtree entries for slabs.
parent 5c6be2bdd3
commit 47613afc34
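The new chunk_interior_register()/chunk_interior_deregister() helpers added to src/chunk.c below carry the weight of this change: with the rtree keyed per page rather than per chunk, every interior page of a slab must map back to its owning extent, and the registration/deregistration paths now maintain those entries. The payoff is that any pointer into a slab resolves to its extent with a single page-granularity rtree read. The sketch below only illustrates that lookup; it is not code from this commit (the existing chunk_lookup()/rtree_read() helpers already provide it), and extent_of_ptr is a hypothetical name.

/*
 * Illustrative sketch only, not part of this commit: once a slab's interior
 * pages are registered, resolving an arbitrary pointer to its extent is one
 * page-keyed rtree read.  rtree_read(), chunks_rtree, and PAGE_MASK are the
 * identifiers used in the diff / jemalloc internals; the page mask simply
 * drops the low LG_PAGE bits of the pointer.
 */
static extent_t *
extent_of_ptr(tsdn_t *tsdn, const void *ptr)
{
    /* Key on the page containing ptr; dependent=true: an entry must exist. */
    return (rtree_read(tsdn, &chunks_rtree,
        (uintptr_t)ptr & ~(uintptr_t)PAGE_MASK, true));
}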
@@ -54,10 +54,10 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
 extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero);
+    bool *zero, bool slab);
 extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit);
+    bool *zero, bool *commit, bool slab);
 void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_t *extent);
 void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
@@ -146,7 +146,6 @@ JEMALLOC_INLINE bool
 extent_retained_get(const extent_t *extent)
 {

-    assert(!extent->e_slab);
     return (qr_next(&extent->rd, rd_link) == &extent->rd);
 }

@@ -161,7 +160,6 @@ JEMALLOC_INLINE bool
 extent_committed_get(const extent_t *extent)
 {

-    assert(!extent->e_slab);
     return (extent->e_committed);
 }

src/arena.c (28 lines changed)

@@ -223,13 +223,13 @@ arena_chunk_dirty_npages(const extent_t *extent)
 static extent_t *
 arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero)
+    bool *zero, bool slab)
 {

     malloc_mutex_assert_owner(tsdn, &arena->lock);

     return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, size,
-        alignment, zero));
+        alignment, zero, slab));
 }

 extent_t *
@@ -241,7 +241,7 @@ arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,

     malloc_mutex_lock(tsdn, &arena->lock);
     extent = arena_chunk_cache_alloc_locked(tsdn, arena, chunk_hooks,
-        new_addr, size, alignment, zero);
+        new_addr, size, alignment, zero, false);
     malloc_mutex_unlock(tsdn, &arena->lock);

     return (extent);
@@ -575,7 +575,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     malloc_mutex_unlock(tsdn, &arena->lock);

     extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
-        chunksize, zero, commit);
+        chunksize, zero, commit, true);
     if (extent != NULL && !*commit) {
         /* Commit header. */
         if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, extent, 0,
@@ -585,9 +585,6 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
         }
     }

-    if (extent != NULL)
-        extent_slab_set(extent, true);
-
     malloc_mutex_lock(tsdn, &arena->lock);

     return (extent);
@@ -601,11 +598,9 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        chunksize, chunksize, zero);
-    if (extent != NULL) {
-        extent_slab_set(extent, true);
+        chunksize, chunksize, zero, true);
+    if (extent != NULL)
         *commit = true;
-    }
     if (extent == NULL) {
         extent = arena_chunk_alloc_internal_hard(tsdn, arena,
             &chunk_hooks, zero, commit);
@@ -699,7 +694,6 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
     extent_committed_set(extent,
         (arena_mapbits_decommitted_get((arena_chunk_t *)
         extent_addr_get(extent), map_bias) == 0));
-    extent_slab_set(extent, false);
     if (!extent_committed_get(extent)) {
         /*
          * Decommit the header. Mark the chunk as decommitted even if
@@ -828,7 +822,7 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
     bool commit = true;

     extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
-        alignment, zero, &commit);
+        alignment, zero, &commit, false);
     if (extent == NULL) {
         /* Revert optimistic stats updates. */
         malloc_mutex_lock(tsdn, &arena->lock);
@@ -861,7 +855,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     arena_nactive_add(arena, usize >> LG_PAGE);

     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        csize, alignment, zero);
+        csize, alignment, zero, false);
     malloc_mutex_unlock(tsdn, &arena->lock);
     if (extent == NULL) {
         extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
@@ -1429,7 +1423,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             zero = false;
             extent = arena_chunk_cache_alloc_locked(tsdn, arena,
                 chunk_hooks, extent_addr_get(chunkselm),
-                extent_size_get(chunkselm), chunksize, &zero);
+                extent_size_get(chunkselm), chunksize, &zero,
+                false);
             assert(extent == chunkselm);
             assert(zero == extent_zeroed_get(chunkselm));
             extent_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -2561,9 +2556,8 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
             zero);
         } else if (likely(alignment <= chunksize))
             ret = huge_malloc(tsdn, arena, usize, zero);
-        else {
+        else
             ret = huge_palloc(tsdn, arena, usize, alignment, zero);
-        }
     }
     return (ret);
 }
src/chunk.c (108 lines changed)

@@ -168,10 +168,9 @@ extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
         return (true);
     assert(*r_elm_a != NULL);

-    if (extent_size_get(extent) > chunksize) {
+    if (extent_size_get(extent) > PAGE) {
         uintptr_t last =
-            (CHUNK_CEILING((uintptr_t)extent_past_get(extent) -
-            chunksize));
+            (CHUNK_CEILING((uintptr_t)extent_past_get(extent)) - PAGE);

         *r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last,
             dependent, init_missing);
@@ -203,6 +202,20 @@ extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
         rtree_elm_release(tsdn, &chunks_rtree, elm_b);
 }

+static void
+chunk_interior_register(tsdn_t *tsdn, const extent_t *extent)
+{
+    size_t i;
+
+    assert(extent_slab_get(extent));
+
+    for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+        rtree_write(tsdn, &chunks_rtree,
+            (uintptr_t)extent_addr_get(extent) + (uintptr_t)(i <<
+            LG_PAGE), extent);
+    }
+}
+
 static bool
 chunk_register(tsdn_t *tsdn, const extent_t *extent)
 {
@@ -211,6 +224,8 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
     if (extent_rtree_acquire(tsdn, extent, false, true, &elm_a, &elm_b))
         return (true);
     extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
+    if (extent_slab_get(extent))
+        chunk_interior_register(tsdn, extent);
     extent_rtree_release(tsdn, elm_a, elm_b);

     if (config_prof && opt_prof && extent_active_get(extent)) {
@@ -232,6 +247,20 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
     return (false);
 }

+static void
+chunk_interior_deregister(tsdn_t *tsdn, const extent_t *extent)
+{
+    size_t i;
+
+    assert(extent_slab_get(extent));
+
+    for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+        rtree_clear(tsdn, &chunks_rtree,
+            (uintptr_t)extent_addr_get(extent) + (uintptr_t)(i <<
+            LG_PAGE));
+    }
+}
+
 static void
 chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
 {
@@ -239,6 +268,8 @@ chunk_deregister(tsdn_t *tsdn, const extent_t *extent)

     extent_rtree_acquire(tsdn, extent, true, false, &elm_a, &elm_b);
     extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
+    if (extent_slab_get(extent))
+        chunk_interior_deregister(tsdn, extent);
     extent_rtree_release(tsdn, elm_a, elm_b);

     if (config_prof && opt_prof && extent_active_get(extent)) {
@@ -290,7 +321,7 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
 static extent_t *
 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit)
+    size_t size, size_t alignment, bool *zero, bool *commit, bool slab)
 {
     extent_t *extent;
     size_t alloc_size, leadsize, trailsize;
@@ -374,6 +405,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     }

     extent_active_set(extent, true);
+    if (slab) {
+        extent_slab_set(extent, slab);
+        chunk_interior_register(tsdn, extent);
+    }

     malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

@@ -431,7 +466,7 @@ chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,

 extent_t *
 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool slab)
 {
     extent_t *extent;
     bool commit;
@@ -443,7 +478,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

     commit = true;
     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
-        true, new_addr, size, alignment, zero, &commit);
+        true, new_addr, size, alignment, zero, &commit, slab);
     if (extent == NULL)
         return (NULL);
     assert(commit);
@@ -484,7 +519,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,

 static extent_t *
 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool slab)
 {
     extent_t *extent;

@@ -494,7 +530,7 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     assert((alignment & chunksize_mask) == 0);

     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
-        false, new_addr, size, alignment, zero, commit);
+        false, new_addr, size, alignment, zero, commit, slab);

     if (config_stats && extent != NULL)
         arena->stats.retained -= size;
@@ -502,33 +538,44 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     return (extent);
 }

+static extent_t *
+chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit, bool slab)
+{
+    extent_t *extent;
+    void *addr;
+
+    extent = extent_alloc(tsdn, arena);
+    if (extent == NULL)
+        return (NULL);
+    addr = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+        arena->ind);
+    if (addr == NULL)
+        return (NULL);
+    extent_init(extent, arena, addr, size, true, zero, commit, slab);
+    if (chunk_register(tsdn, extent)) {
+        chunk_leak(tsdn, arena, chunk_hooks, false, extent);
+        return (NULL);
+    }
+
+    return (extent);
+}
+
 extent_t *
 chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool slab)
 {
     extent_t *extent;

     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

     extent = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
-        alignment, zero, commit);
+        alignment, zero, commit, slab);
     if (extent == NULL) {
-        void *chunk;
-
-        extent = extent_alloc(tsdn, arena);
-        if (extent == NULL)
-            return (NULL);
-        chunk = chunk_hooks->alloc(new_addr, size, alignment,
-            zero, commit, arena->ind);
-        if (chunk == NULL)
-            return (NULL);
-        extent_init(extent, arena, chunk, size, true, zero, commit,
-            false);
-    }
-
-    if (chunk_register(tsdn, extent)) {
-        chunk_leak(tsdn, arena, chunk_hooks, false, extent);
-        return (NULL);
+        extent = chunk_alloc_wrapper_hard(tsdn, arena, chunk_hooks,
+            new_addr, size, alignment, zero, commit, slab);
     }

     return (extent);
@@ -593,7 +640,10 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     assert((extent_size_get(extent) & chunksize_mask) == 0);
     extent_active_set(extent, false);
     extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
-    extent_slab_set(extent, false);
+    if (extent_slab_get(extent)) {
+        chunk_interior_deregister(tsdn, extent);
+        extent_slab_set(extent, false);
+    }

     assert(chunk_lookup(tsdn, extent_addr_get(extent), true) == extent);
     extent_heaps_insert(extent_heaps, extent);
@@ -609,7 +659,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

     /* Try to coalesce backward. */
     prev = rtree_read(tsdn, &chunks_rtree,
-        (uintptr_t)extent_addr_get(extent) - chunksize, false);
+        (uintptr_t)extent_addr_get(extent) - PAGE, false);
     if (prev != NULL) {
         chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
             extent_heaps, cache);
@@ -914,7 +964,7 @@ chunk_boot(void)
     if (have_dss && chunk_dss_boot())
         return (true);
     if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
-        opt_lg_chunk)))
+        LG_PAGE)))
         return (true);

     return (false);
@@ -216,8 +216,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
         cdiff, chunksize, &is_zeroed_chunk)) == NULL) {
         bool commit = true;
         if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
-            nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit)) ==
-            NULL)
+            nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit, false))
+            == NULL)
             return (true);
     }

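The chunk_boot() hunk above is where the key granularity actually changes: rtree_new() is now sized for pointer-width minus LG_PAGE bits of key instead of pointer-width minus opt_lg_chunk. A rough illustration of that arithmetic, assuming 64-bit pointers, 4 KiB pages, and the old 2 MiB default chunk size (all three values are configuration-dependent and only stand in for LG_SIZEOF_PTR, LG_PAGE, and opt_lg_chunk here):

#include <stdio.h>

int
main(void)
{
    /* Assumed example values; see the note above. */
    unsigned ptr_bits = 64;   /* (ZU(1) << (LG_SIZEOF_PTR+3)) on LP64 */
    unsigned lg_page = 12;    /* LG_PAGE for 4 KiB pages */
    unsigned lg_chunk = 21;   /* opt_lg_chunk for 2 MiB chunks */

    printf("rtree key bits, per-chunk keys: %u\n", ptr_bits - lg_chunk); /* 43 */
    printf("rtree key bits, per-page keys:  %u\n", ptr_bits - lg_page);  /* 52 */
    return (0);
}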