Use rtree rather than [sz]ad trees for chunk split/coalesce operations.
parent 25845db7c9
commit ffa45a5331
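What the change does: the purely address-ordered red-black trees are removed (the arena fields chunks_ad_cached and chunks_ad_retained, the extent ad_link field, and the extent_ad_comp comparator), and address adjacency is instead resolved through chunks_rtree, the radix tree that already maps chunk addresses to their extents. The size/address-ordered trees (chunks_szad_*) stay, because best-fit allocation still needs them; the split and coalesce paths now register/deregister extents in the rtree and probe it for neighbors instead of keeping a second tree in sync.

A minimal standalone sketch of the lookup idea (toy code, not from the commit; the rtree is modeled as a flat array, and all names and sizes are illustrative):

/*
 * Toy model: resolve a free chunk's address neighbors through a direct map
 * keyed by chunk index -- the role chunks_rtree plays in this commit --
 * instead of searching an address-ordered tree.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define	CHUNK_SHIFT	21			/* 2 MiB chunks, for example. */
#define	CHUNK_SIZE	((size_t)1 << CHUNK_SHIFT)
#define	NSLOTS		1024			/* Toy 2 GiB address space. */

typedef struct {
	size_t	addr;				/* Chunk-aligned base. */
	size_t	size;				/* Multiple of CHUNK_SIZE. */
} extent_t;

static extent_t	*slots[NSLOTS];			/* Stand-in for chunks_rtree. */

/* cf. chunk_register(): map both endpoints of a multi-chunk extent. */
static void
toy_register(extent_t *e)
{

	slots[e->addr >> CHUNK_SHIFT] = e;			     /* First chunk. */
	slots[(e->addr + e->size - CHUNK_SIZE) >> CHUNK_SHIFT] = e; /* Last chunk. */
}

/* cf. rtree_read(): constant-depth lookup, no tree search. */
static extent_t *
toy_lookup(size_t addr)
{

	return (slots[addr >> CHUNK_SHIFT]);
}

int
main(void)
{
	extent_t a = {0 * CHUNK_SIZE, 2 * CHUNK_SIZE};
	extent_t b = {2 * CHUNK_SIZE, 3 * CHUNK_SIZE};

	toy_register(&a);
	toy_register(&b);
	/* Forward probe from a: the address just past a is b's first chunk. */
	assert(toy_lookup(a.addr + a.size) == &b);
	/*
	 * Backward probe from b: addr - CHUNK_SIZE is a's *last* chunk, which
	 * is why both endpoints get registered.
	 */
	assert(toy_lookup(b.addr - CHUNK_SIZE) == &a);
	puts("neighbors found by direct lookup");
	return (0);
}

The backward probe at addr - chunksize is also why chunk_register() in this commit maps both the first and the last chunk of a multi-chunk extent; see extent_rtree_acquire() in src/chunk.c below.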
@@ -418,16 +418,11 @@ struct arena_s {
 	malloc_mutex_t huge_mtx;
 
 	/*
-	 * Trees of chunks that were previously allocated (trees differ only in
-	 * extent ordering).  These are used when allocating chunks, in an
-	 * attempt to re-use address space.  Depending on function, different
-	 * tree orderings are needed, which is why there are two trees with the
-	 * same contents.
+	 * Trees of chunks that were previously allocated.  These are used when
+	 * allocating chunks, in an attempt to re-use address space.
 	 */
 	extent_tree_t chunks_szad_cached;
-	extent_tree_t chunks_ad_cached;
 	extent_tree_t chunks_szad_retained;
-	extent_tree_t chunks_ad_retained;
 
 	malloc_mutex_t chunks_mtx;
 	/* Cache of extent structures that were allocated via base_alloc(). */
@@ -55,9 +55,6 @@ struct extent_s {
 		/* Linkage for arena's achunks, huge, and node_cache lists. */
 		ql_elm(extent_t) ql_link;
 	};
-
-	/* Linkage for the address-ordered tree. */
-	rb_node(extent_t) ad_link;
 };
 typedef rb_tree(extent_t) extent_tree_t;
 
@@ -67,8 +64,6 @@ typedef rb_tree(extent_t) extent_tree_t;
 
 rb_proto(, extent_tree_szad_, extent_tree_t, extent_t)
-
-rb_proto(, extent_tree_ad_, extent_tree_t, extent_t)
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
@@ -79,6 +74,7 @@ void *extent_addr_get(const extent_t *extent);
 size_t extent_size_get(const extent_t *extent);
 void *extent_past_get(const extent_t *extent);
 bool extent_active_get(const extent_t *extent);
+bool extent_retained_get(const extent_t *extent);
 bool extent_zeroed_get(const extent_t *extent);
 bool extent_committed_get(const extent_t *extent);
 bool extent_slab_get(const extent_t *extent);
@@ -134,6 +130,14 @@ extent_active_get(const extent_t *extent)
 	return (extent->e_active);
 }
 
+JEMALLOC_INLINE bool
+extent_retained_get(const extent_t *extent)
+{
+
+	assert(!extent->e_slab);
+	return (qr_next(&extent->rd, rd_link) == &extent->rd);
+}
+
 JEMALLOC_INLINE bool
 extent_zeroed_get(const extent_t *extent)
 {
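A note on the new predicate: extent_retained_get() treats an extent as retained when its ring linkage is empty, i.e. qr_next(&extent->rd, rd_link) points back at the element itself; presumably an extent sitting in the chunk cache is linked into a non-trivial ring, so a self-linked ring implies the retained state. chunk_recycle() in src/chunk.c uses this to check that an extent found through the shared rtree actually belongs to the tree being searched (cached vs. retained).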
@@ -219,29 +219,11 @@ extent_init
 extent_past_get
 extent_prof_tctx_get
 extent_prof_tctx_set
+extent_retained_get
 extent_size_get
 extent_size_set
 extent_slab_get
 extent_slab_set
-extent_tree_ad_destroy
-extent_tree_ad_destroy_recurse
-extent_tree_ad_empty
-extent_tree_ad_first
-extent_tree_ad_insert
-extent_tree_ad_iter
-extent_tree_ad_iter_recurse
-extent_tree_ad_iter_start
-extent_tree_ad_last
-extent_tree_ad_new
-extent_tree_ad_next
-extent_tree_ad_nsearch
-extent_tree_ad_prev
-extent_tree_ad_psearch
-extent_tree_ad_remove
-extent_tree_ad_reverse_iter
-extent_tree_ad_reverse_iter_recurse
-extent_tree_ad_reverse_iter_start
-extent_tree_ad_search
 extent_tree_szad_destroy
 extent_tree_szad_destroy_recurse
 extent_tree_szad_empty
@@ -3427,9 +3427,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
 		return (NULL);
 
 	extent_tree_szad_new(&arena->chunks_szad_cached);
-	extent_tree_ad_new(&arena->chunks_ad_cached);
 	extent_tree_szad_new(&arena->chunks_szad_retained);
-	extent_tree_ad_new(&arena->chunks_ad_retained);
 	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
 	    WITNESS_RANK_ARENA_CHUNKS))
 		return (NULL);
375 src/chunk.c
@@ -50,9 +50,8 @@ const chunk_hooks_t chunk_hooks_default = {
  */
 
 static void chunk_record(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
-    bool committed);
+    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, bool cache,
+    void *chunk, size_t size, bool zeroed, bool committed);
 
 /******************************************************************************/
 
@@ -140,39 +139,65 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
 	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
 }
 
+static bool
+extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
+    bool init_missing, rtree_elm_t **r_elm_a, rtree_elm_t **r_elm_b)
+{
+
+	*r_elm_a = rtree_elm_acquire(tsdn, &chunks_rtree,
+	    (uintptr_t)extent_addr_get(extent), dependent, init_missing);
+	if (!dependent && *r_elm_a == NULL)
+		return (true);
+	assert(*r_elm_a != NULL);
+
+	if (extent_size_get(extent) > chunksize) {
+		uintptr_t last =
+		    (CHUNK_CEILING((uintptr_t)extent_past_get(extent) -
+		    chunksize));
+
+		*r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last,
+		    dependent, init_missing);
+		if (!dependent && *r_elm_b == NULL)
+			return (true);
+		assert(*r_elm_b != NULL);
+	} else
+		*r_elm_b = NULL;
+
+	return (false);
+}
+
+static void
+extent_rtree_write_acquired(tsdn_t *tsdn, rtree_elm_t *elm_a,
+    rtree_elm_t *elm_b, const extent_t *extent)
+{
+
+	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, extent);
+	if (elm_b != NULL)
+		rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_b, extent);
+}
+
+static void
+extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
+{
+
+	rtree_elm_release(tsdn, &chunks_rtree, elm_a);
+	if (elm_b != NULL)
+		rtree_elm_release(tsdn, &chunks_rtree, elm_b);
+}
+
 bool
 chunk_register(tsdn_t *tsdn, const extent_t *extent)
 {
-	const void *addr;
-	size_t size;
-	rtree_elm_t *elm_a;
+	rtree_elm_t *elm_a, *elm_b;
 
-	addr = extent_addr_get(extent);
-	size = extent_size_get(extent);
-
-	if ((elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)addr,
-	    false, true)) == NULL)
+	if (extent_rtree_acquire(tsdn, extent, false, true, &elm_a, &elm_b))
 		return (true);
-	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, extent);
-	if (size > chunksize) {
-		uintptr_t last = ((uintptr_t)addr +
-		    (uintptr_t)(CHUNK_CEILING(size - chunksize)));
-		rtree_elm_t *elm_b;
-
-		if ((elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last, false,
-		    true)) == NULL) {
-			rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a,
-			    NULL);
-			rtree_elm_release(tsdn, &chunks_rtree, elm_a);
-			return (true);
-		}
-		rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_b, extent);
-		rtree_elm_release(tsdn, &chunks_rtree, elm_b);
-	}
-	rtree_elm_release(tsdn, &chunks_rtree, elm_a);
+	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
+	extent_rtree_release(tsdn, elm_a, elm_b);
 
 	if (config_prof && opt_prof) {
-		size_t nadd = (size == 0) ? 1 : size / chunksize;
+		size_t nadd = (extent_size_get(extent) == 0) ? 1 :
+		    extent_size_get(extent) / chunksize;
 		size_t cur = atomic_add_z(&curchunks, nadd);
 		size_t high = atomic_read_z(&highchunks);
 		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
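Two details in the helpers above are worth calling out. First, extent_rtree_acquire() locks the rtree leaf for the extent's base address and, when the extent spans more than one chunk, also the leaf for its last chunk; registering both endpoints is what lets a backward probe at addr - chunksize land on the predecessor extent. Second, the dependent flag encodes failure semantics: chunk_register() passes dependent=false with init_missing=true, so leaf creation may fail and registration reports that failure, while chunk_deregister() below passes dependent=true, asserting that the leaves already exist and the lookups cannot fail.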
@@ -192,29 +217,15 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
 void
 chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
 {
-	const void *addr;
-	size_t size;
-	rtree_elm_t *elm_a;
+	rtree_elm_t *elm_a, *elm_b;
 
-	addr = extent_addr_get(extent);
-	size = extent_size_get(extent);
-
-	elm_a = rtree_elm_acquire(tsdn, &chunks_rtree, (uintptr_t)addr, true,
-	    false);
-	rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_a, NULL);
-	if (size > chunksize) {
-		uintptr_t last = ((uintptr_t)addr +
-		    (uintptr_t)(CHUNK_CEILING(size - chunksize)));
-		rtree_elm_t *elm_b = rtree_elm_acquire(tsdn, &chunks_rtree,
-		    last, true, false);
-
-		rtree_elm_write_acquired(tsdn, &chunks_rtree, elm_b, NULL);
-		rtree_elm_release(tsdn, &chunks_rtree, elm_b);
-	}
-	rtree_elm_release(tsdn, &chunks_rtree, elm_a);
+	extent_rtree_acquire(tsdn, extent, true, false, &elm_a, &elm_b);
+	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
+	extent_rtree_release(tsdn, elm_a, elm_b);
 
 	if (config_prof && opt_prof) {
-		size_t nsub = (size == 0) ? 1 : size / chunksize;
+		size_t nsub = (extent_size_get(extent) == 0) ? 1 :
+		    extent_size_get(extent) / chunksize;
 		assert(atomic_read_z(&curchunks) >= nsub);
 		atomic_sub_z(&curchunks, nsub);
 	}
@@ -234,8 +245,7 @@ chunk_reregister(tsdn_t *tsdn, const extent_t *extent)
  * fits.
  */
 static extent_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, size_t size)
 {
 	extent_t key;
 
@@ -245,11 +255,25 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
 	return (extent_tree_szad_nsearch(chunks_szad, &key));
 }
 
+static void
+chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
+    void *addr, size_t size)
+{
+
+	/*
+	 * Leak chunk after making sure its pages have already been purged, so
+	 * that this is only a virtual memory leak.
+	 */
+	if (cache) {
+		chunk_purge_wrapper(tsdn, arena, chunk_hooks, addr, size, 0,
+		    size);
+	}
+}
+
 static void *
 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
-    bool dalloc_extent)
+    extent_tree_t *chunks_szad, bool cache, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
 {
 	void *ret;
 	extent_t *extent;
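chunk_leak() factors out a fallback the old chunk_record() open-coded: when bookkeeping for a chunk cannot be completed, its pages are purged (if it was cached) and the address range is abandoned, downgrading an unrecoverable error to a plain virtual memory leak. The split and record paths below now call it from several places, since chunk_register() introduces failure points (rtree leaf allocation) that the tree inserts it replaces did not have.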
@@ -271,14 +295,21 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 	if (new_addr != NULL) {
-		extent_t key;
-		extent_init(&key, arena, new_addr, alloc_size, false, false,
-		    false, false);
-		extent = extent_tree_ad_search(chunks_ad, &key);
-	} else {
-		extent = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
-		    alloc_size);
-	}
+		rtree_elm_t *elm;
+
+		elm = rtree_elm_acquire(tsdn, &chunks_rtree,
+		    (uintptr_t)new_addr, false, false);
+		if (elm != NULL) {
+			extent = rtree_elm_read_acquired(tsdn, &chunks_rtree,
+			    elm);
+			if (extent != NULL && (extent_active_get(extent) ||
+			    extent_retained_get(extent) == cache))
+				extent = NULL;
+			rtree_elm_release(tsdn, &chunks_rtree, elm);
+		} else
+			extent = NULL;
+	} else
+		extent = chunk_first_best_fit(arena, chunks_szad, alloc_size);
 	if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
 	    size)) {
 		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -304,15 +335,20 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		return (NULL);
 	}
 	/* Remove extent from the tree. */
+	chunk_deregister(tsdn, extent);
 	extent_tree_szad_remove(chunks_szad, extent);
-	extent_tree_ad_remove(chunks_ad, extent);
 	arena_chunk_cache_maybe_remove(arena, extent, cache);
 	if (leadsize != 0) {
 		/* Insert the leading space as a smaller chunk. */
 		extent_size_set(extent, leadsize);
-		extent_tree_szad_insert(chunks_szad, extent);
-		extent_tree_ad_insert(chunks_ad, extent);
-		arena_chunk_cache_maybe_insert(arena, extent, cache);
+		if (chunk_register(tsdn, extent)) {
+			chunk_leak(tsdn, arena, chunk_hooks, cache,
+			    extent_addr_get(extent), extent_size_get(extent));
+			arena_extent_dalloc(tsdn, arena, extent);
+		} else {
+			extent_tree_szad_insert(chunks_szad, extent);
+			arena_chunk_cache_maybe_insert(arena, extent, cache);
+		}
 		extent = NULL;
 	}
 	if (trailsize != 0) {
@@ -323,8 +359,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			arena_extent_dalloc(tsdn, arena, extent);
 			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 			chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
-			    chunks_ad, cache, ret, size + trailsize, zeroed,
-			    committed);
+			    cache, ret, size + trailsize, zeroed, committed);
 			return (NULL);
 		}
 		/* Insert the trailing space as a smaller chunk. */
@@ -333,22 +368,27 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			if (extent == NULL) {
 				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 				chunk_record(tsdn, arena, chunk_hooks,
-				    chunks_szad, chunks_ad, cache, ret, size +
-				    trailsize, zeroed, committed);
+				    chunks_szad, cache, ret, size + trailsize,
+				    zeroed, committed);
 				return (NULL);
 			}
 		}
 		extent_init(extent, arena, (void *)((uintptr_t)(ret) + size),
 		    trailsize, false, zeroed, committed, false);
-		extent_tree_szad_insert(chunks_szad, extent);
-		extent_tree_ad_insert(chunks_ad, extent);
-		arena_chunk_cache_maybe_insert(arena, extent, cache);
+		if (chunk_register(tsdn, extent)) {
+			chunk_leak(tsdn, arena, chunk_hooks, cache,
+			    extent_addr_get(extent), extent_size_get(extent));
+			arena_extent_dalloc(tsdn, arena, extent);
+		} else {
+			extent_tree_szad_insert(chunks_szad, extent);
+			arena_chunk_cache_maybe_insert(arena, extent, cache);
+		}
 		extent = NULL;
 	}
 	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
 		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
-		chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
-		    cache, ret, size, zeroed, committed);
+		chunk_record(tsdn, arena, chunk_hooks, chunks_szad, cache, ret,
+		    size, zeroed, committed);
 		return (NULL);
 	}
 	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -441,8 +481,8 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
 	commit = true;
 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
-	    &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
-	    new_addr, size, alignment, zero, &commit, dalloc_extent);
+	    &arena->chunks_szad_cached, true, new_addr, size, alignment, zero,
+	    &commit, dalloc_extent);
 	if (ret == NULL)
 		return (NULL);
 	assert(commit);
@@ -493,8 +533,8 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert((alignment & chunksize_mask) == 0);
 
 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
-	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
-	    new_addr, size, alignment, zero, commit, true);
+	    &arena->chunks_szad_retained, false, new_addr, size, alignment,
+	    zero, commit, true);
 
 	if (config_stats && ret != NULL)
 		arena->stats.retained -= size;
@@ -522,89 +562,118 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	return (ret);
 }
 
+static bool
+chunk_can_coalesce(const extent_t *a, const extent_t *b)
+{
+
+	assert((void *)CHUNK_CEILING((uintptr_t)extent_past_get(a)) ==
+	    extent_addr_get(b));
+
+	if (extent_arena_get(a) != extent_arena_get(b))
+		return (false);
+	if (extent_active_get(a) != extent_active_get(b))
+		return (false);
+	if (extent_committed_get(a) != extent_committed_get(b))
+		return (false);
+	if (extent_retained_get(a) != extent_retained_get(b))
+		return (false);
+
+	return (true);
+}
+
+static void
+chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_t *a, extent_t *b, extent_tree_t *chunks_szad, bool cache)
+{
+	rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+
+	if (!chunk_can_coalesce(a, b))
+		return;
+
+	if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
+	    extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
+	    arena->ind))
+		return;
+
+	/*
+	 * The rtree writes must happen while all the relevant elements are
+	 * owned, so the following code uses decomposed helper functions rather
+	 * than chunk_{,de}register() to do things in the right order.
+	 */
+	extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
+	extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
+
+	if (a_elm_b != NULL) {
+		rtree_elm_write_acquired(tsdn, &chunks_rtree, a_elm_b, NULL);
+		rtree_elm_release(tsdn, &chunks_rtree, a_elm_b);
+	}
+	if (b_elm_b != NULL) {
+		rtree_elm_write_acquired(tsdn, &chunks_rtree, b_elm_a, NULL);
+		rtree_elm_release(tsdn, &chunks_rtree, b_elm_a);
+	} else
+		b_elm_b = b_elm_a;
+
+	extent_tree_szad_remove(chunks_szad, a);
+	extent_tree_szad_remove(chunks_szad, b);
+
+	arena_chunk_cache_maybe_remove(extent_arena_get(a), a, cache);
+	arena_chunk_cache_maybe_remove(extent_arena_get(b), b, cache);
+
+	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+	extent_tree_szad_insert(chunks_szad, a);
+
+	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
+	extent_rtree_release(tsdn, a_elm_a, b_elm_b);
+
+	arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
+
+	arena_extent_dalloc(tsdn, extent_arena_get(b), b);
+}
+
 static void
 chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed)
+    extent_tree_t *chunks_szad, bool cache, void *chunk, size_t size,
+    bool zeroed, bool committed)
 {
-	bool unzeroed;
-	extent_t *extent, *prev;
-	extent_t key;
+	extent_t *extent, *prev, *next;
 
 	assert(!cache || !zeroed);
-	unzeroed = cache || !zeroed;
 
 	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
-	extent_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, false,
-	    false, false, false);
-	extent = extent_tree_ad_nsearch(chunks_ad, &key);
+
+	/* Create/initialize/insert extent. */
+	extent = arena_extent_alloc(tsdn, arena);
+	if (extent == NULL) {
+		chunk_leak(tsdn, arena, chunk_hooks, cache, chunk, size);
+		goto label_return;
+	}
+	extent_init(extent, arena, chunk, size, false, !cache && zeroed,
+	    committed, false);
+	if (chunk_register(tsdn, extent)) {
+		arena_extent_dalloc(tsdn, arena, extent);
+		chunk_leak(tsdn, arena, chunk_hooks, cache, chunk, size);
+		goto label_return;
+	}
+	extent_tree_szad_insert(chunks_szad, extent);
+	arena_chunk_cache_maybe_insert(arena, extent, cache);
+
 	/* Try to coalesce forward. */
-	if (extent != NULL && extent_addr_get(extent) == extent_addr_get(&key)
-	    && extent_committed_get(extent) == committed &&
-	    !chunk_hooks->merge(chunk, size, extent_addr_get(extent),
-	    extent_size_get(extent), false, arena->ind)) {
-		/*
-		 * Coalesce chunk with the following address range.  This does
-		 * not change the position within chunks_ad, so only
-		 * remove/insert from/into chunks_szad.
-		 */
-		extent_tree_szad_remove(chunks_szad, extent);
-		arena_chunk_cache_maybe_remove(arena, extent, cache);
-		extent_addr_set(extent, chunk);
-		extent_size_set(extent, size + extent_size_get(extent));
-		extent_zeroed_set(extent, extent_zeroed_get(extent) &&
-		    !unzeroed);
-		extent_tree_szad_insert(chunks_szad, extent);
-		arena_chunk_cache_maybe_insert(arena, extent, cache);
-	} else {
-		/* Coalescing forward failed, so insert a new extent. */
-		extent = arena_extent_alloc(tsdn, arena);
-		if (extent == NULL) {
-			/*
-			 * Node allocation failed, which is an exceedingly
-			 * unlikely failure.  Leak chunk after making sure its
-			 * pages have already been purged, so that this is only
-			 * a virtual memory leak.
-			 */
-			if (cache) {
-				chunk_purge_wrapper(tsdn, arena, chunk_hooks,
-				    chunk, size, 0, size);
-			}
-			goto label_return;
-		}
-		extent_init(extent, arena, chunk, size, false, !unzeroed,
-		    committed, false);
-		extent_tree_ad_insert(chunks_ad, extent);
-		extent_tree_szad_insert(chunks_szad, extent);
-		arena_chunk_cache_maybe_insert(arena, extent, cache);
+	next = rtree_read(tsdn, &chunks_rtree,
+	    CHUNK_CEILING((uintptr_t)extent_past_get(extent)), false);
+	if (next != NULL) {
+		chunk_try_coalesce(tsdn, arena, chunk_hooks, extent, next,
+		    chunks_szad, cache);
 	}
 
 	/* Try to coalesce backward. */
-	prev = extent_tree_ad_prev(chunks_ad, extent);
-	if (prev != NULL && (void *)((uintptr_t)extent_addr_get(prev) +
-	    extent_size_get(prev)) == chunk && extent_committed_get(prev) ==
-	    committed && !chunk_hooks->merge(extent_addr_get(prev),
-	    extent_size_get(prev), chunk, size, false, arena->ind)) {
-		/*
-		 * Coalesce chunk with the previous address range.  This does
-		 * not change the position within chunks_ad, so only
-		 * remove/insert extent from/into chunks_szad.
-		 */
-		extent_tree_szad_remove(chunks_szad, prev);
-		extent_tree_ad_remove(chunks_ad, prev);
-		arena_chunk_cache_maybe_remove(arena, prev, cache);
-		extent_tree_szad_remove(chunks_szad, extent);
-		arena_chunk_cache_maybe_remove(arena, extent, cache);
-		extent_addr_set(extent, extent_addr_get(prev));
-		extent_size_set(extent, extent_size_get(prev) +
-		    extent_size_get(extent));
-		extent_zeroed_set(extent, extent_zeroed_get(prev) &&
-		    extent_zeroed_get(extent));
-		extent_tree_szad_insert(chunks_szad, extent);
-		arena_chunk_cache_maybe_insert(arena, extent, cache);
-
-		arena_extent_dalloc(tsdn, arena, prev);
+	prev = rtree_read(tsdn, &chunks_rtree,
+	    (uintptr_t)extent_addr_get(extent) - chunksize, false);
+	if (prev != NULL) {
+		chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
+		    chunks_szad, cache);
 	}
 
 label_return:
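The rewritten chunk_record() now always creates and registers a fresh extent first, then merges opportunistically: a forward probe at the chunk boundary just past the extent and a backward probe one chunksize before it fetch candidate neighbors from the rtree, and chunk_try_coalesce() merges only if the two extents agree on arena, active, committed, and cached-vs-retained state and the merge chunk hook succeeds. As the comment in chunk_try_coalesce() notes, the rtree leaves are rewritten while still acquired, via the decomposed extent_rtree_* helpers rather than chunk_{,de}register(), so concurrent lookups never observe a stale mapping mid-merge.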
@@ -621,8 +690,8 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
-	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached, true,
+	    chunk, size, false, committed);
 	arena_maybe_purge(tsdn, arena);
 }
 
@@ -658,7 +727,7 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
 	    arena->ind);
 	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
-	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+	    false, chunk, size, zeroed, committed);
 
 	if (config_stats)
 		arena->stats.retained += size;
12 src/extent.c
@@ -39,15 +39,3 @@ extent_szad_comp(const extent_t *a, const extent_t *b)
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_szad_, extent_tree_t, extent_t, szad_link,
     extent_szad_comp)
-
-JEMALLOC_INLINE_C int
-extent_ad_comp(const extent_t *a, const extent_t *b)
-{
-	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
-	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
-
-	return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_t, ad_link, extent_ad_comp)