Use first-fit rather than first-best-fit run/chunk allocation.
This tends to more effectively pack active memory toward low addresses. However, additional tree searches are required in many cases, so whether this change stands the test of time will depend on real-world benchmarks.
parent 5707d6f952
commit 97c04a9383
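For intuition, here is a minimal standalone sketch of the two policies over a toy array of free blocks kept in (size, address) order; the block_t type and the free_blocks[] data are invented for illustration and are not jemalloc's structures. The old first-best-fit policy stops at the lowest-addressed block within the smallest size class that fits, while the new first-fit policy considers every large-enough candidate and takes the lowest address overall.

/*
 * Toy comparison of first-best-fit vs. first-fit selection.  Not jemalloc
 * code: block_t and free_blocks[] are made up to illustrate the policy.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uintptr_t addr;
	size_t size;
} block_t;

/* Toy free set, ordered by (size, address) like a szad tree's iteration. */
static block_t free_blocks[] = {
	{0x7000, 1}, {0x1000, 2}, {0x4000, 2}, {0x2000, 8}
};
#define NFREE (sizeof(free_blocks) / sizeof(free_blocks[0]))

/* Old policy: lowest-addressed block in the smallest size class that fits. */
static block_t *
best_fit(size_t size)
{
	size_t i;

	for (i = 0; i < NFREE; i++) {
		if (free_blocks[i].size >= size)
			return (&free_blocks[i]);
	}
	return (NULL);
}

/* New policy: lowest-addressed block among all blocks that are large enough. */
static block_t *
first_fit(size_t size)
{
	block_t *best = NULL;
	size_t i;

	for (i = 0; i < NFREE; i++) {
		if (free_blocks[i].size < size)
			continue;
		if (best == NULL || free_blocks[i].addr < best->addr)
			best = &free_blocks[i];
	}
	return (best);
}

int
main(void)
{
	/* best-fit picks 0x7000 (exact size); first-fit picks 0x1000. */
	printf("best-fit:  %#lx\n", (unsigned long)best_fit(1)->addr);
	printf("first-fit: %#lx\n", (unsigned long)first_fit(1)->addr);
	return (0);
}

With this sample data, best-fit returns the block at 0x7000 while first-fit returns 0x1000, which is the behavior the commit message describes as packing active memory toward low addresses; the cost is the wider scan, which in the diff below shows up as additional tree searches across size classes.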
include/jemalloc/internal/arena.h
@@ -312,7 +312,7 @@ struct arena_s {
 
 	/*
 	 * Size/address-ordered tree of this arena's available runs.  The tree
-	 * is used for first-best-fit run allocation.
+	 * is used for first-fit run allocation.
 	 */
 	arena_avail_tree_t	runs_avail;
 
src/arena.c (68 lines changed)
@@ -907,23 +907,55 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
 	return (err);
 }
 
+/* Do first-fit run selection. */
 static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+arena_run_first_fit(arena_t *arena, size_t size)
 {
-	arena_chunk_map_misc_t *miscelm;
-	arena_chunk_map_misc_t *key;
-	size_t usize;
+	arena_run_t *run;
+	index_t index, max_index;
 
-	assert(size == s2u(size));
+	assert(size == PAGE_CEILING(size));
 
-	usize = s2u(size);
-	key = (arena_chunk_map_misc_t *)(usize | CHUNK_MAP_KEY);
-	miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
-	if (miscelm != NULL) {
-		arena_run_t *run = &miscelm->run;
-		arena_run_split_large(arena, &miscelm->run, size, zero);
-		return (run);
-	}
-
-	return (NULL);
+	/*
+	 * Iterate over all size classes that are at least large enough to
+	 * satisfy the request, search for the lowest run of each size class,
+	 * and choose the lowest of the runs found.
+	 */
+	run = NULL;
+	for (index = size2index(size), max_index = size2index(arena_maxclass);
+	    index <= max_index;) {
+		arena_run_t *currun;
+		arena_chunk_t *currun_chunk;
+		size_t currun_pageind, currun_size;
+		size_t usize = PAGE_CEILING(index2size(index));
+		arena_chunk_map_misc_t *key = (arena_chunk_map_misc_t *)(usize |
+		    CHUNK_MAP_KEY);
+		arena_chunk_map_misc_t *miscelm =
+		    arena_avail_tree_nsearch(&arena->runs_avail, key);
+		if (miscelm == NULL)
+			break;
+		currun = &miscelm->run;
+		if (run == NULL || (uintptr_t)currun < (uintptr_t)run)
+			run = currun;
+		currun_chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(currun);
+		currun_pageind = arena_miscelm_to_pageind(miscelm);
+		currun_size = arena_mapbits_unallocated_size_get(currun_chunk,
+		    currun_pageind);
+		assert(size2index(currun_size) + 1 > index);
+		index = size2index(currun_size) + 1;
+	}
+
+	return (run);
+}
+
+static arena_run_t *
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+{
+	arena_run_t *run = arena_run_first_fit(arena, s2u(size));
+
+	if (run != NULL)
+		arena_run_split_large(arena, run, size, zero);
+	return (run);
 }
 
 static arena_run_t *
@@ -961,22 +993,12 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
 static arena_run_t *
 arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
 {
-	arena_run_t *run;
-	arena_chunk_map_misc_t *miscelm;
-	arena_chunk_map_misc_t *key;
-
-	assert(size == s2u(size));
-
-	key = (arena_chunk_map_misc_t *)(PAGE_CEILING(size) | CHUNK_MAP_KEY);
-	miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
-	if (miscelm != NULL) {
-		run = &miscelm->run;
-		arena_run_split_small(arena, run, size, binind);
-		return (run);
-	}
-
-	return (NULL);
+	arena_run_t *run = arena_run_first_fit(arena, PAGE_CEILING(size));
+
+	if (run != NULL)
+		arena_run_split_small(arena, run, size, binind);
+	return (run);
 }
 
 static arena_run_t *
 arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
 {
src/chunk.c (43 lines changed)
@@ -62,6 +62,39 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
 	}
 }
 
+/* Do first-fit chunk selection. */
+static extent_node_t *
+chunk_first_fit(arena_t *arena, extent_tree_t *chunks_szad, size_t size)
+{
+	extent_node_t *node;
+	index_t index;
+
+	assert(size == CHUNK_CEILING(size));
+
+	/*
+	 * Iterate over all size classes that are at least large enough to
+	 * satisfy the request, search for the lowest chunk of each size class,
+	 * and choose the lowest of the chunks found.
+	 */
+	node = NULL;
+	for (index = size2index(size); index < NSIZES;) {
+		extent_node_t *curnode;
+		extent_node_t key;
+		extent_node_init(&key, arena, NULL,
+		    CHUNK_CEILING(index2size(index)), false);
+		curnode = extent_tree_szad_nsearch(chunks_szad, &key);
+		if (curnode == NULL)
+			break;
+		if (node == NULL || (uintptr_t)extent_node_addr_get(curnode) <
+		    (uintptr_t)extent_node_addr_get(node))
+			node = curnode;
+		assert(size2index(extent_node_size_get(curnode)) + 1 > index);
+		index = size2index(extent_node_size_get(curnode)) + 1;
+	}
+
+	return (node);
+}
+
 static void *
 chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
@@ -69,7 +102,6 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
 {
 	void *ret;
 	extent_node_t *node;
-	extent_node_t key;
 	size_t alloc_size, leadsize, trailsize;
 	bool zeroed;
 
@@ -80,10 +112,13 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
-	extent_node_init(&key, arena, new_addr, alloc_size, false);
 	malloc_mutex_lock(&arena->chunks_mtx);
-	node = (new_addr != NULL) ? extent_tree_ad_search(chunks_ad, &key) :
-	    extent_tree_szad_nsearch(chunks_szad, &key);
+	if (new_addr != NULL || size == chunksize) {
+		extent_node_t key;
+		extent_node_init(&key, arena, new_addr, alloc_size, false);
+		node = extent_tree_ad_search(chunks_ad, &key);
+	} else
+		node = chunk_first_fit(arena, chunks_szad, alloc_size);
 	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
 	    size)) {
 		malloc_mutex_unlock(&arena->chunks_mtx);