Quantize szad trees by size class.
Treat sizes that round down to the same size class as size-equivalent in trees that are used to search for first best fit, so that there are only as many "firsts" as there are size classes. This comes closer to the ideal of first fit.
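The heart of the change is in the comparator rewrites below: free runs and chunks are ordered by the index of the largest size class they can satisfy (size2index(size + 1) - 1) rather than by exact byte size, so equally useful extents tie on size and are then sorted purely by address. What follows is a minimal, self-contained sketch of that idea; it uses a simplified power-of-two size-class model and hypothetical extent_t/szad_comp names, and it omits jemalloc's special handling of search keys, so it is an illustration of the technique rather than the real size2index()/tree machinery.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a free extent: address plus usable size. */
typedef struct {
	uintptr_t addr;
	size_t size;
} extent_t;

/*
 * Simplified size2index(): index of the smallest power-of-two size class
 * that can hold a request of size bytes (jemalloc's real classes are finer).
 */
static unsigned
size2index(size_t size)
{
	unsigned index = 0;
	size_t class_size = 1;

	while (class_size < size) {
		class_size <<= 1;
		index++;
	}
	return (index);
}

/*
 * Size/address ordering, quantized by size class: extents whose sizes round
 * down to the same class compare equal on size, so ties are broken purely by
 * address and a first-fit search returns the lowest-addressed candidate.
 */
static int
szad_comp(const extent_t *a, const extent_t *b)
{
	/* Largest size class each extent can satisfy (sizes assumed >= 1). */
	unsigned a_index = size2index(a->size + 1) - 1;
	unsigned b_index = size2index(b->size + 1) - 1;
	int ret = (a_index > b_index) - (a_index < b_index);

	if (ret == 0)
		ret = (a->addr > b->addr) - (a->addr < b->addr);
	return (ret);
}

int
main(void)
{
	/* 5000 and 4096 bytes both round down to the 4096-byte class ... */
	extent_t lo = {0x1000, 5000};
	extent_t hi = {0x9000, 4096};

	/* ... so the lower-addressed extent sorts first despite being larger. */
	assert(szad_comp(&lo, &hi) < 0);
	printf("comp(lo, hi) = %d\n", szad_comp(&lo, &hi));
	return (0);
}

With a tree keyed this way, a search for the smallest adequate size class returns the lowest-addressed run among all candidates that are equally useful, which is what the commit message means by having only as many "firsts" as there are size classes.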
commit 5707d6f952
parent f044bb219e

src/arena.c
@@ -59,21 +59,35 @@ JEMALLOC_INLINE_C int
 arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
 {
 	int ret;
+	uintptr_t a_miscelm = (uintptr_t)a;
 	size_t a_size;
 	size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
-	uintptr_t a_miscelm = (uintptr_t)a;
-	uintptr_t b_miscelm = (uintptr_t)b;
+	index_t a_index, b_index;
 
-	if (a_miscelm & CHUNK_MAP_KEY)
+	if (a_miscelm & CHUNK_MAP_KEY) {
 		a_size = a_miscelm & ~PAGE_MASK;
-	else
+		assert(a_size == s2u(a_size));
+	} else
 		a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;
 
-	ret = (a_size > b_size) - (a_size < b_size);
+	/*
+	 * Compute the index of the largest size class that the run can satisfy
+	 * a request for.
+	 */
+	a_index = size2index(a_size + 1) - 1;
+	b_index = size2index(b_size + 1) - 1;
+
+	/*
+	 * Compare based on size class index rather than size, in order to
+	 * sort equally useful runs only by address.
+	 */
+	ret = (a_index > b_index) - (a_index < b_index);
 	if (ret == 0) {
-		if (!(a_miscelm & CHUNK_MAP_KEY))
+		if (!(a_miscelm & CHUNK_MAP_KEY)) {
+			uintptr_t b_miscelm = (uintptr_t)b;
+
 			ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
-		else {
+		} else {
 			/*
 			 * Treat keys as if they are lower than anything else.
 			 */
@@ -898,8 +912,10 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
 {
 	arena_chunk_map_misc_t *miscelm;
 	arena_chunk_map_misc_t *key;
+	size_t usize;
 
-	key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
+	usize = s2u(size);
+	key = (arena_chunk_map_misc_t *)(usize | CHUNK_MAP_KEY);
 	miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
 	if (miscelm != NULL) {
 		arena_run_t *run = &miscelm->run;
@@ -949,7 +965,8 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
 	arena_chunk_map_misc_t *miscelm;
 	arena_chunk_map_misc_t *key;
 
-	key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
+	assert(size == s2u(size));
+	key = (arena_chunk_map_misc_t *)(PAGE_CEILING(size) | CHUNK_MAP_KEY);
 	miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
 	if (miscelm != NULL) {
 		run = &miscelm->run;
@@ -2778,6 +2795,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
 		    bin_info->reg_interval;
 	}
 	assert(actual_nregs > 0);
+	assert(actual_run_size == s2u(actual_run_size));
 
 	/* Copy final settings. */
 	bin_info->run_size = actual_run_size;
src/base.c
@@ -73,7 +73,7 @@ void *
 base_alloc(size_t size)
 {
 	void *ret;
-	size_t csize;
+	size_t csize, usize;
 	extent_node_t *node;
 	extent_node_t key;
 
@@ -83,7 +83,8 @@ base_alloc(size_t size)
 	 */
 	csize = CACHELINE_CEILING(size);
 
-	extent_node_init(&key, NULL, NULL, csize, false);
+	usize = s2u(csize);
+	extent_node_init(&key, NULL, NULL, usize, false);
 	malloc_mutex_lock(&base_mtx);
 	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
 	if (node != NULL) {
src/chunk.c
@@ -76,7 +76,7 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
 	assert(new_addr == NULL || alignment == chunksize);
 	assert(dalloc_node || new_addr != NULL);
 
-	alloc_size = size + alignment - chunksize;
+	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
src/extent.c
@@ -9,8 +9,14 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
 	int ret;
 	size_t a_size = extent_node_size_get(a);
 	size_t b_size = extent_node_size_get(b);
+	/*
+	 * Compute the index of the largest size class that the chunk can
+	 * satisfy a request for.
+	 */
+	size_t a_index = size2index(a_size + 1) - 1;
+	size_t b_index = size2index(b_size + 1) - 1;
 
-	ret = (a_size > b_size) - (a_size < b_size);
+	ret = (a_index > b_index) - (a_index < b_index);
 	if (ret == 0) {
 		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
 		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
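Because the trees now tie on size-class index, the call sites that build nsearch keys round the requested size up to a size class first: s2u() in arena_run_alloc_large_helper and base_alloc, CHUNK_CEILING(s2u(...)) in chunk_recycle, and an assert plus PAGE_CEILING in arena_run_alloc_small_helper. A key built from a raw request size could otherwise tie with, and match, an extent in the same class that is slightly smaller than the request. Below is a small sketch of that failure mode under the same simplified power-of-two class model as above; s2u_approx() and the sizes are illustrative stand-ins, not jemalloc's real s2u() or size classes.

#include <assert.h>
#include <stddef.h>

/* Simplified size-class helper (power-of-two classes, illustrative only). */
static unsigned
size2index(size_t size)
{
	unsigned index = 0;
	size_t class_size = 1;

	while (class_size < size) {
		class_size <<= 1;
		index++;
	}
	return (index);
}

/* Hypothetical s2u(): round a request up to its size class. */
static size_t
s2u_approx(size_t size)
{
	return ((size_t)1 << size2index(size));
}

/* Largest size class an extent of size bytes can satisfy (size >= 1). */
static unsigned
class_of(size_t size)
{
	return (size2index(size + 1) - 1);
}

int
main(void)
{
	size_t request = 5000;	/* caller asks for 5000 bytes */
	size_t run = 4096;	/* smallest free run in the tree */

	/* Unrounded key: same class as the 4096-byte run, yet too small. */
	assert(class_of(request) == class_of(run));
	assert(run < request);

	/*
	 * Rounded key (s2u): lands in the next class, so only runs that are
	 * actually large enough compare >= the key in an nsearch.
	 */
	assert(class_of(s2u_approx(request)) > class_of(run));
	assert(s2u_approx(request) >= request);
	return (0);
}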