Convert arena_bin_t's runs from a tree to a heap.

Author: Jason Evans  2016-03-08 01:04:48 -08:00
Commit: 613cdc80f6
Parent: 4a0dbb5ac8
2 changed files with 23 additions and 49 deletions
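Context for the diff below: a bin's runs structure is only ever asked for three operations: insert a non-full run, remove one, and hand back the run lowest in memory. An address-ordered pairing heap therefore replaces the red-black tree along with its comparator and rb_gen()-generated functions. What follows is a minimal, self-contained sketch of such an intrusive pairing heap; the toy_ph_* names and node layout are assumptions for illustration only, not jemalloc's actual ph.h.

#include <stddef.h>
#include <stdint.h>

/* Intrusive node, embedded in the element it links (cf. ph_link below). */
typedef struct toy_ph_node_s toy_ph_node_t;
struct toy_ph_node_s {
	toy_ph_node_t	*child;   /* First child. */
	toy_ph_node_t	*sibling; /* Next sibling. */
	toy_ph_node_t	*prev;    /* Parent if first child, else left sibling. */
};

typedef struct {
	toy_ph_node_t	*root;
} toy_ph_heap_t;

static void
toy_ph_new(toy_ph_heap_t *heap)
{
	heap->root = NULL;
}

/*
 * Meld two subtrees; the node lower in memory becomes the root, matching
 * the lowest-address-first policy that arena_run_addr_comp() encoded.
 */
static toy_ph_node_t *
toy_ph_meld(toy_ph_node_t *a, toy_ph_node_t *b)
{
	toy_ph_node_t *t;

	if (a == NULL)
		return (b);
	if (b == NULL)
		return (a);
	if ((uintptr_t)b < (uintptr_t)a) {
		t = a;
		a = b;
		b = t;
	}
	/* b becomes a's first child. */
	b->prev = a;
	b->sibling = a->child;
	if (a->child != NULL)
		a->child->prev = b;
	a->child = b;
	a->prev = NULL;
	a->sibling = NULL;
	return (a);
}

/* Two-pass pairing of a child list, used when a node is removed. */
static toy_ph_node_t *
toy_ph_merge_pairs(toy_ph_node_t *n)
{
	toy_ph_node_t *a, *b, *rest;

	if (n == NULL)
		return (NULL);
	if (n->sibling == NULL) {
		n->prev = NULL;
		return (n);
	}
	a = n;
	b = n->sibling;
	rest = b->sibling;
	a->sibling = NULL;
	b->sibling = NULL;
	return (toy_ph_meld(toy_ph_meld(a, b), toy_ph_merge_pairs(rest)));
}

static void
toy_ph_insert(toy_ph_heap_t *heap, toy_ph_node_t *n)
{
	n->child = n->sibling = n->prev = NULL;
	heap->root = toy_ph_meld(heap->root, n);
}

/* O(1): the root is always the node lowest in memory. */
static toy_ph_node_t *
toy_ph_first(toy_ph_heap_t *heap)
{
	return (heap->root);
}

/* Arbitrary removal: detach n's subtree, then fold its children back in. */
static void
toy_ph_remove(toy_ph_heap_t *heap, toy_ph_node_t *n)
{
	if (n == heap->root) {
		heap->root = toy_ph_merge_pairs(n->child);
		return;
	}
	if (n->prev->child == n)
		n->prev->child = n->sibling;	/* n was a first child. */
	else
		n->prev->sibling = n->sibling;	/* n had a left sibling. */
	if (n->sibling != NULL)
		n->sibling->prev = n->prev;
	heap->root = toy_ph_meld(heap->root, toy_ph_merge_pairs(n->child));
}

arena_bin_runs_insert(), arena_bin_runs_remove(), and arena_bin_runs_first() in the second file map one-for-one onto insert/remove/first; the only operation the heap gives up relative to the tree is search, whose sole users were debug asserts.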

include/jemalloc/internal/arena.h

@@ -147,11 +147,6 @@ struct arena_runs_dirty_link_s {
 	qr(arena_runs_dirty_link_t)	rd_link;
 };
 
-struct arena_avail_links_s {
-	arena_runs_dirty_link_t		rd;
-	ph_node_t			ph_link;
-};
-
 /*
  * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
  * like arena_chunk_map_bits_t.  Two separate arrays are stored within each
@@ -159,17 +154,17 @@ struct arena_avail_links_s {
  */
 struct arena_chunk_map_misc_s {
 	/*
-	 * Linkage for run trees.  There are two disjoint uses:
+	 * Linkage for run heaps.  There are two disjoint uses:
 	 *
-	 *   1) arena_t's runs_avail tree.
+	 *   1) arena_t's runs_avail heaps.
 	 *   2) arena_run_t conceptually uses this linkage for in-use non-full
 	 *      runs, rather than directly embedding linkage.
 	 */
-	rb_node(arena_chunk_map_misc_t)		rb_link;
+	ph_node_t				ph_link;
 
 	union {
 		/* Linkage for list of dirty runs. */
-		arena_avail_links_t		avail;
+		arena_runs_dirty_link_t		rd;
 
 		/* Profile counters, used for large object runs. */
 		union {
@@ -181,7 +176,6 @@ struct arena_chunk_map_misc_s {
 		arena_run_t			run;
 	};
 };
-typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
 #endif /* JEMALLOC_ARENA_STRUCTS_A */
 
 #ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -278,13 +272,13 @@ struct arena_bin_s {
 	arena_run_t		*runcur;
 
 	/*
-	 * Tree of non-full runs.  This tree is used when looking for an
+	 * Heap of non-full runs.  This heap is used when looking for an
 	 * existing run when runcur is no longer usable.  We choose the
 	 * non-full run that is lowest in memory; this policy tends to keep
 	 * objects packed well, and it can also help reduce the number of
 	 * almost-empty chunks.
 	 */
-	arena_run_tree_t	runs;
+	ph_heap_t		runs;
 
 	/* Bin statistics. */
 	malloc_bin_stats_t	stats;
@@ -709,7 +703,7 @@ JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
 arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
 {
 	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
-	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, avail));
+	    *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
 
 	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
 	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
@@ -721,7 +715,7 @@ JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
 arena_ph_to_miscelm(ph_node_t *ph)
 {
 	arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t *)
-	    ((uintptr_t)ph - offsetof(arena_chunk_map_misc_t, avail.ph_link));
+	    ((uintptr_t)ph - offsetof(arena_chunk_map_misc_t, ph_link));
 
 	assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
 	assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
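Both converters above use the same container-of idiom: given a pointer to a linkage field embedded in arena_chunk_map_misc_t, subtract that field's offsetof() to recover the enclosing element. With arena_avail_links_s flattened away, the commit only has to update the member names the offsets are computed from (avail becomes rd, avail.ph_link becomes ph_link). A self-contained sketch of the idiom; elem_t, link_t, and elem_from_link() are hypothetical names for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	int	dummy;
} link_t;

typedef struct {
	int	payload;
	link_t	link;	/* Embedded linkage, as ph_link is embedded above. */
} elem_t;

/* Map a pointer to the embedded link back to its enclosing elem_t. */
static elem_t *
elem_from_link(link_t *l)
{
	return ((elem_t *)((uintptr_t)l - offsetof(elem_t, link)));
}

int
main(void)
{
	elem_t e = {42, {0}};

	assert(elem_from_link(&e.link) == &e);
	assert(elem_from_link(&e.link)->payload == 42);
	return (0);
}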

src/arena.c

@@ -59,23 +59,6 @@ arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
 	return (arena_mapbits_size_decode(mapbits));
 }
 
-JEMALLOC_INLINE_C int
-arena_run_addr_comp(const arena_chunk_map_misc_t *a,
-    const arena_chunk_map_misc_t *b)
-{
-	uintptr_t a_miscelm = (uintptr_t)a;
-	uintptr_t b_miscelm = (uintptr_t)b;
-
-	assert(a != NULL);
-	assert(b != NULL);
-
-	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
-}
-
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
-    rb_link, arena_run_addr_comp)
-
 static size_t
 run_quantize_floor_compute(size_t size)
 {
@@ -218,7 +201,7 @@ arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
 	    LG_PAGE));
 	ph_insert(arena_runs_avail_get(arena, ind),
-	    &arena_miscelm_get(chunk, pageind)->avail.ph_link);
+	    &arena_miscelm_get(chunk, pageind)->ph_link);
 }
 
 static void
@@ -230,7 +213,7 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
 	    LG_PAGE));
 	ph_remove(arena_runs_avail_get(arena, ind),
-	    &arena_miscelm_get(chunk, pageind)->avail.ph_link);
+	    &arena_miscelm_get(chunk, pageind)->ph_link);
 }
 
 static void
@@ -245,8 +228,8 @@ arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
 	    CHUNK_MAP_DIRTY);
 
-	qr_new(&miscelm->avail.rd, rd_link);
-	qr_meld(&arena->runs_dirty, &miscelm->avail.rd, rd_link);
+	qr_new(&miscelm->rd, rd_link);
+	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
 
 	arena->ndirty += npages;
 }
@@ -262,7 +245,7 @@ arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
 	    CHUNK_MAP_DIRTY);
 
-	qr_remove(&miscelm->avail.rd, rd_link);
+	qr_remove(&miscelm->rd, rd_link);
 	assert(arena->ndirty >= npages);
 	arena->ndirty -= npages;
 }
@@ -2069,11 +2052,14 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
 static arena_run_t *
 arena_bin_runs_first(arena_bin_t *bin)
 {
-	arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
-	if (miscelm != NULL)
-		return (&miscelm->run);
+	ph_node_t *node;
+	arena_chunk_map_misc_t *miscelm;
 
-	return (NULL);
+	node = ph_first(&bin->runs);
+	if (node == NULL)
+		return (NULL);
+	miscelm = arena_ph_to_miscelm(node);
+	return (&miscelm->run);
 }
 
 static void
@@ -2081,9 +2067,7 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
 {
 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 
-	assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
-
-	arena_run_tree_insert(&bin->runs, miscelm);
+	ph_insert(&bin->runs, &miscelm->ph_link);
 }
 
 static void
@@ -2091,9 +2075,7 @@ arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
 {
 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 
-	assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
-
-	arena_run_tree_remove(&bin->runs, miscelm);
+	ph_remove(&bin->runs, &miscelm->ph_link);
 }
 
 static arena_run_t *
@@ -2676,8 +2658,6 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     arena_bin_t *bin)
 {
 	assert(run != bin->runcur);
-	assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
-	    NULL);
 
 	malloc_mutex_unlock(&bin->lock);
 	/******************************/
@@ -3414,7 +3394,7 @@ arena_new(unsigned ind)
 		if (malloc_mutex_init(&bin->lock))
 			return (NULL);
 		bin->runcur = NULL;
-		arena_run_tree_new(&bin->runs);
+		ph_new(&bin->runs);
 		if (config_stats)
 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
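A note on what the conversion drops: the red-black tree supported arena_run_tree_search(), whose only callers were the sanity asserts in arena_bin_runs_insert(), arena_bin_runs_remove(), and arena_dalloc_bin_run(); a pairing heap has no efficient membership query, so those checks are removed rather than ported. Everything the allocator actually relies on (insert, remove, and returning the lowest run in memory) carries over as ph_insert(), ph_remove(), and ph_first(), and the placement policy documented in arena_bin_s is unchanged.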