Prefer old/low extent_t structures during reuse.

Rather than using a LIFO queue to track available extent_t structures,
use a red-black tree, and always choose the oldest/lowest available
during reuse.
commit 881fbf762f
parent 76b35f4b2f
Author: Jason Evans
Date:   2017-04-16 22:31:16 -07:00

10 changed files with 80 additions and 37 deletions
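
The ordering this commit introduces is easiest to see in isolation. Below is a minimal, self-contained C sketch of the comparator chain (the sketch_* names are hypothetical stand-ins; jemalloc's real versions are the extent_esn_comp/extent_ead_comp/extent_esnead_comp inlines in the diff below): extents sort first by extent serial number (esn), with the address of the extent_t itself as the tiebreaker, so the tree minimum is always the oldest/lowest available structure.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for extent_t; only what the comparators need. */
    typedef struct sketch_extent_s {
        size_t esn; /* extent serial number; lower == older */
    } sketch_extent_t;

    /* Mirrors extent_esn_comp(): order by serial number (oldest first). */
    static int
    sketch_esn_comp(const sketch_extent_t *a, const sketch_extent_t *b) {
        return (a->esn > b->esn) - (a->esn < b->esn);
    }

    /* Mirrors extent_ead_comp(): tiebreak on the extent_t's own address. */
    static int
    sketch_ead_comp(const sketch_extent_t *a, const sketch_extent_t *b) {
        uintptr_t a_eaddr = (uintptr_t)a;
        uintptr_t b_eaddr = (uintptr_t)b;
        return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
    }

    /* Mirrors extent_esnead_comp(): the tree's total order. */
    static int
    sketch_esnead_comp(const sketch_extent_t *a, const sketch_extent_t *b) {
        int ret = sketch_esn_comp(a, b);
        return (ret != 0) ? ret : sketch_ead_comp(a, b);
    }

    int
    main(void) {
        sketch_extent_t old_ext = {1}, new_ext = {2};

        /* The older extent sorts first, so the tree minimum is the
         * oldest/lowest structure -- the one extent_alloc() now reuses. */
        assert(sketch_esnead_comp(&old_ext, &new_ext) < 0);
        printf("older extent sorts first\n");
        return 0;
    }

Under this order, taking the tree's first element replaces the previous extent_list_last() pick, trading the LIFO bias for a preference toward old/low structures.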

@@ -2691,14 +2691,14 @@ struct extent_hooks_s {
         counters</link>.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.arenas.i.mutexes.extent_freelist">
+      <varlistentry id="stats.arenas.i.mutexes.extent_avail">
         <term>
-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.extent_freelist.{counter}</mallctl>
+          <mallctl>stats.arenas.&lt;i&gt;.mutexes.extent_avail.{counter}</mallctl>
           (<type>counter specific type</type>) <literal>r-</literal>
           [<option>--enable-stats</option>]
         </term>
-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extent_freelist
-        </varname> mutex (arena scope; extent freelist related).
+        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extent_avail
+        </varname> mutex (arena scope; extent avail related).
         <mallctl>{counter}</mallctl> is one of the counters in <link
         linkend="mutex_counters">mutex profiling
         counters</link>.</para></listitem>

@@ -233,12 +233,13 @@ struct arena_s {
     atomic_u_t extent_grow_next;
 
     /*
-     * Freelist of extent structures that were allocated via base_alloc().
+     * Available extent structures that were allocated via
+     * base_alloc_extent().
      *
-     * Synchronization: extent_freelist_mtx.
+     * Synchronization: extent_avail_mtx.
      */
-    extent_list_t extent_freelist;
-    malloc_mutex_t extent_freelist_mtx;
+    extent_tree_t extent_avail;
+    malloc_mutex_t extent_avail_mtx;
 
     /*
      * bins is used to store heaps of free regions.

@@ -14,7 +14,7 @@ typedef enum {
 #define ARENA_PROF_MUTEXES \
     OP(large) \
-    OP(extent_freelist) \
+    OP(extent_avail) \
     OP(extents_dirty) \
     OP(extents_muzzy) \
     OP(extents_retained) \

@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
 #define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
 
+#include "jemalloc/internal/rb.h"
 #include "jemalloc/internal/ph.h"
 
 extern rtree_t extents_rtree;
@@ -17,6 +18,7 @@ size_t extent_size_quantize_floor(size_t size);
 size_t extent_size_quantize_ceil(size_t size);
 #endif
 
+rb_proto(, extent_avail_, extent_tree_t, extent_t)
 ph_proto(, extent_heap_, extent_heap_t, extent_t)
 
 bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,

@@ -53,8 +53,10 @@ void extent_list_replace(extent_list_t *list, extent_t *to_remove,
     extent_t *to_insert);
 void extent_list_remove(extent_list_t *list, extent_t *extent);
 int extent_sn_comp(const extent_t *a, const extent_t *b);
+int extent_esn_comp(const extent_t *a, const extent_t *b);
 int extent_ad_comp(const extent_t *a, const extent_t *b);
 int extent_snad_comp(const extent_t *a, const extent_t *b);
+int extent_esnead_comp(const extent_t *a, const extent_t *b);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
@@ -378,6 +380,14 @@ extent_sn_comp(const extent_t *a, const extent_t *b) {
     return (a_sn > b_sn) - (a_sn < b_sn);
 }
 
+JEMALLOC_INLINE int
+extent_esn_comp(const extent_t *a, const extent_t *b) {
+    size_t a_esn = extent_esn_get(a);
+    size_t b_esn = extent_esn_get(b);
+
+    return (a_esn > b_esn) - (a_esn < b_esn);
+}
+
 JEMALLOC_INLINE int
 extent_ad_comp(const extent_t *a, const extent_t *b) {
     uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
@@ -386,6 +396,14 @@ extent_ad_comp(const extent_t *a, const extent_t *b) {
     return (a_addr > b_addr) - (a_addr < b_addr);
 }
 
+JEMALLOC_INLINE int
+extent_ead_comp(const extent_t *a, const extent_t *b) {
+    uintptr_t a_eaddr = (uintptr_t)a;
+    uintptr_t b_eaddr = (uintptr_t)b;
+
+    return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
+}
+
 JEMALLOC_INLINE int
 extent_snad_comp(const extent_t *a, const extent_t *b) {
     int ret;
@@ -398,6 +416,19 @@ extent_snad_comp(const extent_t *a, const extent_t *b) {
     ret = extent_ad_comp(a, b);
     return ret;
 }
 
+JEMALLOC_INLINE int
+extent_esnead_comp(const extent_t *a, const extent_t *b) {
+    int ret;
+
+    ret = extent_esn_comp(a, b);
+    if (ret != 0) {
+        return ret;
+    }
+
+    ret = extent_ead_comp(a, b);
+    return ret;
+}
+
 #endif
 
 #endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */

@@ -2,8 +2,9 @@
 #define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
 
 #include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/ph.h"
 #include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/rb.h"
+#include "jemalloc/internal/ph.h"
 
 typedef enum {
     extent_state_active = 0,
@@ -117,15 +118,18 @@ struct extent_s {
             size_t e_bsize;
         };
 
-    /*
-     * List linkage, used by a variety of lists:
-     * - arena_bin_t's slabs_full
-     * - extents_t's LRU
-     * - stashed dirty extents
-     * - arena's large allocations
-     * - arena's extent structure freelist
-     */
-    ql_elm(extent_t) ql_link;
+    union {
+        /*
+         * List linkage, used by a variety of lists:
+         * - arena_bin_t's slabs_full
+         * - extents_t's LRU
+         * - stashed dirty extents
+         * - arena's large allocations
+         */
+        ql_elm(extent_t) ql_link;
+        /* Red-black tree linkage, used by arena's extent_avail. */
+        rb_node(extent_t) rb_link;
+    };
 
     /* Linkage for per size class sn/address-ordered heaps. */
     phn(extent_t) ph_link;
@@ -142,6 +146,7 @@ struct extent_s {
     };
 };
 
 typedef ql_head(extent_t) extent_list_t;
+typedef rb_tree(extent_t) extent_tree_t;
 typedef ph(extent_t) extent_heap_t;
 
 /* Quantized collection of extents, with built-in LRU queue. */

@@ -160,8 +160,11 @@ extent_dss_boot
 extent_dss_mergeable
 extent_dss_prec_get
 extent_dss_prec_set
+extent_ead_comp
+extent_esn_comp
 extent_esn_get
 extent_esn_set
+extent_esnead_comp
 extent_heap_empty
 extent_heap_first
 extent_heap_insert

@@ -314,8 +314,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 
     /* Gather per arena mutex profiling data. */
     READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
-    READ_ARENA_MUTEX_PROF_DATA(extent_freelist_mtx,
-        arena_prof_mutex_extent_freelist)
+    READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+        arena_prof_mutex_extent_avail)
     READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
         arena_prof_mutex_extents_dirty)
     READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
@@ -1937,8 +1937,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
         ATOMIC_RELAXED);
     }
 
-    extent_list_init(&arena->extent_freelist);
-    if (malloc_mutex_init(&arena->extent_freelist_mtx, "extent_freelist",
+    extent_avail_new(&arena->extent_avail);
+    if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
         WITNESS_RANK_EXTENT_FREELIST)) {
         goto label_error;
     }
@@ -2007,7 +2007,7 @@ arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
-    malloc_mutex_prefork(tsdn, &arena->extent_freelist_mtx);
+    malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
 }
 
 void
@@ -2036,7 +2036,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
     }
     malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
     base_postfork_parent(tsdn, arena->base);
-    malloc_mutex_postfork_parent(tsdn, &arena->extent_freelist_mtx);
+    malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
     extents_postfork_parent(tsdn, &arena->extents_dirty);
     extents_postfork_parent(tsdn, &arena->extents_muzzy);
     extents_postfork_parent(tsdn, &arena->extents_retained);
@@ -2056,7 +2056,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
     }
    malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
     base_postfork_child(tsdn, arena->base);
-    malloc_mutex_postfork_child(tsdn, &arena->extent_freelist_mtx);
+    malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
     extents_postfork_child(tsdn, &arena->extents_dirty);
     extents_postfork_child(tsdn, &arena->extents_muzzy);
     extents_postfork_child(tsdn, &arena->extents_retained);

@@ -2475,7 +2475,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
             continue;
         }
         MUTEX_PROF_RESET(arena->large_mtx);
-        MUTEX_PROF_RESET(arena->extent_freelist_mtx);
+        MUTEX_PROF_RESET(arena->extent_avail_mtx);
         MUTEX_PROF_RESET(arena->extents_dirty.mtx);
         MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
         MUTEX_PROF_RESET(arena->extents_retained.mtx);

@@ -88,20 +88,21 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
 
 /******************************************************************************/
 
+rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
+    extent_esnead_comp)
+
 extent_t *
 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
-    extent_t *extent;
-
     witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
-    malloc_mutex_lock(tsdn, &arena->extent_freelist_mtx);
-    extent = extent_list_last(&arena->extent_freelist);
+    malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+    extent_t *extent = extent_avail_first(&arena->extent_avail);
     if (extent == NULL) {
-        malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);
+        malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
         return base_alloc_extent(tsdn, arena->base);
     }
-    extent_list_remove(&arena->extent_freelist, extent);
-    malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);
+    extent_avail_remove(&arena->extent_avail, extent);
+    malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
     return extent;
 }
 
@@ -109,9 +110,9 @@ void
 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
     witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
 
-    malloc_mutex_lock(tsdn, &arena->extent_freelist_mtx);
-    extent_list_append(&arena->extent_freelist, extent);
-    malloc_mutex_unlock(tsdn, &arena->extent_freelist_mtx);
+    malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+    extent_avail_insert(&arena->extent_avail, extent);
+    malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
 }
 
 extent_hooks_t *
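
To make the behavioral change above concrete: the old code appended to a list and took extent_list_last(), i.e. LIFO reuse; the new code takes the minimum of the esn/address-ordered tree. The standalone sketch below demonstrates the difference, using a sorted singly linked list purely as a short stand-in for the rb_gen()-generated red-black tree (avail_insert/avail_take_first are hypothetical names, and the address tiebreaker is not modeled):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct avail_extent_s {
        size_t                  esn;  /* serial number; lower == older */
        struct avail_extent_s  *next;
    } avail_extent_t;

    /* Counterpart of extent_avail_insert(): keep the list sorted by esn. */
    static void
    avail_insert(avail_extent_t **head, avail_extent_t *extent) {
        while (*head != NULL && (*head)->esn < extent->esn) {
            head = &(*head)->next;
        }
        extent->next = *head;
        *head = extent;
    }

    /* Counterpart of extent_avail_first() + extent_avail_remove(). */
    static avail_extent_t *
    avail_take_first(avail_extent_t **head) {
        avail_extent_t *extent = *head;
        if (extent != NULL) {
            *head = extent->next;
        }
        return extent;
    }

    int
    main(void) {
        avail_extent_t a = {3, NULL}, b = {1, NULL}, c = {2, NULL};
        avail_extent_t *avail = NULL;

        /* "Deallocate" in arbitrary order... */
        avail_insert(&avail, &a);
        avail_insert(&avail, &b);
        avail_insert(&avail, &c);

        /* ...and reuse always hands back the oldest first: esn 1, 2, 3.
         * The old append + extent_list_last() LIFO scheme would have
         * returned 2, 1, 3 for this deallocation order. */
        for (avail_extent_t *e; (e = avail_take_first(&avail)) != NULL;) {
            printf("reuse esn=%zu\n", e->esn);
        }
        return 0;
    }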