diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 895b2d4d..3f9ba201 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -1069,6 +1069,22 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
         for related dynamic control options.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="opt.lg_extent_max_active_fit">
+        <term>
+          <mallctl>opt.lg_extent_max_active_fit</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>When reusing dirty extents, this determines the (log
+        base 2 of the) maximum ratio between the size of the active extent
+        selected (to split off from) and the size of the requested allocation.
+        This prevents the splitting of large active extents for smaller
+        allocations, which can reduce fragmentation over the long run
+        (especially for non-active extents).  A lower value may reduce
+        fragmentation, at the cost of extra active extents.  The default
+        value is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
+      </varlistentry>
+
       <varlistentry id="opt.stats_print">
         <term>
           <mallctl>opt.stats_print</mallctl>
diff --git a/include/jemalloc/internal/extent_externs.h b/include/jemalloc/internal/extent_externs.h
index 132d8903..a76d4e4a 100644
--- a/include/jemalloc/internal/extent_externs.h
+++ b/include/jemalloc/internal/extent_externs.h
@@ -6,9 +6,11 @@
 #include "jemalloc/internal/ph.h"
 #include "jemalloc/internal/rtree.h"
 
-extern rtree_t extents_rtree;
-extern const extent_hooks_t extent_hooks_default;
-extern mutex_pool_t extent_mutex_pool;
+extern size_t opt_lg_extent_max_active_fit;
+
+extern rtree_t extents_rtree;
+extern const extent_hooks_t extent_hooks_default;
+extern mutex_pool_t extent_mutex_pool;
 
 extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
 void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
diff --git a/include/jemalloc/internal/extent_types.h b/include/jemalloc/internal/extent_types.h
index 7efcd3a4..c0561d99 100644
--- a/include/jemalloc/internal/extent_types.h
+++ b/include/jemalloc/internal/extent_types.h
@@ -8,4 +8,11 @@ typedef struct extents_s extents_t;
 
 #define EXTENT_GROW_MAX_PIND (NPSIZES - 1)
 
+/*
+ * When reusing (and splitting) an active extent, (1U <<
+ * opt_lg_extent_max_active_fit) is the maximum ratio between the size of the
+ * active extent and the size of the new extent.
+ */
+#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
+
 #endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
diff --git a/src/ctl.c b/src/ctl.c
index 11cd68db..1fdb772d 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -95,6 +95,7 @@ CTL_PROTO(opt_zero)
 CTL_PROTO(opt_utrace)
 CTL_PROTO(opt_xmalloc)
 CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_lg_extent_max_active_fit)
 CTL_PROTO(opt_lg_tcache_max)
 CTL_PROTO(opt_prof)
 CTL_PROTO(opt_prof_prefix)
@@ -293,6 +294,7 @@ static const ctl_named_node_t opt_node[] = {
 	{NAME("utrace"), CTL(opt_utrace)},
 	{NAME("xmalloc"), CTL(opt_xmalloc)},
 	{NAME("tcache"), CTL(opt_tcache)},
+	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
 	{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
 	{NAME("prof"), CTL(opt_prof)},
 	{NAME("prof_prefix"), CTL(opt_prof_prefix)},
@@ -1597,6 +1599,8 @@ CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
 CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
 CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
 CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
+    size_t)
 CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
diff --git a/src/extent.c b/src/extent.c
index 466e0b2a..548a93e2 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -17,6 +17,8 @@ rtree_t extents_rtree;
 /* Keyed by the address of the extent_t being protected. */
 mutex_pool_t extent_mutex_pool;
 
+size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
+
 static const bitmap_info_t extents_bitmap_info =
     BITMAP_INFO_INITIALIZER(NPSIZES+1);
 
@@ -369,6 +371,13 @@ extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
 	    (size_t)pind);
 	if (i < NPSIZES+1) {
+		/*
+		 * In order to reduce fragmentation, avoid reusing and splitting
+		 * large extents for much smaller sizes.
+		 */
+		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
+			return NULL;
+		}
 		assert(!extent_heap_empty(&extents->heaps[i]));
 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
 		assert(extent_size_get(extent) >= size);
diff --git a/src/jemalloc.c b/src/jemalloc.c
index f29fc7da..f4fd805e 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -1146,6 +1146,9 @@ malloc_conf_init(void) {
 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
 			}
 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
+			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
+			    "lg_extent_max_active_fit", 0,
+			    (sizeof(size_t) << 3), yes, yes, false)
 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
 			    -1, (sizeof(size_t) << 3) - 1)
 			if (strncmp("percpu_arena", k, klen) == 0) {
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 94f801e3..4cfd981a 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -172,6 +172,7 @@ TEST_BEGIN(test_mallctl_opt) {
 	TEST_MALLCTL_OPT(bool, utrace, utrace);
 	TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
 	TEST_MALLCTL_OPT(bool, tcache, always);
+	TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
 	TEST_MALLCTL_OPT(ssize_t, lg_tcache_max, always);
 	TEST_MALLCTL_OPT(bool, prof, prof);
 	TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
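
The guard added to extents_best_fit_locked() rejects a candidate heap when its size class, shifted right by opt_lg_extent_max_active_fit, still exceeds the requested size. A minimal standalone sketch of that arithmetic, assuming the default value of 6; the helper name extent_fits_active is hypothetical and not part of the patch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Mirrors the new check: an active extent of size extent_size may be split
 * for a request of size `size` only if (extent_size >> lg_max_fit) <= size,
 * i.e. the extent is at most (1U << lg_max_fit) times larger than the
 * request.  (The patch applies this to the size class sz_pind2sz(i).)
 */
static bool
extent_fits_active(size_t extent_size, size_t size, size_t lg_max_fit) {
	return (extent_size >> lg_max_fit) <= size;
}

int
main(void) {
	/* Default of 6: the active extent may be at most 64x the request. */
	printf("%d\n", extent_fits_active(256 * 1024, 4096, 6)); /* 1: ratio 64 */
	printf("%d\n", extent_fits_active(512 * 1024, 4096, 6)); /* 0: ratio 128 */
	return 0;
}

When the check fails, extents_best_fit_locked() returns NULL, so the large extent stays intact rather than being split off for a much smaller allocation.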
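Since the option is read-only at runtime ("r-"), it is set at startup via MALLOC_CONF using the "lg_extent_max_active_fit" key handled in malloc_conf_init(), and can be read back through mallctl(). A usage sketch; the option and key names come from the patch, while the surrounding program is illustrative:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Build against jemalloc (e.g. cc demo.c -ljemalloc) and run with e.g.
 * MALLOC_CONF=lg_extent_max_active_fit:4 to tighten the fit from the
 * default ratio of 64 (2^6) down to 16 (2^4).
 */
int
main(void) {
	size_t fit;
	size_t sz = sizeof(fit);

	/* Read-only mallctl: newp/newlen must be NULL/0. */
	if (mallctl("opt.lg_extent_max_active_fit", &fit, &sz, NULL, 0) == 0) {
		printf("lg_extent_max_active_fit: %zu (max ratio: %zu)\n",
		    fit, (size_t)1 << fit);
	}
	return 0;
}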