Implement huge arena: opt.huge_threshold.

The feature allows using a dedicated arena for huge allocations.  We want the
additional arena to separate huge allocations because: 1) mixing small extents
with huge ones causes fragmentation over the long run (this feature reduces VM
size significantly); 2) with many arenas, huge extents rarely get reused across
threads; and 3) huge allocations happen way less frequently, therefore no
concerns for lock contention.
This commit is contained in:
Qi Wang
2018-05-21 13:33:48 -07:00
committed by Qi Wang
parent 77a71ef2b7
commit 94a88c26f4
8 changed files with 106 additions and 6 deletions

View File

@@ -42,6 +42,10 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
static div_info_t arena_binind_div_info[NBINS];
size_t opt_huge_threshold = HUGE_THRESHOLD_DEFAULT;
size_t huge_threshold = HUGE_THRESHOLD_DEFAULT;
static unsigned huge_arena_ind;
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -1378,7 +1382,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
assert(!tsdn_null(tsdn) || arena != NULL);
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose(tsdn_tsd(tsdn), arena);
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
}
if (unlikely(arena == NULL)) {
return NULL;
@@ -1939,6 +1943,58 @@ label_error:
return NULL;
}
arena_t *
arena_choose_huge(tsd_t *tsd) {
	/*
	 * During bootstrap huge_arena_ind may still be 0, in which case a0
	 * is used transparently.
	 */
	if (huge_arena_ind == 0) {
		assert(!malloc_initialized());
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);
	arena_t *ret = arena_get(tsdn, huge_arena_ind, false);
	if (ret != NULL) {
		return ret;
	}

	/* The huge arena is created lazily, on first use. */
	assert(huge_arena_ind != 0);
	ret = arena_get(tsdn, huge_arena_ind, true);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Purge eagerly for huge allocations, because: 1) the number of
	 * huge allocations is usually small, which makes ticker based
	 * decay unreliable; and 2) less immediate reuse is expected for
	 * huge allocations.
	 */
	if (arena_dirty_decay_ms_default_get() > 0) {
		arena_dirty_decay_ms_set(tsdn, ret, 0);
	}
	if (arena_muzzy_decay_ms_default_get() > 0) {
		arena_muzzy_decay_ms_set(tsdn, ret, 0);
	}

	return ret;
}
bool
arena_init_huge(void) {
	/* The huge arena is only enabled for a valid large size class. */
	bool huge_enabled = (opt_huge_threshold >= LARGE_MINCLASS &&
	    opt_huge_threshold <= LARGE_MAXCLASS);

	if (huge_enabled) {
		/* Reserve the index for the huge arena. */
		huge_arena_ind = narenas_total_get();
		huge_threshold = opt_huge_threshold;
	} else {
		opt_huge_threshold = 0;
		/* Above the largest class, so the threshold check never hits. */
		huge_threshold = LARGE_MAXCLASS + PAGE;
	}

	return huge_enabled;
}
void
arena_boot(void) {
arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);

View File

@@ -327,7 +327,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
*/
arena = arena_get(tsdn, ind, false);
if (arena != NULL) {
assert(ind < narenas_auto);
assert(arena_is_auto(arena));
return arena;
}
@@ -1142,11 +1142,15 @@ malloc_conf_init(void) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
CONF_HANDLE_BOOL(opt_tcache, "tcache")
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_SIZE_T(opt_huge_threshold, "huge_threshold",
LARGE_MINCLASS, LARGE_MAXCLASS, yes, yes, false)
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
"lg_extent_max_active_fit", 0,
(sizeof(size_t) << 3), yes, yes, false)
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
-1, (sizeof(size_t) << 3) - 1)
if (strncmp("percpu_arena", k, klen) == 0) {
bool match = false;
for (int i = percpu_arena_mode_names_base; i <
@@ -1465,6 +1469,9 @@ malloc_init_narenas(void) {
narenas_auto);
}
narenas_total_set(narenas_auto);
if (arena_init_huge()) {
narenas_total_inc();
}
return false;
}

View File

@@ -42,7 +42,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
*/
is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose(tsdn_tsd(tsdn), arena);
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {