Implement huge arena: opt.huge_threshold.
The feature allows using a dedicated arena for huge allocations. We want the additional arena to separate huge allocations because: 1) mixing small extents with huge ones causes fragmentation over the long run (this feature reduces VM size significantly); 2) with many arenas, huge extents rarely get reused across threads; and 3) huge allocations happen far less frequently, so lock contention is not a concern.
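As a rough usage sketch (not taken from this commit), the threshold can be set through malloc_conf and read back via mallctl. The huge_threshold conf key and the 4 MiB value below are illustrative assumptions based on the opt.huge_threshold name, and the build is assumed to expose the unprefixed jemalloc API.

/*
 * Hedged example: request a 4 MiB huge threshold at startup and confirm the
 * effective opt.huge_threshold value.  The conf key and value are assumptions,
 * not part of this diff.
 */
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

/* Compile-time configuration string honored by jemalloc at startup. */
const char *malloc_conf = "huge_threshold:4194304";

int main(void) {
	size_t threshold;
	size_t sz = sizeof(threshold);
	if (mallctl("opt.huge_threshold", &threshold, &sz, NULL, 0) == 0) {
		printf("huge_threshold = %zu bytes\n", threshold);
	}
	/* At or above the threshold, threads on automatic arena selection
	 * should be routed to the dedicated huge arena. */
	void *p = malloc(8 * 1024 * 1024);
	free(p);
	return 0;
}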
@@ -17,6 +17,9 @@ extern const char *percpu_arena_mode_names[];
 extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
 extern malloc_mutex_t arenas_lock;
 
+extern size_t opt_huge_threshold;
+extern size_t huge_threshold;
+
 void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
     unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
     ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
@@ -81,6 +84,8 @@ void arena_nthreads_inc(arena_t *arena, bool internal);
 void arena_nthreads_dec(arena_t *arena, bool internal);
 size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+bool arena_init_huge(void);
+arena_t *arena_choose_huge(tsd_t *tsd);
 void arena_boot(void);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
@@ -8,6 +8,27 @@
 #include "jemalloc/internal/sz.h"
 #include "jemalloc/internal/ticker.h"
 
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
+	if (arena != NULL) {
+		return arena;
+	}
+
+	/*
+	 * For huge allocations, use the dedicated huge arena if both are true:
+	 * 1) is using auto arena selection (i.e. arena == NULL), and 2) the
+	 * thread is not assigned to a manual arena.
+	 */
+	if (unlikely(size >= huge_threshold)) {
+		arena_t *tsd_arena = tsd_arena_get(tsd);
+		if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
+			return arena_choose_huge(tsd);
+		}
+	}
+
+	return arena_choose(tsd, NULL);
+}
+
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
 arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
 	cassert(config_prof);
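To make the routing rule in the hunk above concrete, here is a small self-contained model of the same decision: an explicitly passed arena wins; a sufficiently large request from a thread on automatic arena selection goes to the dedicated huge arena; everything else falls back to the normal per-thread choice. All names and the 1 MiB threshold below are hypothetical stand-ins, not jemalloc internals.

/*
 * Simplified, self-contained model of the rule in arena_choose_maybe_huge().
 * Types, helper names, and the threshold are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MODEL_HUGE_THRESHOLD ((size_t)1 << 20)	/* 1 MiB, arbitrary */

enum arena_kind { ARENA_AUTO, ARENA_MANUAL, ARENA_HUGE };

/* Whether the calling thread is already bound to an arena, and which kind. */
struct thread_state {
	bool bound;
	enum arena_kind bound_kind;
};

static enum arena_kind
model_choose_maybe_huge(const enum arena_kind *explicit_arena,
    const struct thread_state *t, size_t size) {
	if (explicit_arena != NULL) {
		/* A caller-specified arena always wins. */
		return *explicit_arena;
	}
	if (size >= MODEL_HUGE_THRESHOLD &&
	    (!t->bound || t->bound_kind != ARENA_MANUAL)) {
		/* Huge request from an auto-selection thread: dedicated arena. */
		return ARENA_HUGE;
	}
	/* Normal per-thread choice otherwise. */
	return t->bound ? t->bound_kind : ARENA_AUTO;
}

int main(void) {
	struct thread_state unbound = { false, ARENA_AUTO };
	struct thread_state manual = { true, ARENA_MANUAL };

	printf("%d\n", model_choose_maybe_huge(NULL, &unbound, (size_t)4 << 20)); /* 2: ARENA_HUGE */
	printf("%d\n", model_choose_maybe_huge(NULL, &manual, (size_t)4 << 20));  /* 1: ARENA_MANUAL */
	printf("%d\n", model_choose_maybe_huge(NULL, &unbound, 4096));            /* 0: ARENA_AUTO */
	return 0;
}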
@@ -40,4 +40,10 @@ typedef enum {
 #define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base)
 #define PERCPU_ARENA_DEFAULT percpu_arena_disabled
 
+/*
+ * When allocation_size >= huge_threshold, use the dedicated huge arena (unless
+ * an arena index is explicitly specified).  0 disables the feature.
+ */
+#define HUGE_THRESHOLD_DEFAULT 0
+
 #endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
@@ -71,7 +71,9 @@ arena_ichoose(tsd_t *tsd, arena_t *arena) {
 static inline bool
 arena_is_auto(arena_t *arena) {
 	assert(narenas_auto > 0);
-	return (arena_ind_get(arena) < narenas_auto);
+	unsigned offset = (opt_huge_threshold != 0) ? 1 : 0;
+
+	return (arena_ind_get(arena) < narenas_auto + offset);
 }
 
 JEMALLOC_ALWAYS_INLINE extent_t *