#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H

#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"

/* Runtime-tunable decay times (in ms) for dirty and muzzy pages. */
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;

/* Per-size-class metadata for the small (bin-backed) size classes. */
extern const arena_bin_info_t arena_bin_info[NBINS];

/* Per-CPU arena mode selected via the opt.percpu_arena mallctl. */
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];

/* Precomputed smoothstep table used by decay-based purging. */
extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];

extern malloc_mutex_t arenas_lock;

/* Arena statistics accumulation and merging. */
void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests);
void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    size_t size);
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);

void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent);
#ifdef JEMALLOC_JET
size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
#endif

/* Large-extent allocation and in-place reallocation support. */
extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    size_t usize, size_t alignment, bool *zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, size_t oldsize);

/* Per-arena decay-time accessors; *_set returns true on failure. */
ssize_t arena_dirty_decay_ms_get(arena_t *arena);
bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);

/* Small-allocation fast-path support. */
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
    bool zero);

/* Junk-fill hook, overridable for testing via JET_MUTABLE. */
typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;

/* Allocation / deallocation / reallocation entry points. */
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
    extent_t *extent, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache);

/* dss (sbrk) precedence control. */
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);

/* Process-wide default decay times; *_set returns true on failure. */
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
    size_t *old_limit, size_t *new_limit);

/* Arena lifecycle and bookkeeping. */
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
size_t arena_extent_sn_next(arena_t *arena);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
void arena_boot(void);

/*
 * fork(2) support: the prefork functions are staged (0..7) so that arena
 * mutexes are acquired in a consistent order across all arenas.
 */
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */