#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"

/*
 * External interface of the arena module: tunables (opt_*), shared global
 * state, and the operations other modules invoke on an arena.
 */

/* Decay tunables for dirty and muzzy pages (set via opt.*_decay_ms). */
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;

/* Per-CPU arena mode (opt.percpu_arena) and printable names for each mode. */
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];

/* Precomputed divide-by-usize info, one entry per small size class. */
extern div_info_t arena_binind_div_info[SC_NBINS];

extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
extern malloc_mutex_t arenas_lock;
extern emap_t arena_emap_global;

extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;

/*
 * arena_bin_offsets[binind] is the offset of the first bin shard for size class
 * binind.
 */
extern uint32_t arena_bin_offsets[SC_NBINS];

void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_data_t *bstats, arena_stats_large_t *lstats,
    pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata, size_t oldsize);
/* Get/set the decay time for the given extent state (dirty or muzzy). */
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
    const unsigned nfill);

void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);

void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
    edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
    edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
    size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    void **ptrs, size_t nfill, bool zero);
void arena_boot(sc_data_t *sc_data);
/*
 * Fork hooks: prefork0..8 are called in order before fork(); the postfork
 * pair restores state in the parent and child respectively.
 */
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */