#define	JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static uint64_t		ctl_epoch;
static ctl_stats_t	ctl_stats;

/******************************************************************************/
/* Helpers for named and indexed nodes. */
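
/*
 * A ctl_node_t is viewed as either a ctl_named_node_t or a ctl_indexed_node_t
 * depending on its "named" flag; the helpers below perform that downcast and
 * return NULL when the requested view does not apply.
 */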

JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{

	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index)
{
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

JEMALLOC_INLINE_C const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{

	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define	CTL_PROTO(n)							\
static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#define	INDEX_PROTO(n)							\
static const ctl_named_node_t	*n##_index(tsd_t *tsd,			\
    const size_t *mib, size_t miblen, size_t i);
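
/*
 * For illustration, CTL_PROTO(epoch) expands to the prototype
 *
 *   static int	epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *       void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 *
 * i.e. one read/write handler per mallctl leaf declared below.
 */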

static bool	ctl_arena_init(ctl_arena_stats_t *astats);
static void	ctl_arena_clear(ctl_arena_stats_t *astats);
static void	ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats,
    arena_t *arena);
static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
static void	ctl_arena_refresh(tsd_t *tsd, arena_t *arena, unsigned i);
static bool	ctl_grow(tsd_t *tsd);
static void	ctl_refresh(tsd_t *tsd);
static bool	ctl_init(tsd_t *tsd);
static int	ctl_lookup(tsd_t *tsd, const char *name,
    ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
static void	arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_hchunk_i_size)
INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_huge_allocated)
CTL_PROTO(stats_arenas_i_huge_nmalloc)
CTL_PROTO(stats_arenas_i_huge_ndalloc)
CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
CTL_PROTO(stats_arenas_i_metadata_mapped)
CTL_PROTO(stats_arenas_i_metadata_allocated)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

#define	NAME(n)	{true},	n
#define	CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
#define	CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	{false},	i##_index
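
/*
 * Roughly, each tree entry built from these macros is one of:
 *   {NAME("epoch"),  CTL(epoch)}            terminal named node, handled by
 *                                           epoch_ctl();
 *   {NAME("thread"), CHILD(named, thread)}  internal node whose children are
 *                                           the thread_node[] entries;
 *   {INDEX(arena_i)}                        indexed node; arena_i_index()
 *                                           resolves "arena.<i>" lookups.
 */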

static const ctl_named_node_t	thread_tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_prof_node[] = {
	{NAME("name"),		CTL(thread_prof_name)},
	{NAME("active"),	CTL(thread_prof_active)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, thread_tcache)},
	{NAME("prof"),		CHILD(named, thread_prof)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"),		CTL(config_debug)},
	{NAME("fill"),		CTL(config_fill)},
	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
	{NAME("munmap"),	CTL(config_munmap)},
	{NAME("prof"),		CTL(config_prof)},
	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"),		CTL(config_stats)},
	{NAME("tcache"),	CTL(config_tcache)},
	{NAME("tls"),		CTL(config_tls)},
	{NAME("utrace"),	CTL(config_utrace)},
	{NAME("valgrind"),	CTL(config_valgrind)},
	{NAME("xmalloc"),	CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),		CTL(opt_abort)},
	{NAME("dss"),		CTL(opt_dss)},
	{NAME("lg_chunk"),	CTL(opt_lg_chunk)},
	{NAME("narenas"),	CTL(opt_narenas)},
	{NAME("purge"),		CTL(opt_purge)},
	{NAME("lg_dirty_mult"),	CTL(opt_lg_dirty_mult)},
	{NAME("decay_time"),	CTL(opt_decay_time)},
	{NAME("stats_print"),	CTL(opt_stats_print)},
	{NAME("junk"),		CTL(opt_junk)},
	{NAME("zero"),		CTL(opt_zero)},
	{NAME("quarantine"),	CTL(opt_quarantine)},
	{NAME("redzone"),	CTL(opt_redzone)},
	{NAME("utrace"),	CTL(opt_utrace)},
	{NAME("xmalloc"),	CTL(opt_xmalloc)},
	{NAME("tcache"),	CTL(opt_tcache)},
	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
	{NAME("prof"),		CTL(opt_prof)},
	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
	{NAME("prof_active"),	CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
	{NAME("prof_final"),	CTL(opt_prof_final)},
	{NAME("prof_leak"),	CTL(opt_prof_leak)},
	{NAME("prof_accum"),	CTL(opt_prof_accum)}
};

static const ctl_named_node_t	tcache_node[] = {
	{NAME("create"),	CTL(tcache_create)},
	{NAME("flush"),		CTL(tcache_flush)},
	{NAME("destroy"),	CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("purge"),		CTL(arena_i_purge)},
	{NAME("decay"),		CTL(arena_i_decay)},
	{NAME("dss"),		CTL(arena_i_dss)},
	{NAME("lg_dirty_mult"),	CTL(arena_i_lg_dirty_mult)},
	{NAME("decay_time"),	CTL(arena_i_decay_time)},
	{NAME("chunk_hooks"),	CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),		CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),		CTL(arenas_bin_i_size)},
	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
	{NAME("run_size"),	CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),		CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lrun_i_node[] = {
	{NAME("size"),		CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
	{NAME(""),		CHILD(named, arenas_lrun_i)}
};

static const ctl_indexed_node_t arenas_lrun_node[] = {
	{INDEX(arenas_lrun_i)}
};

static const ctl_named_node_t arenas_hchunk_i_node[] = {
	{NAME("size"),		CTL(arenas_hchunk_i_size)}
};
static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
	{NAME(""),		CHILD(named, arenas_hchunk_i)}
};

static const ctl_indexed_node_t arenas_hchunk_node[] = {
	{INDEX(arenas_hchunk_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),	CTL(arenas_narenas)},
	{NAME("initialized"),	CTL(arenas_initialized)},
	{NAME("lg_dirty_mult"),	CTL(arenas_lg_dirty_mult)},
	{NAME("decay_time"),	CTL(arenas_decay_time)},
	{NAME("quantum"),	CTL(arenas_quantum)},
	{NAME("page"),		CTL(arenas_page)},
	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
	{NAME("nbins"),		CTL(arenas_nbins)},
	{NAME("nhbins"),	CTL(arenas_nhbins)},
	{NAME("bin"),		CHILD(indexed, arenas_bin)},
	{NAME("nlruns"),	CTL(arenas_nlruns)},
	{NAME("lrun"),		CHILD(indexed, arenas_lrun)},
	{NAME("nhchunks"),	CTL(arenas_nhchunks)},
	{NAME("hchunk"),	CHILD(indexed, arenas_hchunk)},
	{NAME("extend"),	CTL(arenas_extend)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("gdump"),		CTL(prof_gdump)},
	{NAME("reset"),		CTL(prof_reset)},
	{NAME("interval"),	CTL(prof_interval)},
	{NAME("lg_sample"),	CTL(lg_prof_sample)}
};

static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
	{NAME("mapped"),	CTL(stats_arenas_i_metadata_mapped)},
	{NAME("allocated"),	CTL(stats_arenas_i_metadata_allocated)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_huge_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_huge_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_huge_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_huge_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_huge_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nruns"),		CTL(stats_arenas_i_bins_j_nruns)},
	{NAME("nreruns"),	CTL(stats_arenas_i_bins_j_nreruns)},
	{NAME("curruns"),	CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_lruns_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_lruns_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_lruns_j_nrequests)},
	{NAME("curruns"),	CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_lruns_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
	{INDEX(stats_arenas_i_lruns_j)}
};

static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_hchunks_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_hchunks_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_hchunks_j_nrequests)},
	{NAME("curhchunks"),	CTL(stats_arenas_i_hchunks_j_curhchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_hchunks_j)}
};

static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
	{INDEX(stats_arenas_i_hchunks_j)}
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
	{NAME("dss"),		CTL(stats_arenas_i_dss)},
	{NAME("lg_dirty_mult"),	CTL(stats_arenas_i_lg_dirty_mult)},
	{NAME("decay_time"),	CTL(stats_arenas_i_decay_time)},
	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
	{NAME("npurge"),	CTL(stats_arenas_i_npurge)},
	{NAME("nmadvise"),	CTL(stats_arenas_i_nmadvise)},
	{NAME("purged"),	CTL(stats_arenas_i_purged)},
	{NAME("metadata"),	CHILD(named, stats_arenas_i_metadata)},
	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
	{NAME("huge"),		CHILD(named, stats_arenas_i_huge)},
	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lruns"),		CHILD(indexed, stats_arenas_i_lruns)},
	{NAME("hchunks"),	CHILD(indexed, stats_arenas_i_hchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_node[] = {
	{NAME("cactive"),	CTL(stats_cactive)},
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("tcache"),	CHILD(named, tcache)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX
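
/*
 * For illustration (application's view, not part of this file): reading
 * "stats.arenas.0.pactive" makes ctl_lookup() walk super_root_node ->
 * root_node["stats"] -> stats_node["arenas"] -> stats_arenas_node (indexed,
 * resolving "0") -> stats_arenas_i_node["pactive"], whose handler is
 * stats_arenas_i_pactive_ctl().  The same path can be precomputed with
 * mallctlnametomib() and then reused via mallctlbymib().
 */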

/******************************************************************************/

static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

	if (astats->lstats == NULL) {
		astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (astats->lstats == NULL)
			return (true);
	}

	if (astats->hstats == NULL) {
		astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (astats->hstats == NULL)
			return (true);
	}

	return (false);
}

static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{

	astats->nthreads = 0;
	astats->dss = dss_prec_names[dss_prec_limit];
	astats->lg_dirty_mult = -1;
	astats->decay_time = -1;
	astats->pactive = 0;
	astats->pdirty = 0;
	if (config_stats) {
		memset(&astats->astats, 0, sizeof(arena_stats_t));
		astats->allocated_small = 0;
		astats->nmalloc_small = 0;
		astats->ndalloc_small = 0;
		astats->nrequests_small = 0;
		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
		memset(astats->lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		memset(astats->hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
	}
}

static void
ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats, arena_t *arena)
{
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsd, arena, &cstats->nthreads, &cstats->dss,
		    &cstats->lg_dirty_mult, &cstats->decay_time,
		    &cstats->pactive, &cstats->pdirty, &cstats->astats,
		    cstats->bstats, cstats->lstats, cstats->hstats);

		for (i = 0; i < NBINS; i++) {
			cstats->allocated_small += cstats->bstats[i].curregs *
			    index2size(i);
			cstats->nmalloc_small += cstats->bstats[i].nmalloc;
			cstats->ndalloc_small += cstats->bstats[i].ndalloc;
			cstats->nrequests_small += cstats->bstats[i].nrequests;
		}
	} else {
		arena_basic_stats_merge(tsd, arena, &cstats->nthreads,
		    &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
		    &cstats->pactive, &cstats->pdirty);
	}
}
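
/*
 * Note: the per-bin rollup above is what keeps "stats.arenas.<i>.small.*"
 * consistent with the individual "stats.arenas.<i>.bins.<j>.*" counters;
 * allocated_small is derived here from curregs * index2size(i) rather than
 * read from a dedicated counter.
 */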

static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
	unsigned i;

	sstats->nthreads += astats->nthreads;
	sstats->pactive += astats->pactive;
	sstats->pdirty += astats->pdirty;

	if (config_stats) {
		sstats->astats.mapped += astats->astats.mapped;
		sstats->astats.npurge += astats->astats.npurge;
		sstats->astats.nmadvise += astats->astats.nmadvise;
		sstats->astats.purged += astats->astats.purged;

		sstats->astats.metadata_mapped +=
		    astats->astats.metadata_mapped;
		sstats->astats.metadata_allocated +=
		    astats->astats.metadata_allocated;

		sstats->allocated_small += astats->allocated_small;
		sstats->nmalloc_small += astats->nmalloc_small;
		sstats->ndalloc_small += astats->ndalloc_small;
		sstats->nrequests_small += astats->nrequests_small;

		sstats->astats.allocated_large +=
		    astats->astats.allocated_large;
		sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
		sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
		sstats->astats.nrequests_large +=
		    astats->astats.nrequests_large;

		sstats->astats.allocated_huge += astats->astats.allocated_huge;
		sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
		sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;

		for (i = 0; i < NBINS; i++) {
			sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			sstats->bstats[i].curregs += astats->bstats[i].curregs;
			if (config_tcache) {
				sstats->bstats[i].nfills +=
				    astats->bstats[i].nfills;
				sstats->bstats[i].nflushes +=
				    astats->bstats[i].nflushes;
			}
			sstats->bstats[i].nruns += astats->bstats[i].nruns;
			sstats->bstats[i].reruns += astats->bstats[i].reruns;
			sstats->bstats[i].curruns += astats->bstats[i].curruns;
		}

		for (i = 0; i < nlclasses; i++) {
			sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
			sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
			sstats->lstats[i].nrequests +=
			    astats->lstats[i].nrequests;
			sstats->lstats[i].curruns += astats->lstats[i].curruns;
		}

		for (i = 0; i < nhclasses; i++) {
			sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
			sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
			sstats->hstats[i].curhchunks +=
			    astats->hstats[i].curhchunks;
		}
	}
}

static void
ctl_arena_refresh(tsd_t *tsd, arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

	ctl_arena_clear(astats);
	ctl_arena_stats_amerge(tsd, astats, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_smerge(sstats, astats);
}
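
/*
 * ctl_stats.arenas[] holds one element per arena plus a trailing summary
 * element at index ctl_stats.narenas; ctl_arena_refresh() fills the per-arena
 * slot and folds it into that summary, which backs the whole-process
 * "stats.*" mallctls.
 */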

static bool
ctl_grow(tsd_t *tsd)
{
	ctl_arena_stats_t *astats;

	/* Initialize new arena. */
	if (arena_init(tsd, ctl_stats.narenas) == NULL)
		return (true);

	/* Allocate extended arena stats. */
	astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
	    sizeof(ctl_arena_stats_t));
	if (astats == NULL)
		return (true);

	/* Initialize the new astats element. */
	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
		a0dalloc(astats);
		return (true);
	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
		memcpy(&tstats, &astats[ctl_stats.narenas],
		    sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas],
		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	}
	a0dalloc(ctl_stats.arenas);
	ctl_stats.arenas = astats;
	ctl_stats.narenas++;

	return (false);
}
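
/*
 * Note on ctl_grow() ("arenas.extend"): the replacement array has narenas + 2
 * elements; the old contents (including the summary element) are copied over,
 * a fresh element is initialized at the end, and the two are swapped so that
 * the summary again occupies the last slot once ctl_stats.narenas is
 * incremented.
 */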

static void
ctl_refresh(tsd_t *tsd)
{
	unsigned i;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

	for (i = 0; i < ctl_stats.narenas; i++)
		tarenas[i] = arena_get(tsd, i, false);

	for (i = 0; i < ctl_stats.narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tsd, tarenas[i], i);
	}

	if (config_stats) {
		size_t base_allocated, base_resident, base_mapped;
		base_stats_get(tsd, &base_allocated, &base_resident,
		    &base_mapped);
		ctl_stats.allocated =
		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
		ctl_stats.active =
		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
		ctl_stats.metadata = base_allocated +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
		    ctl_stats.arenas[ctl_stats.narenas].astats
		    .metadata_allocated;
		ctl_stats.resident = base_resident +
		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
		    ((ctl_stats.arenas[ctl_stats.narenas].pactive +
		    ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
		ctl_stats.mapped = base_mapped +
		    ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
	}

	ctl_epoch++;
}
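
/*
 * Per the documented "epoch" mallctl semantics, its handler (defined later in
 * this file) ends up calling ctl_refresh(), so "stats.*" readers observe a
 * snapshot taken at the most recent refresh rather than live counters.
 */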

static bool
ctl_init(tsd_t *tsd)
{
	bool ret;

	malloc_mutex_lock(tsd, &ctl_mtx);
	if (!ctl_initialized) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		ctl_stats.narenas = narenas_total_get();
		ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto label_return;
		}
		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= ctl_stats.narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					unsigned j;
					for (j = 0; j < i; j++) {
						a0dalloc(
						    ctl_stats.arenas[j].lstats);
						a0dalloc(
						    ctl_stats.arenas[j].hstats);
					}
					a0dalloc(ctl_stats.arenas);
					ctl_stats.arenas = NULL;
					ret = true;
					goto label_return;
				}
			}
		}
		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh(tsd);
		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsd, &ctl_mtx);
	return (ret);
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_lookup(tsd_t *tsd, const char *name, ctl_node_t const **nodesp,
|
|
|
|
size_t *mibp, size_t *depthp)
|
2010-01-28 05:10:55 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *elm, *tdot, *dot;
|
|
|
|
size_t elen, i, j;
|
2012-04-20 14:38:42 +08:00
|
|
|
const ctl_named_node_t *node;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
|
|
|
elm = name;
|
|
|
|
/* Equivalent to strchrnul(). */
|
|
|
|
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
|
|
|
|
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
|
|
|
|
if (elen == 0) {
|
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
node = super_root_node;
|
|
|
|
for (i = 0; i < *depthp; i++) {
|
2012-04-20 14:38:42 +08:00
|
|
|
assert(node);
|
|
|
|
assert(node->nchildren > 0);
|
|
|
|
if (ctl_named_node(node->children) != NULL) {
|
|
|
|
const ctl_named_node_t *pnode = node;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
|
|
|
/* Children are named. */
|
2012-04-20 14:38:42 +08:00
|
|
|
for (j = 0; j < node->nchildren; j++) {
|
|
|
|
const ctl_named_node_t *child =
|
|
|
|
ctl_named_children(node, j);
|
|
|
|
if (strlen(child->name) == elen &&
|
|
|
|
strncmp(elm, child->name, elen) == 0) {
|
2010-01-28 05:10:55 +08:00
|
|
|
node = child;
|
|
|
|
if (nodesp != NULL)
|
2012-04-20 14:38:42 +08:00
|
|
|
nodesp[i] =
|
|
|
|
(const ctl_node_t *)node;
|
2010-01-28 05:10:55 +08:00
|
|
|
mibp[i] = j;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (node == pnode) {
|
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
} else {
|
2012-02-03 14:04:57 +08:00
|
|
|
uintmax_t index;
|
2012-04-20 14:38:42 +08:00
|
|
|
const ctl_indexed_node_t *inode;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
|
|
|
/* Children are indexed. */
|
2012-02-03 14:04:57 +08:00
|
|
|
index = malloc_strtoumax(elm, NULL, 10);
|
|
|
|
if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
2012-04-20 14:38:42 +08:00
|
|
|
inode = ctl_indexed_node(node->children);
|
2016-04-14 14:36:15 +08:00
|
|
|
node = inode->index(tsd, mibp, *depthp, (size_t)index);
|
2010-01-28 05:10:55 +08:00
|
|
|
if (node == NULL) {
|
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nodesp != NULL)
|
2012-04-20 14:38:42 +08:00
|
|
|
nodesp[i] = (const ctl_node_t *)node;
|
2010-01-28 05:10:55 +08:00
|
|
|
mibp[i] = (size_t)index;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (node->ctl != NULL) {
|
|
|
|
/* Terminal node. */
|
|
|
|
if (*dot != '\0') {
|
|
|
|
/*
|
|
|
|
* The name contains more elements than are
|
|
|
|
* in this path through the tree.
|
|
|
|
*/
|
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
/* Complete lookup successful. */
|
|
|
|
*depthp = i + 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update elm. */
|
|
|
|
if (*dot == '\0') {
|
|
|
|
/* No more elements. */
|
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
elm = &dot[1];
|
|
|
|
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
|
|
|
|
strchr(elm, '\0');
|
|
|
|
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
|
|
|
|
void *newp, size_t newlen)
|
2010-01-28 05:10:55 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
size_t depth;
|
|
|
|
ctl_node_t const *nodes[CTL_MAX_DEPTH];
|
|
|
|
size_t mib[CTL_MAX_DEPTH];
|
2012-04-20 14:38:42 +08:00
|
|
|
const ctl_named_node_t *node;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
if (!ctl_initialized && ctl_init(tsd)) {
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = EAGAIN;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
depth = CTL_MAX_DEPTH;
|
2016-04-14 14:36:15 +08:00
|
|
|
ret = ctl_lookup(tsd, name, nodes, mib, &depth);
|
2010-01-28 05:10:55 +08:00
|
|
|
if (ret != 0)
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2012-04-20 14:38:42 +08:00
|
|
|
node = ctl_named_node(nodes[depth-1]);
|
|
|
|
if (node != NULL && node->ctl)
|
2016-04-14 14:36:15 +08:00
|
|
|
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
|
2012-04-20 14:38:42 +08:00
|
|
|
else {
|
2010-01-28 05:10:55 +08:00
|
|
|
/* The name refers to a partial path through the ctl tree. */
|
|
|
|
ret = ENOENT;
|
|
|
|
}
|
|
|
|
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret);
|
|
|
|
}
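/*
 * Usage sketch (illustrative; assumes the public jemalloc API in
 * <jemalloc/jemalloc.h>, not code from this file): ctl_byname() is the
 * internal entry point behind mallctl(), which resolves a dotted name and
 * dispatches to the terminal node's handler in one call.  Reading a couple of
 * read-only nodes looks roughly like this:
 *
 *        unsigned narenas;
 *        size_t lg_chunk, sz;
 *
 *        sz = sizeof(narenas);
 *        if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) != 0)
 *                abort();        (ENOENT/EINVAL/EPERM per the handlers above)
 *        sz = sizeof(lg_chunk);
 *        if (mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0) != 0)
 *                abort();
 */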
|
|
|
|
|
|
|
|
int
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp)
|
2010-01-28 05:10:55 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
if (!ctl_initialized && ctl_init(tsd)) {
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = EAGAIN;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
ret = ctl_lookup(tsd, name, NULL, mibp, miblenp);
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2010-01-28 05:10:55 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2012-04-20 14:38:42 +08:00
|
|
|
const ctl_named_node_t *node;
|
2010-01-28 05:10:55 +08:00
|
|
|
size_t i;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
if (!ctl_initialized && ctl_init(tsd)) {
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = EAGAIN;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Iterate down the tree. */
|
|
|
|
node = super_root_node;
|
|
|
|
for (i = 0; i < miblen; i++) {
|
2012-04-20 14:38:42 +08:00
|
|
|
assert(node);
|
|
|
|
assert(node->nchildren > 0);
|
|
|
|
if (ctl_named_node(node->children) != NULL) {
|
2010-01-28 05:10:55 +08:00
|
|
|
/* Children are named. */
|
2016-02-25 03:00:40 +08:00
|
|
|
if (node->nchildren <= (unsigned)mib[i]) {
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
2012-04-20 14:38:42 +08:00
|
|
|
node = ctl_named_children(node, mib[i]);
|
2010-01-28 05:10:55 +08:00
|
|
|
} else {
|
2012-04-20 14:38:42 +08:00
|
|
|
const ctl_indexed_node_t *inode;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
|
|
|
/* Indexed element. */
|
2012-04-20 14:38:42 +08:00
|
|
|
inode = ctl_indexed_node(node->children);
|
2016-04-14 14:36:15 +08:00
|
|
|
node = inode->index(tsd, mib, miblen, mib[i]);
|
2010-01-28 05:10:55 +08:00
|
|
|
if (node == NULL) {
|
|
|
|
ret = ENOENT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Call the ctl function. */
|
2012-04-20 14:38:42 +08:00
|
|
|
if (node && node->ctl)
|
2016-04-14 14:36:15 +08:00
|
|
|
ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
|
2012-04-20 14:38:42 +08:00
|
|
|
else {
|
2010-01-28 05:10:55 +08:00
|
|
|
/* Partial MIB. */
|
|
|
|
ret = ENOENT;
|
|
|
|
}
|
|
|
|
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret);
|
|
|
|
}
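/*
 * Usage sketch (illustrative; assumes the public jemalloc API, not code from
 * this file): ctl_nametomib() and ctl_bymib() back the public
 * mallctlnametomib()/mallctlbymib() pair, which lets a caller translate a name
 * once and then reuse the MIB, patching the indexed component on each
 * iteration (the pattern from the jemalloc manual):
 *
 *        unsigned i, nbins;
 *        size_t mib[4], miblen, sz;
 *
 *        sz = sizeof(nbins);
 *        mallctl("arenas.nbins", &nbins, &sz, NULL, 0);
 *        miblen = sizeof(mib) / sizeof(mib[0]);
 *        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *        for (i = 0; i < nbins; i++) {
 *                size_t bin_size;
 *                mib[2] = i;
 *                sz = sizeof(bin_size);
 *                mallctlbymib(mib, miblen, &bin_size, &sz, NULL, 0);
 *                (bin_size now holds the size of bin class i)
 *        }
 */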
|
|
|
|
|
|
|
|
bool
|
|
|
|
ctl_boot(void)
|
|
|
|
{
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
|
2010-01-28 05:10:55 +08:00
|
|
|
return (true);
|
|
|
|
|
|
|
|
ctl_initialized = false;
|
|
|
|
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
2012-10-10 05:46:22 +08:00
|
|
|
void
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_prefork(tsd_t *tsd)
|
2012-10-10 05:46:22 +08:00
|
|
|
{
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_prefork(tsd, &ctl_mtx);
|
2012-10-10 05:46:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_postfork_parent(tsd_t *tsd)
|
2012-10-10 05:46:22 +08:00
|
|
|
{
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_postfork_parent(tsd, &ctl_mtx);
|
2012-10-10 05:46:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_postfork_child(tsd_t *tsd)
|
2012-10-10 05:46:22 +08:00
|
|
|
{
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_postfork_child(tsd, &ctl_mtx);
|
2012-10-10 05:46:22 +08:00
|
|
|
}
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
/* *_ctl() functions. */
|
|
|
|
|
|
|
|
#define READONLY() do { \
|
|
|
|
if (newp != NULL || newlen != 0) { \
|
|
|
|
ret = EPERM; \
|
2012-04-26 04:12:46 +08:00
|
|
|
goto label_return; \
|
2010-01-28 05:10:55 +08:00
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
2010-03-03 03:57:30 +08:00
|
|
|
#define WRITEONLY() do { \
|
2010-01-28 05:10:55 +08:00
|
|
|
if (oldp != NULL || oldlenp != NULL) { \
|
|
|
|
ret = EPERM; \
|
2012-04-26 04:12:46 +08:00
|
|
|
goto label_return; \
|
2010-01-28 05:10:55 +08:00
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
2014-10-04 14:25:30 +08:00
|
|
|
#define READ_XOR_WRITE() do { \
|
|
|
|
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
|
|
|
|
newlen != 0)) { \
|
|
|
|
ret = EPERM; \
|
|
|
|
goto label_return; \
|
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
#define READ(v, t) do { \
|
|
|
|
if (oldp != NULL && oldlenp != NULL) { \
|
|
|
|
if (*oldlenp != sizeof(t)) { \
|
|
|
|
size_t copylen = (sizeof(t) <= *oldlenp) \
|
|
|
|
? sizeof(t) : *oldlenp; \
|
2012-11-30 14:13:04 +08:00
|
|
|
memcpy(oldp, (void *)&(v), copylen); \
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = EINVAL; \
|
2012-04-26 04:12:46 +08:00
|
|
|
goto label_return; \
|
Generalize chunk management hooks.
Add the "arena.<i>.chunk_hooks" mallctl, which replaces and expands on
the "arena.<i>.chunk.{alloc,dalloc,purge}" mallctls. The chunk hooks
allow control over chunk allocation/deallocation, decommit/commit,
purging, and splitting/merging, such that the application can rely on
jemalloc's internal chunk caching and retaining functionality, yet
implement a variety of chunk management mechanisms and policies.
Merge the chunks_[sz]ad_{mmap,dss} red-black trees into
chunks_[sz]ad_retained. This slightly reduces how hard jemalloc tries
to honor the dss precedence setting; prior to this change the precedence
setting was also consulted when recycling chunks.
Fix chunk purging. Don't purge chunks in arena_purge_stashed(); instead
deallocate them in arena_unstash_purged(), so that the dirty memory
linkage remains valid until after the last time it is used.
This resolves #176 and #201.
2015-07-28 23:28:19 +08:00
|
|
|
} \
|
|
|
|
*(t *)oldp = (v); \
|
2010-01-28 05:10:55 +08:00
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define WRITE(v, t) do { \
|
|
|
|
if (newp != NULL) { \
|
|
|
|
if (newlen != sizeof(t)) { \
|
|
|
|
ret = EINVAL; \
|
2012-04-26 04:12:46 +08:00
|
|
|
goto label_return; \
|
2010-01-28 05:10:55 +08:00
|
|
|
} \
|
2012-11-30 14:13:04 +08:00
|
|
|
(v) = *(t *)newp; \
|
2010-01-28 05:10:55 +08:00
|
|
|
} \
|
|
|
|
} while (0)
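/*
 * Usage sketch (illustrative; assumes the public jemalloc API and a build with
 * profiling enabled, not code from this file): READ()/WRITE() above implement
 * the "old/new" protocol that every *_ctl() handler follows; oldp/oldlenp
 * receive the previous value, newp/newlen supply a replacement, and either
 * side may be omitted.  From the application this is a single mallctl() call
 * that installs a new value while returning the old one:
 *
 *        bool old_active, new_active = false;
 *        size_t sz = sizeof(old_active);
 *
 *        if (mallctl("prof.active", &old_active, &sz, &new_active,
 *            sizeof(new_active)) != 0)
 *                abort();
 *        (old_active is the setting that was in effect before the call)
 */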
|
|
|
|
|
2012-02-11 12:22:09 +08:00
|
|
|
/*
|
|
|
|
* There's a lot of code duplication in the following macros due to limitations
|
|
|
|
* in how nested cpp macros are expanded.
|
|
|
|
*/
|
|
|
|
#define CTL_RO_CLGEN(c, l, n, v, t) \
|
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2012-02-11 12:22:09 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
|
|
|
t oldval; \
|
|
|
|
\
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!(c)) \
|
2012-02-11 12:22:09 +08:00
|
|
|
return (ENOENT); \
|
|
|
|
if (l) \
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx); \
|
2012-02-11 12:22:09 +08:00
|
|
|
READONLY(); \
|
2012-11-30 14:13:04 +08:00
|
|
|
oldval = (v); \
|
2012-02-11 12:22:09 +08:00
|
|
|
READ(oldval, t); \
|
|
|
|
\
|
|
|
|
ret = 0; \
|
2012-04-26 04:12:46 +08:00
|
|
|
label_return: \
|
2012-02-11 12:22:09 +08:00
|
|
|
if (l) \
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx); \
|
2012-02-11 12:22:09 +08:00
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define CTL_RO_CGEN(c, n, v, t) \
|
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2012-02-11 12:22:09 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
|
|
|
t oldval; \
|
|
|
|
\
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!(c)) \
|
2012-02-11 12:22:09 +08:00
|
|
|
return (ENOENT); \
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx); \
|
2012-02-11 12:22:09 +08:00
|
|
|
READONLY(); \
|
2012-11-30 14:13:04 +08:00
|
|
|
oldval = (v); \
|
2012-02-11 12:22:09 +08:00
|
|
|
READ(oldval, t); \
|
|
|
|
\
|
|
|
|
ret = 0; \
|
2012-04-26 04:12:46 +08:00
|
|
|
label_return: \
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx); \
|
2012-02-11 12:22:09 +08:00
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
#define CTL_RO_GEN(n, v, t) \
|
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2010-11-25 07:44:21 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
|
|
|
t oldval; \
|
|
|
|
\
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx); \
|
2010-11-25 07:44:21 +08:00
|
|
|
READONLY(); \
|
2012-11-30 14:13:04 +08:00
|
|
|
oldval = (v); \
|
2010-11-25 07:44:21 +08:00
|
|
|
READ(oldval, t); \
|
|
|
|
\
|
|
|
|
ret = 0; \
|
2012-04-26 04:12:46 +08:00
|
|
|
label_return: \
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx); \
|
2010-11-25 07:44:21 +08:00
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ctl_mtx is not acquired, under the assumption that no pertinent data will
|
|
|
|
* mutate during the call.
|
|
|
|
*/
|
2012-02-11 12:22:09 +08:00
|
|
|
#define CTL_RO_NL_CGEN(c, n, v, t) \
|
2010-11-25 07:44:21 +08:00
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2010-01-28 05:10:55 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
|
|
|
t oldval; \
|
|
|
|
\
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!(c)) \
|
2012-02-11 12:22:09 +08:00
|
|
|
return (ENOENT); \
|
2010-01-28 05:10:55 +08:00
|
|
|
READONLY(); \
|
2012-11-30 14:13:04 +08:00
|
|
|
oldval = (v); \
|
2010-01-28 05:10:55 +08:00
|
|
|
READ(oldval, t); \
|
|
|
|
\
|
|
|
|
ret = 0; \
|
2012-04-26 04:12:46 +08:00
|
|
|
label_return: \
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
2012-02-11 12:22:09 +08:00
|
|
|
#define CTL_RO_NL_GEN(n, v, t) \
|
2010-01-28 05:10:55 +08:00
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2010-01-28 05:10:55 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
2012-02-11 12:22:09 +08:00
|
|
|
t oldval; \
|
2010-01-28 05:10:55 +08:00
|
|
|
\
|
|
|
|
READONLY(); \
|
2012-11-30 14:13:04 +08:00
|
|
|
oldval = (v); \
|
2012-02-11 12:22:09 +08:00
|
|
|
READ(oldval, t); \
|
2010-01-28 05:10:55 +08:00
|
|
|
\
|
|
|
|
ret = 0; \
|
2012-04-26 04:12:46 +08:00
|
|
|
label_return: \
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
2014-09-23 12:09:23 +08:00
|
|
|
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
|
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2014-09-23 12:09:23 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
|
|
|
t oldval; \
|
|
|
|
\
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!(c)) \
|
2014-09-23 12:09:23 +08:00
|
|
|
return (ENOENT); \
|
|
|
|
READONLY(); \
|
|
|
|
oldval = (m(tsd)); \
|
|
|
|
READ(oldval, t); \
|
|
|
|
\
|
|
|
|
ret = 0; \
|
|
|
|
label_return: \
|
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
2016-02-08 06:23:22 +08:00
|
|
|
#define CTL_RO_CONFIG_GEN(n, t) \
|
2010-01-28 05:10:55 +08:00
|
|
|
static int \
|
2016-04-14 14:36:15 +08:00
|
|
|
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen) \
|
2010-01-28 05:10:55 +08:00
|
|
|
{ \
|
|
|
|
int ret; \
|
2016-02-08 06:23:22 +08:00
|
|
|
t oldval; \
|
2010-01-28 05:10:55 +08:00
|
|
|
\
|
|
|
|
READONLY(); \
|
2012-02-11 12:22:09 +08:00
|
|
|
oldval = n; \
|
2016-02-08 06:23:22 +08:00
|
|
|
READ(oldval, t); \
|
2010-01-28 05:10:55 +08:00
|
|
|
\
|
|
|
|
ret = 0; \
|
2012-04-26 04:12:46 +08:00
|
|
|
label_return: \
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret); \
|
|
|
|
}
|
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
|
2010-11-25 07:44:21 +08:00
|
|
|
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
|
2010-03-03 05:01:16 +08:00
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2010-01-28 05:10:55 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2013-10-20 08:19:49 +08:00
|
|
|
UNUSED uint64_t newval;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2010-01-28 05:10:55 +08:00
|
|
|
WRITE(newval, uint64_t);
|
2012-04-26 04:12:46 +08:00
|
|
|
if (newp != NULL)
|
2016-04-14 14:36:15 +08:00
|
|
|
ctl_refresh(tsd);
|
2010-01-28 05:10:55 +08:00
|
|
|
READ(ctl_epoch, uint64_t);
|
|
|
|
|
|
|
|
ret = 0;
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret);
|
|
|
|
}
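/*
 * Usage sketch (illustrative; assumes the public jemalloc API and a build with
 * statistics enabled, not code from this file): epoch_ctl() backs the "epoch"
 * mallctl; writing any value triggers ctl_refresh(), so statistics readers
 * bump the epoch before sampling the stats.* nodes:
 *
 *        uint64_t epoch = 1;
 *        size_t allocated, sz;
 *
 *        sz = sizeof(epoch);
 *        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));
 *        sz = sizeof(allocated);
 *        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */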
|
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
/******************************************************************************/
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2016-02-08 06:23:22 +08:00
|
|
|
CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_debug, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_fill, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
|
|
|
|
CTL_RO_CONFIG_GEN(config_munmap, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_prof, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_stats, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_tcache, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_tls, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_utrace, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_valgrind, bool)
|
|
|
|
CTL_RO_CONFIG_GEN(config_xmalloc, bool)
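/*
 * Usage sketch (illustrative; assumes the public jemalloc API, not code from
 * this file): each CTL_RO_CONFIG_GEN entry above exposes a compile-time option
 * as a read-only value under the "config." prefix, so a program can test how
 * jemalloc was built before relying on optional features:
 *
 *        bool stats_enabled;
 *        size_t sz = sizeof(stats_enabled);
 *
 *        mallctl("config.stats", &stats_enabled, &sz, NULL, 0);
 */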
|
2012-02-11 12:22:09 +08:00
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
/******************************************************************************/
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
|
|
|
|
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
|
|
|
|
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
|
2016-02-25 03:03:40 +08:00
|
|
|
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
|
2016-02-20 12:09:31 +08:00
|
|
|
CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
|
2013-12-20 13:40:41 +08:00
|
|
|
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
|
2016-02-20 12:09:31 +08:00
|
|
|
CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
|
2013-12-20 13:40:41 +08:00
|
|
|
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
|
2014-12-09 05:12:41 +08:00
|
|
|
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
|
2013-12-20 13:40:41 +08:00
|
|
|
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
|
|
|
|
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
|
2014-10-04 14:25:30 +08:00
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
|
|
|
|
opt_prof_thread_active_init, bool)
|
2013-12-20 13:40:41 +08:00
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
|
|
|
|
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
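/*
 * Usage sketch (illustrative; assumes the public jemalloc API, not code from
 * this file): the "opt." nodes above report option values fixed while parsing
 * malloc_conf; they are read-only and, per the CTL_RO_NL_* comment, do not
 * acquire ctl_mtx:
 *
 *        bool abort_on_err;
 *        ssize_t decay_time;
 *        size_t sz;
 *
 *        sz = sizeof(abort_on_err);
 *        mallctl("opt.abort", &abort_on_err, &sz, NULL, 0);
 *        sz = sizeof(decay_time);
 *        mallctl("opt.decay_time", &decay_time, &sz, NULL, 0);
 */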
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
/******************************************************************************/
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2010-08-14 06:42:29 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2010-08-14 06:42:29 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2015-01-30 07:30:47 +08:00
|
|
|
arena_t *oldarena;
|
2010-08-14 06:42:29 +08:00
|
|
|
unsigned newind, oldind;
|
|
|
|
|
2015-01-30 07:30:47 +08:00
|
|
|
oldarena = arena_choose(tsd, NULL);
|
|
|
|
if (oldarena == NULL)
|
2014-10-08 14:14:57 +08:00
|
|
|
return (EAGAIN);
|
2014-09-23 12:09:23 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2015-01-30 07:30:47 +08:00
|
|
|
newind = oldind = oldarena->ind;
|
2011-03-15 02:39:49 +08:00
|
|
|
WRITE(newind, unsigned);
|
|
|
|
READ(oldind, unsigned);
|
2010-08-14 06:42:29 +08:00
|
|
|
if (newind != oldind) {
|
2015-01-30 07:30:47 +08:00
|
|
|
arena_t *newarena;
|
|
|
|
|
2012-10-12 04:53:15 +08:00
|
|
|
if (newind >= ctl_stats.narenas) {
|
2010-08-14 06:42:29 +08:00
|
|
|
/* New arena index is out of range. */
|
|
|
|
ret = EFAULT;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-08-14 06:42:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize arena if necessary. */
|
2016-04-14 14:36:15 +08:00
|
|
|
newarena = arena_get(tsd, newind, true);
|
2015-01-30 07:30:47 +08:00
|
|
|
if (newarena == NULL) {
|
2010-08-14 06:42:29 +08:00
|
|
|
ret = EAGAIN;
|
2012-04-11 06:07:44 +08:00
|
|
|
goto label_return;
|
2010-08-14 06:42:29 +08:00
|
|
|
}
|
2014-10-08 14:14:57 +08:00
|
|
|
/* Set new arena/tcache associations. */
|
|
|
|
arena_migrate(tsd, oldind, newind);
|
2012-02-11 12:22:09 +08:00
|
|
|
if (config_tcache) {
|
2014-09-23 12:09:23 +08:00
|
|
|
tcache_t *tcache = tsd_tcache_get(tsd);
|
2015-01-30 07:30:47 +08:00
|
|
|
if (tcache != NULL) {
|
2016-04-14 14:36:15 +08:00
|
|
|
tcache_arena_reassociate(tsd, tcache, oldarena,
|
2015-01-30 07:30:47 +08:00
|
|
|
newarena);
|
|
|
|
}
|
2010-12-30 04:21:05 +08:00
|
|
|
}
|
2010-08-14 06:42:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2010-08-14 06:42:29 +08:00
|
|
|
return (ret);
|
|
|
|
}
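/*
 * Usage sketch (illustrative; assumes the public jemalloc API, not code from
 * this file): "thread.arena" is a read/write unsigned; reading returns the
 * calling thread's current arena index and writing rebinds the thread (and its
 * tcache) to another arena:
 *
 *        unsigned old_ind, new_ind = 0;
 *        size_t sz = sizeof(old_ind);
 *
 *        if (mallctl("thread.arena", &old_ind, &sz, &new_ind,
 *            sizeof(new_ind)) != 0)
 *                abort();
 *        (EFAULT if new_ind is out of range, EAGAIN if the arena cannot be
 *        initialized, per the handler above)
 */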
|
|
|
|
|
2014-09-23 12:09:23 +08:00
|
|
|
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
|
|
|
|
uint64_t)
|
|
|
|
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
|
|
|
|
uint64_t *)
|
|
|
|
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
|
|
|
|
uint64_t)
|
|
|
|
CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
|
|
|
|
tsd_thread_deallocatedp_get, uint64_t *)
|
2010-10-21 08:39:18 +08:00
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
|
|
|
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
|
2013-12-20 13:40:41 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
bool oldval;
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!config_tcache)
|
2013-12-20 13:40:41 +08:00
|
|
|
return (ENOENT);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
oldval = tcache_enabled_get();
|
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(bool)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
tcache_enabled_set(*(bool *)newp);
|
|
|
|
}
|
|
|
|
READ(oldval, bool);
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2013-12-20 13:40:41 +08:00
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
|
|
|
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
|
2013-12-20 13:40:41 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!config_tcache)
|
2013-12-20 13:40:41 +08:00
|
|
|
return (ENOENT);
|
|
|
|
|
|
|
|
READONLY();
|
|
|
|
WRITEONLY();
|
|
|
|
|
|
|
|
tcache_flush();
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
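/*
 * Usage sketch (illustrative; assumes the public jemalloc API and a build with
 * tcache enabled, not code from this file): "thread.tcache.enabled" and
 * "thread.tcache.flush" act on the calling thread's cache only; the flush node
 * is void, so both the old and new sides are left empty:
 *
 *        bool enabled = false;
 *        mallctl("thread.tcache.enabled", NULL, NULL, &enabled,
 *            sizeof(enabled));
 *        mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */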
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2014-08-19 07:22:13 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2014-08-19 07:22:13 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!config_prof)
|
2014-08-19 07:22:13 +08:00
|
|
|
return (ENOENT);
|
|
|
|
|
2014-10-04 14:25:30 +08:00
|
|
|
READ_XOR_WRITE();
|
|
|
|
|
2014-08-19 07:22:13 +08:00
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(const char *)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
2014-09-23 12:09:23 +08:00
|
|
|
|
2014-10-04 14:25:30 +08:00
|
|
|
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
|
|
|
|
0)
|
2014-08-19 07:22:13 +08:00
|
|
|
goto label_return;
|
2014-10-04 14:25:30 +08:00
|
|
|
} else {
|
2016-04-14 14:36:15 +08:00
|
|
|
const char *oldname = prof_thread_name_get(tsd);
|
2014-10-04 14:25:30 +08:00
|
|
|
READ(oldname, const char *);
|
2014-08-19 07:22:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2014-08-19 07:22:13 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
bool oldval;
|
|
|
|
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!config_prof)
|
2014-08-19 07:22:13 +08:00
|
|
|
return (ENOENT);
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
oldval = prof_thread_active_get(tsd);
|
2014-08-19 07:22:13 +08:00
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(bool)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
2016-04-14 14:36:15 +08:00
|
|
|
if (prof_thread_active_set(tsd, *(bool *)newp)) {
|
2014-08-19 07:22:13 +08:00
|
|
|
ret = EAGAIN;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
READ(oldval, bool);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
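/*
 * Usage sketch (illustrative; assumes the public jemalloc API and a build with
 * profiling enabled, not code from this file; the thread name is just an
 * example string): a thread can label itself and toggle its own sampling with
 * the two nodes above:
 *
 *        const char *name = "worker-3";
 *        bool active = false;
 *
 *        mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
 *        mallctl("thread.prof.active", NULL, NULL, &active, sizeof(active));
 */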
|
|
|
|
|
2012-10-12 04:53:15 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
|
2015-01-30 07:30:47 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2015-01-30 07:30:47 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned tcache_ind;
|
|
|
|
|
|
|
|
if (!config_tcache)
|
|
|
|
return (ENOENT);
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2015-01-30 07:30:47 +08:00
|
|
|
READONLY();
|
|
|
|
if (tcaches_create(tsd, &tcache_ind)) {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
READ(tcache_ind, unsigned);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2015-01-30 07:30:47 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2015-01-30 07:30:47 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned tcache_ind;
|
|
|
|
|
|
|
|
if (!config_tcache)
|
|
|
|
return (ENOENT);
|
|
|
|
|
|
|
|
WRITEONLY();
|
|
|
|
tcache_ind = UINT_MAX;
|
|
|
|
WRITE(tcache_ind, unsigned);
|
|
|
|
if (tcache_ind == UINT_MAX) {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
tcaches_flush(tsd, tcache_ind);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2015-01-30 07:30:47 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned tcache_ind;
|
|
|
|
|
|
|
|
if (!config_tcache)
|
|
|
|
return (ENOENT);
|
|
|
|
|
|
|
|
WRITEONLY();
|
|
|
|
tcache_ind = UINT_MAX;
|
|
|
|
WRITE(tcache_ind, unsigned);
|
|
|
|
if (tcache_ind == UINT_MAX) {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
tcaches_destroy(tsd, tcache_ind);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
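/*
 * Usage sketch (illustrative; assumes the public jemalloc API and a build with
 * tcache enabled, not code from this file): the tcache.* nodes above manage
 * explicit caches, independent of the automatic per-thread tcache; the
 * returned index is passed to *allocx() via MALLOCX_TCACHE():
 *
 *        unsigned tci;
 *        size_t sz = sizeof(tci);
 *        void *p;
 *
 *        mallctl("tcache.create", &tci, &sz, NULL, 0);
 *        p = mallocx(4096, MALLOCX_TCACHE(tci));
 *        dallocx(p, MALLOCX_TCACHE(tci));
 *        mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
 */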
|
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2012-11-04 12:18:28 +08:00
|
|
|
static void
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all)
|
2012-10-12 04:53:15 +08:00
|
|
|
{
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2016-02-20 12:09:31 +08:00
|
|
|
{
|
|
|
|
unsigned narenas = ctl_stats.narenas;
|
|
|
|
|
|
|
|
if (arena_ind == narenas) {
|
|
|
|
unsigned i;
|
|
|
|
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
|
|
|
|
|
2016-02-25 15:58:10 +08:00
|
|
|
for (i = 0; i < narenas; i++)
|
2016-04-14 14:36:15 +08:00
|
|
|
tarenas[i] = arena_get(tsd, i, false);
|
2016-02-20 12:09:31 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* No further need to hold ctl_mtx, since narenas and
|
|
|
|
* tarenas contain everything needed below.
|
|
|
|
*/
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2016-02-20 12:09:31 +08:00
|
|
|
|
|
|
|
for (i = 0; i < narenas; i++) {
|
|
|
|
if (tarenas[i] != NULL)
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_purge(tsd, tarenas[i], all);
|
2016-02-20 12:09:31 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
arena_t *tarena;
|
|
|
|
|
|
|
|
assert(arena_ind < narenas);
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
tarena = arena_get(tsd, arena_ind, false);
|
2012-10-12 04:53:15 +08:00
|
|
|
|
2016-02-20 12:09:31 +08:00
|
|
|
/* No further need to hold ctl_mtx. */
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2016-02-20 12:09:31 +08:00
|
|
|
|
|
|
|
if (tarena != NULL)
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_purge(tsd, tarena, all);
|
2012-10-12 04:53:15 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2012-10-12 04:53:15 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
READONLY();
|
|
|
|
WRITEONLY();
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_purge(tsd, (unsigned)mib[1], true);
|
2016-02-20 12:09:31 +08:00
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
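/*
 * Usage sketch (illustrative; assumes the public jemalloc API plus <stdio.h>,
 * not code from this file): arena_i_purge() serves both "arena.<i>.purge"
 * (all=true) and "arena.<i>.decay" (all=false); using the index reported by
 * "arenas.narenas" applies the operation to every arena, as the handler above
 * shows:
 *
 *        unsigned narenas;
 *        size_t sz = sizeof(narenas);
 *        char cmd[64];
 *
 *        mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
 *        snprintf(cmd, sizeof(cmd), "arena.%u.purge", narenas);
 *        mallctl(cmd, NULL, NULL, NULL, 0);
 */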
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2016-02-20 12:09:31 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
READONLY();
|
|
|
|
WRITEONLY();
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_purge(tsd, (unsigned)mib[1], false);
|
2012-10-12 04:53:15 +08:00
|
|
|
|
2012-11-04 12:18:28 +08:00
|
|
|
ret = 0;
|
2012-10-12 04:53:15 +08:00
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
2012-10-12 04:53:15 +08:00
|
|
|
{
|
2014-08-16 03:20:20 +08:00
|
|
|
int ret;
|
|
|
|
const char *dss = NULL;
|
2016-02-25 03:00:40 +08:00
|
|
|
unsigned arena_ind = (unsigned)mib[1];
|
2012-10-12 04:53:15 +08:00
|
|
|
dss_prec_t dss_prec_old = dss_prec_limit;
|
|
|
|
dss_prec_t dss_prec = dss_prec_limit;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2012-10-12 04:53:15 +08:00
|
|
|
WRITE(dss, const char *);
|
2014-08-16 03:20:20 +08:00
|
|
|
if (dss != NULL) {
|
|
|
|
int i;
|
|
|
|
bool match = false;
|
|
|
|
|
|
|
|
for (i = 0; i < dss_prec_limit; i++) {
|
|
|
|
if (strcmp(dss_prec_names[i], dss) == 0) {
|
|
|
|
dss_prec = i;
|
|
|
|
match = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-04 01:16:09 +08:00
|
|
|
if (!match) {
|
2014-08-16 03:20:20 +08:00
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
2012-10-12 04:53:15 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (arena_ind < ctl_stats.narenas) {
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_t *arena = arena_get(tsd, arena_ind, false);
|
2014-08-16 03:20:20 +08:00
|
|
|
if (arena == NULL || (dss_prec != dss_prec_limit &&
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_dss_prec_set(tsd, arena, dss_prec))) {
|
2014-08-16 03:20:20 +08:00
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
2016-04-14 14:36:15 +08:00
|
|
|
dss_prec_old = arena_dss_prec_get(tsd, arena);
|
2012-10-12 04:53:15 +08:00
|
|
|
} else {
|
2014-08-16 03:20:20 +08:00
|
|
|
if (dss_prec != dss_prec_limit &&
|
2016-04-14 14:36:15 +08:00
|
|
|
chunk_dss_prec_set(tsd, dss_prec)) {
|
2014-08-16 03:20:20 +08:00
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
2016-04-14 14:36:15 +08:00
|
|
|
dss_prec_old = chunk_dss_prec_get(tsd);
|
2012-10-12 04:53:15 +08:00
|
|
|
}
|
2014-08-16 03:20:20 +08:00
|
|
|
|
2012-10-12 04:53:15 +08:00
|
|
|
dss = dss_prec_names[dss_prec_old];
|
|
|
|
READ(dss, const char *);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2012-10-12 04:53:15 +08:00
|
|
|
return (ret);
|
|
|
|
}
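/*
 * Usage sketch (illustrative; assumes the public jemalloc API, not code from
 * this file): "arena.<i>.dss" reads and/or writes the dss (sbrk) precedence as
 * one of the strings in dss_prec_names ("disabled", "primary", "secondary");
 * per the handler above, the out-of-range index arenas.narenas adjusts the
 * global default instead of a single arena:
 *
 *        const char *old_dss, *new_dss = "primary";
 *        size_t sz = sizeof(old_dss);
 *
 *        mallctl("arena.0.dss", &old_dss, &sz, &new_dss, sizeof(new_dss));
 */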
|
|
|
|
|
2014-05-06 06:16:56 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
|
|
|
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
|
2014-05-06 06:16:56 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2016-02-25 03:00:40 +08:00
|
|
|
unsigned arena_ind = (unsigned)mib[1];
|
2014-05-06 06:16:56 +08:00
|
|
|
arena_t *arena;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
arena = arena_get(tsd, arena_ind, false);
|
2015-03-19 09:55:33 +08:00
|
|
|
if (arena == NULL) {
|
2014-05-06 06:16:56 +08:00
|
|
|
ret = EFAULT;
|
2015-03-19 09:55:33 +08:00
|
|
|
goto label_return;
|
2014-05-06 06:16:56 +08:00
|
|
|
}
|
|
|
|
|
2015-03-19 09:55:33 +08:00
|
|
|
if (oldp != NULL && oldlenp != NULL) {
|
2016-04-14 14:36:15 +08:00
|
|
|
ssize_t oldval = arena_lg_dirty_mult_get(tsd, arena);
|
2015-03-19 09:55:33 +08:00
|
|
|
READ(oldval, ssize_t);
|
2014-05-06 06:16:56 +08:00
|
|
|
}
|
2015-03-19 09:55:33 +08:00
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(ssize_t)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
2016-04-14 14:36:15 +08:00
|
|
|
if (arena_lg_dirty_mult_set(tsd, arena, *(ssize_t *)newp)) {
|
2015-03-19 09:55:33 +08:00
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-06 06:16:56 +08:00
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2016-02-20 12:09:31 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2016-02-20 12:09:31 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
2016-02-25 03:00:40 +08:00
|
|
|
unsigned arena_ind = (unsigned)mib[1];
|
2016-02-20 12:09:31 +08:00
|
|
|
arena_t *arena;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
arena = arena_get(tsd, arena_ind, false);
|
2016-02-20 12:09:31 +08:00
|
|
|
if (arena == NULL) {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (oldp != NULL && oldlenp != NULL) {
|
2016-04-14 14:36:15 +08:00
|
|
|
ssize_t oldval = arena_decay_time_get(tsd, arena);
|
2016-02-20 12:09:31 +08:00
|
|
|
READ(oldval, ssize_t);
|
|
|
|
}
|
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(ssize_t)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
2016-04-14 14:36:15 +08:00
|
|
|
if (arena_decay_time_set(tsd, arena, *(ssize_t *)newp)) {
|
2016-02-20 12:09:31 +08:00
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2015-07-28 23:28:19 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
|
|
|
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
|
2015-07-28 23:28:19 +08:00
|
|
|
{
|
|
|
|
int ret;
|
2016-02-25 03:00:40 +08:00
|
|
|
unsigned arena_ind = (unsigned)mib[1];
|
2015-07-28 23:28:19 +08:00
|
|
|
arena_t *arena;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2015-07-28 23:28:19 +08:00
|
|
|
if (arena_ind < narenas_total_get() && (arena =
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_get(tsd, arena_ind, false)) != NULL) {
|
2015-07-28 23:28:19 +08:00
|
|
|
if (newp != NULL) {
|
|
|
|
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
|
|
|
|
WRITE(new_chunk_hooks, chunk_hooks_t);
|
2016-04-14 14:36:15 +08:00
|
|
|
old_chunk_hooks = chunk_hooks_set(tsd, arena,
|
2015-07-28 23:28:19 +08:00
|
|
|
&new_chunk_hooks);
|
|
|
|
READ(old_chunk_hooks, chunk_hooks_t);
|
|
|
|
} else {
|
2016-04-14 14:36:15 +08:00
|
|
|
chunk_hooks_t old_chunk_hooks = chunk_hooks_get(tsd,
|
|
|
|
arena);
|
2015-07-28 23:28:19 +08:00
|
|
|
READ(old_chunk_hooks, chunk_hooks_t);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2015-07-28 23:28:19 +08:00
|
|
|
return (ret);
|
2015-03-19 09:55:33 +08:00
|
|
|
}
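/*
 * Usage sketch (illustrative; assumes the public jemalloc API, where
 * chunk_hooks_t is declared in <jemalloc/jemalloc.h>, not code from this
 * file): "arena.<i>.chunk_hooks" reads and/or replaces the whole chunk_hooks_t
 * for one arena, which is how an application installs custom chunk
 * allocation/commit/purge callbacks:
 *
 *        chunk_hooks_t hooks;
 *        size_t sz = sizeof(hooks);
 *
 *        mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0);
 *        (modify selected members, e.g. hooks.alloc, then write the struct
 *        back)
 *        mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
 */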
|
|
|
|
|
2012-10-12 04:53:15 +08:00
|
|
|
static const ctl_named_node_t *
|
2016-04-14 14:36:15 +08:00
|
|
|
arena_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
|
2012-10-12 04:53:15 +08:00
|
|
|
{
|
2016-04-14 14:36:15 +08:00
|
|
|
const ctl_named_node_t *ret;
|
2012-10-12 04:53:15 +08:00
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2012-10-12 04:53:15 +08:00
|
|
|
if (i > ctl_stats.narenas) {
|
|
|
|
ret = NULL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = super_arena_i_node;
|
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2012-10-12 04:53:15 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
/******************************************************************************/
|
|
|
|
|
2012-10-12 04:53:15 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2012-10-12 04:53:15 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned narenas;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2012-10-12 04:53:15 +08:00
|
|
|
READONLY();
|
|
|
|
if (*oldlenp != sizeof(unsigned)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
narenas = ctl_stats.narenas;
|
|
|
|
READ(narenas, unsigned);
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2012-10-12 04:53:15 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2010-01-28 05:10:55 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned nread, i;
|
|
|
|
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_lock(tsd, &ctl_mtx);
|
2010-01-28 05:10:55 +08:00
|
|
|
READONLY();
|
2012-10-12 04:53:15 +08:00
|
|
|
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
|
2010-01-28 05:10:55 +08:00
|
|
|
ret = EINVAL;
|
2012-10-12 04:53:15 +08:00
|
|
|
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
|
2016-02-25 03:00:40 +08:00
|
|
|
? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
|
2010-01-28 05:10:55 +08:00
|
|
|
} else {
|
|
|
|
ret = 0;
|
2012-10-12 04:53:15 +08:00
|
|
|
nread = ctl_stats.narenas;
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < nread; i++)
|
|
|
|
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
|
|
|
|
|
2012-04-11 06:07:44 +08:00
|
|
|
label_return:
|
2016-04-14 14:36:15 +08:00
|
|
|
malloc_mutex_unlock(tsd, &ctl_mtx);
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2015-03-19 09:55:33 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
|
|
|
|
void *oldp, size_t *oldlenp, void *newp, size_t newlen)
|
2015-03-19 09:55:33 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (oldp != NULL && oldlenp != NULL) {
|
|
|
|
ssize_t oldval = arena_lg_dirty_mult_default_get();
|
|
|
|
READ(oldval, ssize_t);
|
|
|
|
}
|
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(ssize_t)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2016-02-20 12:09:31 +08:00
|
|
|
static int
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
|
2016-02-20 12:09:31 +08:00
|
|
|
size_t *oldlenp, void *newp, size_t newlen)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (oldp != NULL && oldlenp != NULL) {
|
|
|
|
ssize_t oldval = arena_decay_time_default_get();
|
|
|
|
READ(oldval, ssize_t);
|
|
|
|
}
|
|
|
|
if (newp != NULL) {
|
|
|
|
if (newlen != sizeof(ssize_t)) {
|
|
|
|
ret = EINVAL;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
if (arena_decay_time_default_set(*(ssize_t *)newp)) {
|
|
|
|
ret = EFAULT;
|
|
|
|
goto label_return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
label_return:
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2010-11-25 07:44:21 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
|
2012-04-02 22:04:34 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
|
2012-02-11 12:22:09 +08:00
|
|
|
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
|
2012-02-29 08:50:47 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
|
2012-02-11 12:22:09 +08:00
|
|
|
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
|
2013-12-20 13:40:41 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
|
|
|
|
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
|
|
|
|
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
|
|
|
|
static const ctl_named_node_t *
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_bin_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
|
2013-12-20 13:40:41 +08:00
|
|
|
{
|
|
|
|
|
|
|
|
if (i > NBINS)
|
|
|
|
return (NULL);
|
|
|
|
return (super_arenas_bin_i_node);
|
|
|
|
}
|
|
|
|
|
2014-10-13 13:53:59 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
|
2016-02-25 04:42:23 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
|
2013-12-20 13:40:41 +08:00
|
|
|
static const ctl_named_node_t *
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_lrun_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
|
2013-12-20 13:40:41 +08:00
|
|
|
{
|
|
|
|
|
|
|
|
if (i > nlclasses)
|
|
|
|
return (NULL);
|
|
|
|
return (super_arenas_lrun_i_node);
|
|
|
|
}
|
2010-01-28 05:10:55 +08:00
|
|
|
|
2014-10-13 13:53:59 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
|
2016-02-25 04:42:23 +08:00
|
|
|
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
|
|
|
|
size_t)
|
2014-10-13 13:53:59 +08:00
|
|
|
static const ctl_named_node_t *
|
2016-04-14 14:36:15 +08:00
|
|
|
arenas_hchunk_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
|
2014-10-13 13:53:59 +08:00
|
|
|
{
|
|
|
|
|
|
|
|
if (i > nhclasses)
|
|
|
|
return (NULL);
|
|
|
|
return (super_arenas_hchunk_i_node);
|
|
|
|
}
|
|
|
|
|
static int
arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(tsd, &ctl_mtx);
	READONLY();
	if (ctl_grow(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}
	narenas = ctl_stats.narenas - 1;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(tsd, &ctl_mtx);
	return (ret);
}

/******************************************************************************/

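/*
 * "prof.thread_active_init": default value of thread.prof.active for threads
 * created after this setting is changed.
 */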
static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_thread_active_init_set(tsd, *(bool *)newp);
	} else
		oldval = prof_thread_active_init_get(tsd);
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_active_set(tsd, *(bool *)newp);
	} else
		oldval = prof_active_get(tsd);
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

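/*
 * "prof.dump": write-only; dump a memory profile to the specified filename,
 * or to a default filename if none is provided.
 */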
static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	const char *filename = NULL;

	if (!config_prof)
		return (ENOENT);

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(tsd, filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return (ret);
}

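/*
 * "prof.gdump": when enabled, dump a profile every time the total virtual
 * memory exceeds the previous maximum.
 */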
static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (!config_prof)
		return (ENOENT);

	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		oldval = prof_gdump_set(tsd, *(bool *)newp);
	} else
		oldval = prof_gdump_get(tsd);
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

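/*
 * "prof.reset": write-only; reset all profile counters and optionally update
 * lg_prof_sample.  The written value is clamped below 64 (the bit width of
 * uint64_t).
 */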
static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	size_t lg_sample = lg_prof_sample;

	if (!config_prof)
		return (ENOENT);

	WRITEONLY();
	WRITE(lg_sample, size_t);
	if (lg_sample >= (sizeof(uint64_t) << 3))
		lg_sample = (sizeof(uint64_t) << 3) - 1;

	prof_reset(tsd, lg_sample);

	ret = 0;
label_return:
	return (ret);
}

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)

/******************************************************************************/

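/* Global allocation statistics ("stats.*") and per-arena statistics. */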
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)

CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
    ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
    ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
    ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)

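/* Per size-category (small/large/huge) allocation totals for arena i. */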
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */

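/* Per-bin statistics for arena i ("stats.arenas.<i>.bins.<j>.*"). */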
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
    size_t j)
{

	if (j > NBINS)
		return (NULL);
	return (super_stats_arenas_i_bins_j_node);
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
    size_t j)
{

	if (j > nlclasses)
		return (NULL);
	return (super_stats_arenas_i_lruns_j_node);
}

CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)

static const ctl_named_node_t *
stats_arenas_i_hchunks_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
    size_t j)
{

	if (j > nhclasses)
		return (NULL);
	return (super_stats_arenas_i_hchunks_j_node);
}

static const ctl_named_node_t *
stats_arenas_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsd, &ctl_mtx);
	if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsd, &ctl_mtx);
	return (ret);
}