#ifndef JEMALLOC_INTERNAL_STATS_H
#define JEMALLOC_INTERNAL_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/* OPTION(opt, var_name, default, set_value_to) */
#define STATS_PRINT_OPTIONS \
    OPTION('J', json, false, true) \
    OPTION('g', general, true, false) \
    OPTION('m', merged, config_stats, false) \
    OPTION('d', destroyed, config_stats, false) \
    OPTION('a', unmerged, config_stats, false) \
    OPTION('b', bins, true, false) \
    OPTION('l', large, true, false) \
    OPTION('x', mutex, true, false)

enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
    STATS_PRINT_OPTIONS
#undef OPTION
    stats_print_tot_num_options
};
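
/*
 * For reference, the OPTION X-macro expands the enum above to
 * stats_print_option_num_json, ..._general, ..._merged, ..._destroyed,
 * ..._unmerged, ..._bins, ..._large, and ..._mutex (values 0 through 7),
 * followed by stats_print_tot_num_options == 8.  opt_stats_print_opts below
 * is sized to hold one character per option plus a NUL terminator.
 */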

/* Options for stats_print. */
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];

/* Implements je_malloc_stats_print. */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts);
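
/*
 * Illustrative usage, from the application side (a sketch, not part of this
 * header): je_malloc_stats_print() backs the public malloc_stats_print(),
 * and each character passed in opts flips the corresponding option away from
 * its default in STATS_PRINT_OPTIONS.  For example:
 *
 *     malloc_stats_print(NULL, NULL, NULL);  // Full human-readable report.
 *     malloc_stats_print(NULL, NULL, "J");   // Emit JSON instead.
 *     malloc_stats_print(NULL, NULL, "bl");  // Omit per-bin and per-large
 *                                            // size class detail.
 */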

/*
 * In those architectures that support 64-bit atomics, we use atomic updates
 * for our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading atomically. */
typedef uint64_t arena_stats_u64_t;
#endif
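
/*
 * A minimal sketch of what a reader of these counters might look like (the
 * helper name and its arguments are hypothetical, for illustration only):
 *
 *     static uint64_t
 *     counter_read(tsdn_t *tsdn, arena_stats_t *arena_stats,
 *         arena_stats_u64_t *p) {
 *     #ifdef JEMALLOC_ATOMIC_U64
 *         return atomic_load_u64(p, ATOMIC_RELAXED);
 *     #else
 *         malloc_mutex_lock(tsdn, &arena_stats->mtx);
 *         uint64_t ret = *p;
 *         malloc_mutex_unlock(tsdn, &arena_stats->mtx);
 *         return ret;
 *     #endif
 *     }
 */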

typedef struct malloc_bin_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t nmalloc;
    uint64_t ndalloc;

    /*
     * Number of allocation requests that correspond to the size of this
     * bin.  This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t nrequests;

    /*
     * Current number of regions of this size class, including regions
     * currently cached by tcache.
     */
    size_t curregs;

    /* Number of tcache fills from this bin. */
    uint64_t nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t nflushes;

    /* Total number of slabs created for this bin's size class. */
    uint64_t nslabs;

    /*
     * Total number of slabs reused by extracting them from the slabs heap
     * for this bin's size class.
     */
    uint64_t reslabs;

    /* Current number of slabs in this bin. */
    size_t curslabs;

    mutex_prof_data_t mutex_data;
} malloc_bin_stats_t;
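
/*
 * Concrete example of the tcache interaction noted above: a single object
 * that is allocated once, then cached and re-used 99 times by a thread's
 * tcache before finally being freed, eventually accounts for 100 increments
 * of nrequests but only one increment each of nmalloc and ndalloc.
 */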

typedef struct malloc_large_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.
     */
    arena_stats_u64_t nmalloc;
    arena_stats_u64_t ndalloc;

    /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    arena_stats_u64_t nrequests; /* Partially derived. */

    /* Current number of allocations of this size class. */
    size_t curlextents; /* Derived. */
} malloc_large_stats_t;
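
/*
 * Decay/purging statistics.  Decay-based purging proceeds in two phases: the
 * first uses lazy purging to convert dirty pages to "muzzy", and the second
 * uses forced purging, decommit, or unmapping to convert pages to clean or
 * destroy them altogether.  An arena keeps one decay_stats_t per phase
 * (decay_dirty and decay_muzzy in arena_stats_t below).
 */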
typedef struct decay_stats_s {
    /* Total number of purge sweeps. */
    arena_stats_u64_t npurge;
    /* Total number of madvise calls made. */
    arena_stats_u64_t nmadvise;
    /* Total number of pages purged. */
    arena_stats_u64_t purged;
} decay_stats_t;

/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
    malloc_mutex_t mtx;
#endif

    /* Number of bytes currently mapped, excluding retained memory. */
    atomic_zu_t mapped; /* Partially derived. */

    /*
     * Number of unused virtual memory bytes currently retained.  Retained
     * bytes are technically mapped (though always decommitted or purged),
     * but they are excluded from the mapped statistic (above).
     */
    atomic_zu_t retained; /* Derived. */

    decay_stats_t decay_dirty;
    decay_stats_t decay_muzzy;

    atomic_zu_t base; /* Derived. */
    atomic_zu_t internal;
    atomic_zu_t resident; /* Derived. */
    atomic_zu_t metadata_thp;

    atomic_zu_t allocated_large; /* Derived. */
    arena_stats_u64_t nmalloc_large; /* Derived. */
    arena_stats_u64_t ndalloc_large; /* Derived. */
    arena_stats_u64_t nrequests_large; /* Derived. */

    /* Number of bytes cached in tcache associated with this arena. */
    atomic_zu_t tcache_bytes; /* Derived. */

    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

    /* One element for each large size class. */
    malloc_large_stats_t lstats[NSIZES - NBINS];

    /* Arena uptime. */
    nstime_t uptime;
} arena_stats_t;

#endif /* JEMALLOC_INTERNAL_STATS_H */