Split up and standardize naming of stats code.

The arena-associated stats are now all prefixed with arena_stats_, and live in
their own file.  Likewise, malloc_bin_stats_t -> bin_stats_t, also in its own
file.
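
For reference, the renames are:

    malloc_bin_stats_t   -> bin_stats_t          (new file: bin_stats.h)
    malloc_large_stats_t -> arena_stats_large_t  (new file: arena_stats.h)
    decay_stats_t        -> arena_stats_decay_t  (new file: arena_stats.h)

ctl.c's file-local helpers accum_arena_stats_u64 and arena_stats_read_u64
take a ctl_ prefix, keeping them out of the arena_stats_ namespace.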
commit 7f1b02e3fa (parent 901d94a2b0)
David T. Goldblatt authored 2017-11-04 12:50:19 -07:00; committed by David Goldblatt
10 changed files with 342 additions and 333 deletions

include/jemalloc/internal/arena_externs.h

@@ -16,17 +16,13 @@ extern const char *percpu_arena_mode_names[];
 extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
 extern malloc_mutex_t arenas_lock;
 
-void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    szind_t szind, uint64_t nrequests);
-void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    size_t size);
 void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
     unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
     ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
 void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats);
+    bin_stats_t *bstats, arena_stats_large_t *lstats);
 void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *extent);
 #ifdef JEMALLOC_JET

include/jemalloc/internal/arena_stats.h (new file)

@@ -0,0 +1,237 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
+#define JEMALLOC_INTERNAL_ARENA_STATS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/size_classes.h"
+
+/*
+ * In those architectures that support 64-bit atomics, we use atomic updates for
+ * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
+ * externally.
+ */
+#ifdef JEMALLOC_ATOMIC_U64
+typedef atomic_u64_t arena_stats_u64_t;
+#else
+/* Must hold the arena stats mutex while reading atomically. */
+typedef uint64_t arena_stats_u64_t;
+#endif
+
+typedef struct arena_stats_large_s arena_stats_large_t;
+struct arena_stats_large_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the arena.
+	 */
+	arena_stats_u64_t nmalloc;
+	arena_stats_u64_t ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to this size class.
+	 * This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
+	arena_stats_u64_t nrequests; /* Partially derived. */
+
+	/* Current number of allocations of this size class. */
+	size_t curlextents; /* Derived. */
+};
+
+typedef struct arena_stats_decay_s arena_stats_decay_t;
+struct arena_stats_decay_s {
+	/* Total number of purge sweeps. */
+	arena_stats_u64_t npurge;
+	/* Total number of madvise calls made. */
+	arena_stats_u64_t nmadvise;
+	/* Total number of pages purged. */
+	arena_stats_u64_t purged;
+};
+
+/*
+ * Arena stats.  Note that fields marked "derived" are not directly maintained
+ * within the arena code; rather their values are derived during stats merge
+ * requests.
+ */
+typedef struct arena_stats_s arena_stats_t;
+struct arena_stats_s {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_t mtx;
+#endif
+
+	/* Number of bytes currently mapped, excluding retained memory. */
+	atomic_zu_t mapped; /* Partially derived. */
+
+	/*
+	 * Number of unused virtual memory bytes currently retained.  Retained
+	 * bytes are technically mapped (though always decommitted or purged),
+	 * but they are excluded from the mapped statistic (above).
+	 */
+	atomic_zu_t retained; /* Derived. */
+
+	arena_stats_decay_t decay_dirty;
+	arena_stats_decay_t decay_muzzy;
+
+	atomic_zu_t base; /* Derived. */
+	atomic_zu_t internal;
+	atomic_zu_t resident; /* Derived. */
+	atomic_zu_t metadata_thp;
+
+	atomic_zu_t allocated_large; /* Derived. */
+	arena_stats_u64_t nmalloc_large; /* Derived. */
+	arena_stats_u64_t ndalloc_large; /* Derived. */
+	arena_stats_u64_t nrequests_large; /* Derived. */
+
+	/* Number of bytes cached in tcache associated with this arena. */
+	atomic_zu_t tcache_bytes; /* Derived. */
+
+	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
+
+	/* One element for each large size class. */
+	arena_stats_large_t lstats[NSIZES - NBINS];
+
+	/* Arena uptime. */
+	nstime_t uptime;
+};
+
+static inline bool
+arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+	if (config_debug) {
+		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
+			assert(((char *)arena_stats)[i] == 0);
+		}
+	}
+#ifndef JEMALLOC_ATOMIC_U64
+	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+#endif
+	/* Memory is zeroed, so there is no need to clear stats. */
+	return false;
+}
+
+static inline void
+arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_lock(tsdn, &arena_stats->mtx);
+#endif
+}
+
+static inline void
+arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
+#endif
+}
+
+static inline uint64_t
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+	return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	return *p;
+#endif
+}
+
+static inline void
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	*p += x;
+#endif
+}
+
+UNUSED static inline void
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+	assert(r - x <= r);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	*p -= x;
+	assert(*p + x >= *p);
+#endif
+}
+
+/*
+ * Non-atomically sets *dst += src.  *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static inline void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+	*dst += src;
+#endif
+}
+
+static inline size_t
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+	return atomic_load_zu(p, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	return atomic_load_zu(p, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+    size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+    size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+	assert(r - x <= r);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
+#endif
+}
+
+/* Like the _u64 variant, needs an externally synchronized *dst. */
+static inline void
+arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
+	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
+}
+
+static inline void
+arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    szind_t szind, uint64_t nrequests) {
+	arena_stats_lock(tsdn, arena_stats);
+	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
+	    NBINS].nrequests, nrequests);
+	arena_stats_unlock(tsdn, arena_stats);
+}
+
+static inline void
+arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
+	arena_stats_lock(tsdn, arena_stats);
+	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
+	arena_stats_unlock(tsdn, arena_stats);
+}
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
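
Worth pulling out of the new header above: arena_stats.h's whole synchronization story is "relaxed 64-bit atomics where JEMALLOC_ATOMIC_U64 is defined, one stats mutex everywhere else," with the lock/unlock helpers compiling to nothing in the atomic build. A minimal standalone sketch of that pattern in C11 and pthreads; HAVE_ATOMIC_U64, counters_t, and record_malloc are hypothetical stand-ins, not jemalloc API:

#include <pthread.h>
#include <stdint.h>

#ifdef HAVE_ATOMIC_U64
#include <stdatomic.h>
typedef _Atomic uint64_t counter_u64_t;
#else
/* No 64-bit atomics: a plain counter, guarded by the mutex below. */
typedef uint64_t counter_u64_t;
#endif

typedef struct {
#ifndef HAVE_ATOMIC_U64
	pthread_mutex_t mtx;	/* Initialize with pthread_mutex_init(). */
#endif
	counter_u64_t nmalloc;
} counters_t;

/* Mirrors arena_stats_lock(): a no-op when atomics do the synchronizing. */
static void
counters_lock(counters_t *c) {
#ifndef HAVE_ATOMIC_U64
	pthread_mutex_lock(&c->mtx);
#else
	(void)c;
#endif
}

static void
counters_unlock(counters_t *c) {
#ifndef HAVE_ATOMIC_U64
	pthread_mutex_unlock(&c->mtx);
#else
	(void)c;
#endif
}

/* Mirrors arena_stats_add_u64(): relaxed fetch-add, or mutex-guarded add. */
static void
counters_add_nmalloc(counters_t *c, uint64_t x) {
#ifdef HAVE_ATOMIC_U64
	atomic_fetch_add_explicit(&c->nmalloc, x, memory_order_relaxed);
#else
	c->nmalloc += x;	/* Caller holds c->mtx. */
#endif
}

/* Mirrors arena_stats_mapped_add(): bracket the update with lock/unlock. */
void
record_malloc(counters_t *c) {
	counters_lock(c);
	counters_add_nmalloc(c, 1);
	counters_unlock(c);
}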

include/jemalloc/internal/arena_structs_b.h

@@ -1,6 +1,7 @@
 #ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
 #define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
 
+#include "jemalloc/internal/arena_stats.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
@@ -11,7 +12,6 @@
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/size_classes.h"
 #include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ticker.h"
 
 struct arena_decay_s {
@@ -69,7 +69,7 @@ struct arena_decay_s {
 	 * arena and ctl code.
 	 *
	 * Synchronization: Same as associated arena's stats field. */
-	decay_stats_t *stats;
+	arena_stats_decay_t *stats;
 	/* Peak number of pages in associated extents.  Used for debug only. */
 	uint64_t ceil_npages;
 };

include/jemalloc/internal/bin.h

@@ -4,7 +4,7 @@
 #include "jemalloc/internal/extent_types.h"
 #include "jemalloc/internal/extent_structs.h"
 #include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/stats.h"
+#include "jemalloc/internal/bin_stats.h"
 
 /*
  * A bin contains a set of extents that are currently being used for slab
@@ -75,7 +75,7 @@ struct bin_s {
 	extent_list_t slabs_full;
 
 	/* Bin statistics. */
-	malloc_bin_stats_t stats;
+	bin_stats_t stats;
 };
 
 /* Initializes a bin to empty.  Returns true on error. */
@@ -88,7 +88,7 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
 
 /* Stats. */
 static inline void
-bin_stats_merge(tsdn_t *tsdn, malloc_bin_stats_t *dst_bin_stats, bin_t *bin) {
+bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
 	malloc_mutex_lock(tsdn, &bin->lock);
 	malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
 	dst_bin_stats->nmalloc += bin->stats.nmalloc;

include/jemalloc/internal/bin_stats.h (new file)

@@ -0,0 +1,51 @@
+#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
+#define JEMALLOC_INTERNAL_BIN_STATS_H
+
+#include "jemalloc/internal/mutex_prof.h"
+
+typedef struct bin_stats_s bin_stats_t;
+struct bin_stats_s {
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the bin.  Note that tcache may allocate an object, then recycle it
+	 * many times, resulting in many increments to nrequests, but only one
+	 * each to nmalloc and ndalloc.
+	 */
+	uint64_t nmalloc;
+	uint64_t ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to the size of this
+	 * bin.  This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
+	uint64_t nrequests;
+
+	/*
+	 * Current number of regions of this size class, including regions
+	 * currently cached by tcache.
+	 */
+	size_t curregs;
+
+	/* Number of tcache fills from this bin. */
+	uint64_t nfills;
+
+	/* Number of tcache flushes to this bin. */
+	uint64_t nflushes;
+
+	/* Total number of slabs created for this bin's size class. */
+	uint64_t nslabs;
+
+	/*
+	 * Total number of slabs reused by extracting them from the slabs heap
+	 * for this bin's size class.
+	 */
+	uint64_t reslabs;
+
+	/* Current number of slabs in this bin. */
+	size_t curslabs;
+
+	mutex_prof_data_t mutex_data;
+};
+
+#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
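
Reading the comments above, a plausible trace of how these counters interact: a tcache fill that grabs 64 regions from the bin is one bin-level event (nmalloc += 64, nfills += 1); the thread can then serve a thousand allocations of this size purely from its cache without touching the bin, and those show up only when the tcache periodically merges its request tally (nrequests += 1000); a later flush returns the regions (ndalloc += 64, nflushes += 1). Hence nrequests can dwarf nmalloc, and curregs (which counts tcache-cached regions too) is what tracks regions actually outstanding.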

include/jemalloc/internal/ctl.h

@@ -40,8 +40,8 @@ typedef struct ctl_arena_stats_s {
 	uint64_t ndalloc_small;
 	uint64_t nrequests_small;
 
-	malloc_bin_stats_t bstats[NBINS];
-	malloc_large_stats_t lstats[NSIZES - NBINS];
+	bin_stats_t bstats[NBINS];
+	arena_stats_large_t lstats[NSIZES - NBINS];
 } ctl_arena_stats_t;
 
 typedef struct ctl_stats_s {

include/jemalloc/internal/stats.h

@@ -1,17 +1,6 @@
 #ifndef JEMALLOC_INTERNAL_STATS_H
 #define JEMALLOC_INTERNAL_STATS_H
 
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/mutex_prof.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/size_classes.h"
-
-/*
- * The synchronization for stats counters may piggyback on existing
- * synchronization in the associated data. Therefore, the merging functions for
- * a module's stats will lie in the module, instead of with the stats.
- */
-
 /* OPTION(opt, var_name, default, set_value_to) */
 #define STATS_PRINT_OPTIONS \
 	OPTION('J', json, false, true) \
@@ -38,133 +27,4 @@ extern char opt_stats_print_opts[stats_print_tot_num_options+1];
 void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *opts);
 
-/*
- * In those architectures that support 64-bit atomics, we use atomic updates for
- * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
- * externally.
- */
-#ifdef JEMALLOC_ATOMIC_U64
-typedef atomic_u64_t arena_stats_u64_t;
-#else
-/* Must hold the arena stats mutex while reading atomically. */
-typedef uint64_t arena_stats_u64_t;
-#endif
-
-typedef struct malloc_bin_stats_s {
-	/*
-	 * Total number of allocation/deallocation requests served directly by
-	 * the bin.  Note that tcache may allocate an object, then recycle it
-	 * many times, resulting many increments to nrequests, but only one
-	 * each to nmalloc and ndalloc.
-	 */
-	uint64_t nmalloc;
-	uint64_t ndalloc;
-
-	/*
-	 * Number of allocation requests that correspond to the size of this
-	 * bin.  This includes requests served by tcache, though tcache only
-	 * periodically merges into this counter.
-	 */
-	uint64_t nrequests;
-
-	/*
-	 * Current number of regions of this size class, including regions
-	 * currently cached by tcache.
-	 */
-	size_t curregs;
-
-	/* Number of tcache fills from this bin. */
-	uint64_t nfills;
-
-	/* Number of tcache flushes to this bin. */
-	uint64_t nflushes;
-
-	/* Total number of slabs created for this bin's size class. */
-	uint64_t nslabs;
-
-	/*
-	 * Total number of slabs reused by extracting them from the slabs heap
-	 * for this bin's size class.
-	 */
-	uint64_t reslabs;
-
-	/* Current number of slabs in this bin. */
-	size_t curslabs;
-
-	mutex_prof_data_t mutex_data;
-} malloc_bin_stats_t;
-
-typedef struct malloc_large_stats_s {
-	/*
-	 * Total number of allocation/deallocation requests served directly by
-	 * the arena.
-	 */
-	arena_stats_u64_t nmalloc;
-	arena_stats_u64_t ndalloc;
-
-	/*
-	 * Number of allocation requests that correspond to this size class.
-	 * This includes requests served by tcache, though tcache only
-	 * periodically merges into this counter.
-	 */
-	arena_stats_u64_t nrequests; /* Partially derived. */
-
-	/* Current number of allocations of this size class. */
-	size_t curlextents; /* Derived. */
-} malloc_large_stats_t;
-
-typedef struct decay_stats_s {
-	/* Total number of purge sweeps. */
-	arena_stats_u64_t npurge;
-	/* Total number of madvise calls made. */
-	arena_stats_u64_t nmadvise;
-	/* Total number of pages purged. */
-	arena_stats_u64_t purged;
-} decay_stats_t;
-
-/*
- * Arena stats.  Note that fields marked "derived" are not directly maintained
- * within the arena code; rather their values are derived during stats merge
- * requests.
- */
-typedef struct arena_stats_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t mtx;
-#endif
-
-	/* Number of bytes currently mapped, excluding retained memory. */
-	atomic_zu_t mapped; /* Partially derived. */
-
-	/*
-	 * Number of unused virtual memory bytes currently retained.  Retained
-	 * bytes are technically mapped (though always decommitted or purged),
-	 * but they are excluded from the mapped statistic (above).
-	 */
-	atomic_zu_t retained; /* Derived. */
-
-	decay_stats_t decay_dirty;
-	decay_stats_t decay_muzzy;
-
-	atomic_zu_t base; /* Derived. */
-	atomic_zu_t internal;
-	atomic_zu_t resident; /* Derived. */
-	atomic_zu_t metadata_thp;
-
-	atomic_zu_t allocated_large; /* Derived. */
-	arena_stats_u64_t nmalloc_large; /* Derived. */
-	arena_stats_u64_t ndalloc_large; /* Derived. */
-	arena_stats_u64_t nrequests_large; /* Derived. */
-
-	/* Number of bytes cached in tcache associated with this arena. */
-	atomic_zu_t tcache_bytes; /* Derived. */
-
-	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
-
-	/* One element for each large size class. */
-	malloc_large_stats_t lstats[NSIZES - NBINS];
-
-	/* Arena uptime. */
-	nstime_t uptime;
-} arena_stats_t;
-
 #endif /* JEMALLOC_INTERNAL_STATS_H */

src/arena.c

@@ -57,145 +57,6 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 
 /******************************************************************************/
 
-static bool
-arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-	if (config_debug) {
-		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
-			assert(((char *)arena_stats)[i] == 0);
-		}
-	}
-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
-	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-#endif
-	/* Memory is zeroed, so there is no need to clear stats. */
-	return false;
-}
-
-static void
-arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_lock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static void
-arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static uint64_t
-arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
-	return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	return *p;
-#endif
-}
-
-static void
-arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	*p += x;
-#endif
-}
-
-UNUSED static void
-arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
-	assert(r - x <= r);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	*p -= x;
-	assert(*p + x >= *p);
-#endif
-}
-
-/*
- * Non-atomically sets *dst += src. *dst needs external synchronization.
- * This lets us avoid the cost of a fetch_add when its unnecessary (note that
- * the types here are atomic).
- */
-static void
-arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
-#ifdef JEMALLOC_ATOMIC_U64
-	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
-	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
-#else
-	*dst += src;
-#endif
-}
-
-static size_t
-arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
-	return atomic_load_zu(p, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	return atomic_load_zu(p, ATOMIC_RELAXED);
-#endif
-}
-
-static void
-arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
-    size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
-	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
-#endif
-}
-
-static void
-arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
-    size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
-	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
-	assert(r - x <= r);
-#else
-	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
-	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
-	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
-#endif
-}
-
-/* Like the _u64 variant, needs an externally synchronized *dst. */
-static void
-arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
-	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
-	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
-}
-
-void
-arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
-    szind_t szind, uint64_t nrequests) {
-	arena_stats_lock(tsdn, arena_stats);
-	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
-	    NBINS].nrequests, nrequests);
-	arena_stats_unlock(tsdn, arena_stats);
-}
-
-void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
-	arena_stats_lock(tsdn, arena_stats);
-	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
-	arena_stats_unlock(tsdn, arena_stats);
-}
-
 void
 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
@@ -213,7 +74,7 @@ void
 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
+    bin_stats_t *bstats, arena_stats_large_t *lstats) {
 	cassert(config_stats);
 
 	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
@@ -729,7 +590,7 @@ arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
 
 static bool
 arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
-    decay_stats_t *stats) {
+    arena_stats_decay_t *stats) {
 	if (config_debug) {
 		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
 			assert(((char *)decay)[i] == 0);

src/bin.c

@@ -29,7 +29,7 @@ bin_init(bin_t *bin) {
 	extent_heap_new(&bin->slabs_nonfull);
 	extent_list_init(&bin->slabs_full);
 	if (config_stats) {
-		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+		memset(&bin->stats, 0, sizeof(bin_stats_t));
 	}
 	return false;
 }

src/ctl.c

@@ -560,7 +560,7 @@ static const ctl_named_node_t super_root_node[] = {
  * synchronized by the ctl mutex.
  */
 static void
-accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
+ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
 #ifdef JEMALLOC_ATOMIC_U64
 	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
 	uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
@@ -572,7 +572,7 @@ accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
 
 /* Likewise: with ctl mutex synchronization, reading is simple. */
 static uint64_t
-arena_stats_read_u64(arena_stats_u64_t *p) {
+ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
 #ifdef JEMALLOC_ATOMIC_U64
 	return atomic_load_u64(p, ATOMIC_RELAXED);
 #else
@@ -580,7 +580,8 @@ arena_stats_read_u64(arena_stats_u64_t *p) {
 #endif
 }
 
-static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+static void
+accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
 	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
 	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
 	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
@@ -690,9 +691,9 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
 		ctl_arena->astats->ndalloc_small = 0;
 		ctl_arena->astats->nrequests_small = 0;
 		memset(ctl_arena->astats->bstats, 0, NBINS *
-		    sizeof(malloc_bin_stats_t));
+		    sizeof(bin_stats_t));
 		memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
-		    sizeof(malloc_large_stats_t));
+		    sizeof(arena_stats_large_t));
 	}
 }
@@ -755,18 +756,18 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
 		    &astats->astats.retained);
 	}
 
-	accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
+	ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
 	    &astats->astats.decay_dirty.npurge);
-	accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
+	ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
 	    &astats->astats.decay_dirty.nmadvise);
-	accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
+	ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
 	    &astats->astats.decay_dirty.purged);
-	accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
+	ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
 	    &astats->astats.decay_muzzy.npurge);
-	accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
+	ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
 	    &astats->astats.decay_muzzy.nmadvise);
-	accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
+	ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
 	    &astats->astats.decay_muzzy.purged);
 
 #define OP(mtx) malloc_mutex_prof_merge( \
@@ -806,11 +807,11 @@ MUTEX_PROF_ARENA_MUTEXES
 		assert(atomic_load_zu(&astats->astats.allocated_large,
 		    ATOMIC_RELAXED) == 0);
 	}
-	accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
+	ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
 	    &astats->astats.nmalloc_large);
-	accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
+	ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
 	    &astats->astats.ndalloc_large);
-	accum_arena_stats_u64(&sdstats->astats.nrequests_large,
+	ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
 	    &astats->astats.nrequests_large);
 	accum_atomic_zu(&sdstats->astats.tcache_bytes,
@@ -847,11 +848,11 @@ MUTEX_PROF_ARENA_MUTEXES
 		}
 
 		for (i = 0; i < NSIZES - NBINS; i++) {
-			accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
 			    &astats->lstats[i].nmalloc);
-			accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
 			    &astats->lstats[i].ndalloc);
-			accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
 			    &astats->lstats[i].nrequests);
 			if (!destroyed) {
 				sdstats->lstats[i].curlextents +=
@@ -2545,24 +2546,24 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
     size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
-    arena_stats_read_u64(
+    ctl_arena_stats_read_u64(
     &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
-    arena_stats_read_u64(
+    ctl_arena_stats_read_u64(
     &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_base,
     atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
@@ -2592,14 +2593,17 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
     atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
     ATOMIC_RELAXED), size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
+/*
+ * Note: "nmalloc" here instead of "nrequests" in the read. This is intentional.
+ */
 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
-    uint64_t) /* Intentional. */
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */
 
 /* Lock profiling related APIs below. */
 #define RO_MUTEX_CTL_GEN(n, l) \
@@ -2717,14 +2721,14 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
 }
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
-    arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests),
-    uint64_t)
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
     arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
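
One more pattern worth noting in this file: ctl_accum_arena_stats_u64 and accum_atomic_zu do a relaxed load followed by a relaxed store instead of an atomic fetch-add, which is only sound because, per the comment in the first hunk, merging is serialized by the ctl mutex. A standalone sketch of the same trick in C11 (the name accum_relaxed_zu is illustrative, not jemalloc's):

#include <stdatomic.h>
#include <stddef.h>

/*
 * Accumulate *src into *dst with relaxed loads and a relaxed store.  Safe
 * only while a single merger runs at a time (e.g., under a ctl-style
 * mutex); racing mergers could lose updates, since the load and store are
 * separate operations rather than one read-modify-write.
 */
static void
accum_relaxed_zu(_Atomic size_t *dst, _Atomic size_t *src) {
	size_t cur_dst = atomic_load_explicit(dst, memory_order_relaxed);
	size_t cur_src = atomic_load_explicit(src, memory_order_relaxed);
	atomic_store_explicit(dst, cur_dst + cur_src, memory_order_relaxed);
}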