Header refactoring: ctl - unify and remove from catchall.

In order to do this, we introduce the mutex_prof module, which breaks a circular
dependency between ctl and prof.
Author: David Goldblatt, 2017-04-24 17:09:56 -07:00 (committed by David Goldblatt)
parent c67c3e4a63
commit 89e2d3c12b
11 changed files with 257 additions and 264 deletions
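
For orientation, a rough sketch of the dependency shape this untangles (an illustration, not part of the commit; the header paths are the ones touched below): the mutex profiling enums previously lived in ctl_types.h and mutex_prof_data_s lived in mutex_structs.h, so ctl and the mutex code each needed pieces of the other. After the change both sides point at the new header:

/*
 * ctl.h ------------------+
 * mutex_structs.h --------+--> jemalloc/internal/mutex_prof.h
 * src/stats.c ------------+
 *
 * mutex_prof.h owns the MUTEX_PROF_*_MUTEXES lists, the index enums,
 * and mutex_prof_data_t; it depends only on atomic.h and nstime.h,
 * not on ctl or prof.
 */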


@@ -0,0 +1,130 @@
#ifndef JEMALLOC_INTERNAL_CTL_H
#define JEMALLOC_INTERNAL_CTL_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
typedef struct ctl_node_s {
bool named;
} ctl_node_t;
typedef struct ctl_named_node_s {
ctl_node_t node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
size_t nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
size_t);
} ctl_named_node_t;
typedef struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
} ctl_indexed_node_t;
typedef struct ctl_arena_stats_s {
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t lstats[NSIZES - NBINS];
} ctl_arena_stats_t;
typedef struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
} ctl_stats_t;
typedef struct ctl_arena_s ctl_arena_t;
struct ctl_arena_s {
unsigned arena_ind;
bool initialized;
ql_elm(ctl_arena_t) destroyed_link;
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_time;
ssize_t muzzy_decay_time;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;
};
typedef struct ctl_arenas_s {
uint64_t epoch;
unsigned narenas;
ql_head(ctl_arena_t) destroyed;
/*
* Element 0 corresponds to merged stats for extant arenas (accessed via
* MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
* destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
* remaining MALLOCX_ARENA_MAX+1 elements correspond to arenas.
*/
ctl_arena_t *arenas[MALLOCX_ARENA_MAX + 3];
} ctl_arenas_t;
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_H */
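
The xmallctl* wrappers above are abort-on-failure conveniences around the public mallctl entry points. A usage sketch (the helper is hypothetical; "epoch" and "stats.allocated" are existing mallctl names):

#include <stdint.h>

/* Hypothetical internal helper, shown only to illustrate the wrappers. */
static size_t
stats_allocated_or_abort(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);

	/* Refresh the stats snapshot; on any mallctl error xmallctl()
	 * prints a message and calls abort(). */
	xmallctl("epoch", &epoch, &sz, &epoch, sz);

	size_t allocated;
	sz = sizeof(allocated);
	xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
	return allocated;
}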


@@ -1,48 +0,0 @@
#ifndef JEMALLOC_INTERNAL_CTL_EXTERNS_H
#define JEMALLOC_INTERNAL_CTL_EXTERNS_H
#include "jemalloc/internal/malloc_io.h"
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_EXTERNS_H */


@@ -1,85 +0,0 @@
#ifndef JEMALLOC_INTERNAL_CTL_STRUCTS_H
#define JEMALLOC_INTERNAL_CTL_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
struct ctl_node_s {
bool named;
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
size_t nchildren;
const ctl_node_t *children;
int (*ctl)(tsd_t *, const size_t *, size_t, void *,
size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
};
struct ctl_arena_stats_s {
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t lstats[NSIZES - NBINS];
};
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
mutex_prof_data_t mutex_prof_data[num_global_prof_mutexes];
};
struct ctl_arena_s {
unsigned arena_ind;
bool initialized;
ql_elm(ctl_arena_t) destroyed_link;
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_time;
ssize_t muzzy_decay_time;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;
};
struct ctl_arenas_s {
uint64_t epoch;
unsigned narenas;
ql_head(ctl_arena_t) destroyed;
/*
* Element 0 corresponds to merged stats for extant arenas (accessed via
* MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
* destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
* remaining MALLOCX_ARENA_MAX+1 elements correspond to arenas.
*/
ctl_arena_t *arenas[MALLOCX_ARENA_MAX + 3];
};
#endif /* JEMALLOC_INTERNAL_CTL_STRUCTS_H */


@@ -1,57 +0,0 @@
#ifndef JEMALLOC_INTERNAL_CTL_TYPES_H
#define JEMALLOC_INTERNAL_CTL_TYPES_H
#define GLOBAL_PROF_MUTEXES \
OP(ctl) \
OP(prof)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
GLOBAL_PROF_MUTEXES
#undef OP
num_global_prof_mutexes
} global_prof_mutex_ind_t;
#define ARENA_PROF_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
ARENA_PROF_MUTEXES
#undef OP
num_arena_prof_mutexes
} arena_prof_mutex_ind_t;
#define MUTEX_PROF_COUNTERS \
OP(num_ops, uint64_t) \
OP(num_wait, uint64_t) \
OP(num_spin_acq, uint64_t) \
OP(num_owner_switch, uint64_t) \
OP(total_wait_time, uint64_t) \
OP(max_wait_time, uint64_t) \
OP(max_num_thds, uint32_t)
typedef enum {
#define OP(counter, type) mutex_counter_##counter,
MUTEX_PROF_COUNTERS
#undef OP
num_mutex_prof_counters
} mutex_prof_counter_ind_t;
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
typedef struct ctl_arena_s ctl_arena_t;
typedef struct ctl_arenas_s ctl_arenas_t;
#endif /* JEMALLOC_INTERNAL_CTL_TYPES_H */


@@ -40,7 +40,6 @@
 /* TYPES */
 /******************************************************************************/
 
-#include "jemalloc/internal/ctl_types.h"
 #include "jemalloc/internal/witness_types.h"
 #include "jemalloc/internal/mutex_types.h"
 #include "jemalloc/internal/tsd_types.h"
@@ -59,7 +58,6 @@
 #include "jemalloc/internal/witness_structs.h"
 #include "jemalloc/internal/mutex_structs.h"
-#include "jemalloc/internal/ctl_structs.h"
 #include "jemalloc/internal/arena_structs_a.h"
 #include "jemalloc/internal/extent_structs.h"
 #include "jemalloc/internal/extent_dss_structs.h"
@@ -75,7 +73,6 @@
 /******************************************************************************/
 
 #include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/ctl_externs.h"
 #include "jemalloc/internal/witness_externs.h"
 #include "jemalloc/internal/mutex_externs.h"
 #include "jemalloc/internal/extent_externs.h"


@@ -0,0 +1,84 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(ctl) \
OP(prof)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
#define MUTEX_PROF_COUNTERS \
OP(num_ops, uint64_t) \
OP(num_wait, uint64_t) \
OP(num_spin_acq, uint64_t) \
OP(num_owner_switch, uint64_t) \
OP(total_wait_time, uint64_t) \
OP(max_wait_time, uint64_t) \
OP(max_num_thds, uint32_t)
typedef enum {
#define OP(counter, type) mutex_counter_##counter,
MUTEX_PROF_COUNTERS
#undef OP
mutex_prof_num_counters
} mutex_prof_counter_ind_t;
typedef struct mutex_prof_data_s {
/*
* Counters touched on the slow path, i.e. when there is lock
* contention. We update them once we have the lock.
*/
/* Total time (in nano seconds) spent waiting on this mutex. */
nstime_t tot_wait_time;
/* Max time (in nano seconds) spent on a single lock operation. */
nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */
uint64_t n_spin_acquired;
/* Max # of threads waiting for the mutex at the same time. */
uint32_t max_n_thds;
/* Current # of threads waiting on the lock. Atomic synced. */
atomic_u32_t n_waiting_thds;
/*
* Data touched on the fast path. These are modified right after we
* grab the lock, so it's placed closest to the end (i.e. right before
* the lock) so that we have a higher chance of them being on the same
* cacheline.
*/
/* # of times the mutex holder is different than the previous one. */
uint64_t n_owner_switches;
/* Previous mutex holder, to facilitate n_owner_switches. */
tsdn_t *prev_owner;
/* # of lock() operations in total. */
uint64_t n_lock_ops;
} mutex_prof_data_t;
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
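
The OP() lists above are X-macros: each consumer defines OP() for its own purpose, expands the list, and undefines it. A minimal sketch of a consumer (modeled on what src/stats.c does below; the array and function names here are illustrative, not jemalloc's):

#include <stdio.h>

/* Stamp out a name table parallel to the arena mutex enum. */
static const char *arena_mutex_name[mutex_prof_num_arena_mutexes] = {
#define OP(mtx) #mtx,
	MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static void
list_arena_mutexes(void) {
	mutex_prof_arena_ind_t i;
	for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
		/* Prints "large", "extent_avail", ... in enum order. */
		printf("%u: %s\n", (unsigned)i, arena_mutex_name[i]);
	}
}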


@@ -2,39 +2,7 @@
 #define JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
 
 #include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/nstime.h"
-
-struct mutex_prof_data_s {
-/*
-* Counters touched on the slow path, i.e. when there is lock
-* contention. We update them once we have the lock.
-*/
-/* Total time (in nano seconds) spent waiting on this mutex. */
-nstime_t tot_wait_time;
-/* Max time (in nano seconds) spent on a single lock operation. */
-nstime_t max_wait_time;
-/* # of times have to wait for this mutex (after spinning). */
-uint64_t n_wait_times;
-/* # of times acquired the mutex through local spinning. */
-uint64_t n_spin_acquired;
-/* Max # of threads waiting for the mutex at the same time. */
-uint32_t max_n_thds;
-/* Current # of threads waiting on the lock. Atomic synced. */
-atomic_u32_t n_waiting_thds;
-/*
-* Data touched on the fast path. These are modified right after we
-* grab the lock, so it's placed closest to the end (i.e. right before
-* the lock) so that we have a higher chance of them being on the same
-* cacheline.
-*/
-/* # of times the mutex holder is different than the previous one. */
-uint64_t n_owner_switches;
-/* Previous mutex holder, to facilitate n_owner_switches. */
-tsdn_t *prev_owner;
-/* # of lock() operations in total. */
-uint64_t n_lock_ops;
-};
+#include "jemalloc/internal/mutex_prof.h"
 
 struct malloc_mutex_s {
 union {

@@ -139,7 +139,7 @@ typedef struct arena_stats_s {
 /* Number of bytes cached in tcache associated with this arena. */
 atomic_zu_t tcache_bytes; /* Derived. */
 
-mutex_prof_data_t mutex_prof_data[num_arena_prof_mutexes];
+mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
 
 /* One element for each large size class. */
 malloc_large_stats_t lstats[NSIZES - NBINS];


@@ -3,6 +3,7 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/size_classes.h"
 #include "jemalloc/internal/util.h"
@@ -193,12 +194,12 @@ CTL_PROTO(stats_##n##_max_num_thds)
 
 /* Global mutexes. */
 #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
-GLOBAL_PROF_MUTEXES
+MUTEX_PROF_GLOBAL_MUTEXES
 #undef OP
 
 /* Per arena mutexes. */
 #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
-ARENA_PROF_MUTEXES
+MUTEX_PROF_ARENA_MUTEXES
 #undef OP
 
 /* Arena bin mutexes. */
@@ -429,12 +430,12 @@ static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
 };
 
 #define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
-ARENA_PROF_MUTEXES
+MUTEX_PROF_ARENA_MUTEXES
 #undef OP
 
 static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
 #define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
-ARENA_PROF_MUTEXES
+MUTEX_PROF_ARENA_MUTEXES
 #undef OP
 };
@@ -473,12 +474,12 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
 };
 
 #define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
-GLOBAL_PROF_MUTEXES
+MUTEX_PROF_GLOBAL_MUTEXES
 #undef OP
 
 static const ctl_named_node_t stats_mutexes_node[] = {
 #define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
-GLOBAL_PROF_MUTEXES
+MUTEX_PROF_GLOBAL_MUTEXES
 #undef OP
 {NAME("reset"), CTL(stats_mutexes_reset)}
 };
@@ -737,7 +738,7 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
 arena_prof_mutex_##mtx]), \
 &(astats->astats.mutex_prof_data[ \
 arena_prof_mutex_##mtx]));
-ARENA_PROF_MUTEXES
+MUTEX_PROF_ARENA_MUTEXES
 #undef OP
 
 if (!destroyed) {
 accum_atomic_zu(&sdstats->astats.base,
@@ -2401,13 +2402,13 @@ CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
 #define OP(mtx) \
 RO_MUTEX_CTL_GEN(mutexes_##mtx, \
 ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
-GLOBAL_PROF_MUTEXES
+MUTEX_PROF_GLOBAL_MUTEXES
 #undef OP
 
 /* Per arena mutexes */
 #define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
 arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
-ARENA_PROF_MUTEXES
+MUTEX_PROF_ARENA_MUTEXES
 #undef OP
 
 /* tcache bin mutex */
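
For reference, the renamed global list drives the node table above exactly as before; hand-expanding OP() over MUTEX_PROF_GLOBAL_MUTEXES (a sketch written out only for illustration, reusing ctl.c's own NAME/CHILD/CTL macros) gives:

static const ctl_named_node_t stats_mutexes_node_expanded[] = {
	{NAME("ctl"), CHILD(named, stats_mutexes_ctl)},
	{NAME("prof"), CHILD(named, stats_mutexes_prof)},
	{NAME("reset"), CTL(stats_mutexes_reset)}
};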


@@ -4,6 +4,7 @@
 
 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/malloc_io.h"
 #include "jemalloc/internal/size_classes.h"


@@ -3,16 +3,18 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex_prof.h"
 
-const char *global_mutex_names[num_global_prof_mutexes] = {
+const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
 #define OP(mtx) #mtx,
-GLOBAL_PROF_MUTEXES
+MUTEX_PROF_GLOBAL_MUTEXES
 #undef OP
 };
 
-const char *arena_mutex_names[num_arena_prof_mutexes] = {
+const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
 #define OP(mtx) #mtx,
-ARENA_PROF_MUTEXES
+MUTEX_PROF_ARENA_MUTEXES
 #undef OP
 };
@@ -81,7 +83,7 @@ gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
 
 static void
 read_arena_bin_mutex_stats(unsigned arena_ind, unsigned bin_ind,
-uint64_t results[num_mutex_prof_counters]) {
+uint64_t results[mutex_prof_num_counters]) {
 char cmd[MUTEX_CTL_STR_MAX_LENGTH];
 #define OP(c, t) \
 gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
@@ -94,7 +96,7 @@ MUTEX_PROF_COUNTERS
 
 static void
 mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque,
-const char *name, uint64_t stats[num_mutex_prof_counters],
+const char *name, uint64_t stats[mutex_prof_num_counters],
 const char *json_indent, bool last) {
 malloc_cprintf(write_cb, cbopaque, "%s\"%s\": {\n", json_indent, name);
@@ -105,7 +107,7 @@ mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque,
 malloc_cprintf(write_cb, cbopaque, \
 fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \
 json_indent, #c, (t)stats[mutex_counter_##c], \
-(++k == num_mutex_prof_counters) ? "" : ",");
+(++k == mutex_prof_num_counters) ? "" : ",");
 MUTEX_PROF_COUNTERS
 #undef OP
 malloc_cprintf(write_cb, cbopaque, "%s}%s\n", json_indent,
@@ -187,7 +189,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 nmalloc, ndalloc, curregs, nrequests, nfills,
 nflushes, nreslabs, curslabs, mutex ? "," : "");
 if (mutex) {
-uint64_t mutex_stats[num_mutex_prof_counters];
+uint64_t mutex_stats[mutex_prof_num_counters];
 read_arena_bin_mutex_stats(i, j, mutex_stats);
 mutex_stats_output_json(write_cb, cbopaque,
 "mutex", mutex_stats, "\t\t\t\t\t\t", true);
@@ -226,7 +228,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 &max_wait, uint64_t);
 CTL_M2_M4_GET("stats.arenas.0.bins.0.mutex.num_ops",
 i, j, &num_ops, uint64_t);
-uint64_t mutex_stats[num_mutex_prof_counters];
+uint64_t mutex_stats[mutex_prof_num_counters];
 if (mutex) {
 read_arena_bin_mutex_stats(i, j, mutex_stats);
 }
@@ -336,11 +338,11 @@ stats_arena_lextents_print(void (*write_cb)(void *, const char *),
 
 static void
 read_arena_mutex_stats(unsigned arena_ind,
-uint64_t results[num_arena_prof_mutexes][num_mutex_prof_counters]) {
+uint64_t results[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]) {
 char cmd[MUTEX_CTL_STR_MAX_LENGTH];
 
-arena_prof_mutex_ind_t i;
-for (i = 0; i < num_arena_prof_mutexes; i++) {
+mutex_prof_arena_ind_t i;
+for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
 #define OP(c, t) \
 gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
 "arenas.0.mutexes", arena_mutex_names[i], #c); \
@@ -353,7 +355,7 @@ MUTEX_PROF_COUNTERS
 
 static void
 mutex_stats_output(void (*write_cb)(void *, const char *), void *cbopaque,
-const char *name, uint64_t stats[num_mutex_prof_counters],
+const char *name, uint64_t stats[mutex_prof_num_counters],
 bool first_mutex) {
 if (first_mutex) {
 /* Print title. */
@@ -380,15 +382,15 @@ MUTEX_PROF_COUNTERS
 static void
 stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
 void *cbopaque, bool json, bool json_end, unsigned arena_ind) {
-uint64_t mutex_stats[num_arena_prof_mutexes][num_mutex_prof_counters];
+uint64_t mutex_stats[mutex_prof_num_arena_mutexes][mutex_prof_num_counters];
 read_arena_mutex_stats(arena_ind, mutex_stats);
 
 /* Output mutex stats. */
 if (json) {
 malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n");
-arena_prof_mutex_ind_t i, last_mutex;
-last_mutex = num_arena_prof_mutexes - 1;
-for (i = 0; i < num_arena_prof_mutexes; i++) {
+mutex_prof_arena_ind_t i, last_mutex;
+last_mutex = mutex_prof_num_arena_mutexes - 1;
+for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
 mutex_stats_output_json(write_cb, cbopaque,
 arena_mutex_names[i], mutex_stats[i],
 "\t\t\t\t\t", (i == last_mutex));
@@ -396,8 +398,8 @@ stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
 malloc_cprintf(write_cb, cbopaque, "\t\t\t\t}%s\n",
 json_end ? "" : ",");
 } else {
-arena_prof_mutex_ind_t i;
-for (i = 0; i < num_arena_prof_mutexes; i++) {
+mutex_prof_arena_ind_t i;
+for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
 mutex_stats_output(write_cb, cbopaque,
 arena_mutex_names[i], mutex_stats[i], i == 0);
 }
@@ -993,11 +995,11 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
 
 static void
 read_global_mutex_stats(
-uint64_t results[num_global_prof_mutexes][num_mutex_prof_counters]) {
+uint64_t results[mutex_prof_num_global_mutexes][mutex_prof_num_counters]) {
 char cmd[MUTEX_CTL_STR_MAX_LENGTH];
 
-global_prof_mutex_ind_t i;
-for (i = 0; i < num_global_prof_mutexes; i++) {
+mutex_prof_global_ind_t i;
+for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
 #define OP(c, t) \
 gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
 "mutexes", global_mutex_names[i], #c); \
@@ -1020,7 +1022,7 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
 CTL_GET("stats.mapped", &mapped, size_t);
 CTL_GET("stats.retained", &retained, size_t);
 
-uint64_t mutex_stats[num_global_prof_mutexes][num_mutex_prof_counters];
+uint64_t mutex_stats[mutex_prof_num_global_mutexes][mutex_prof_num_counters];
 if (mutex) {
 read_global_mutex_stats(mutex_stats);
 }
@@ -1044,12 +1046,12 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
 if (mutex) {
 malloc_cprintf(write_cb, cbopaque,
 "\t\t\t\"mutexes\": {\n");
-global_prof_mutex_ind_t i;
-for (i = 0; i < num_global_prof_mutexes; i++) {
+mutex_prof_global_ind_t i;
+for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
 mutex_stats_output_json(write_cb, cbopaque,
 global_mutex_names[i], mutex_stats[i],
 "\t\t\t\t",
-i == num_global_prof_mutexes - 1);
+i == mutex_prof_num_global_mutexes - 1);
 }
 malloc_cprintf(write_cb, cbopaque, "\t\t\t}\n");
 }
@@ -1061,8 +1063,8 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
 " resident: %zu, mapped: %zu, retained: %zu\n",
 allocated, active, metadata, resident, mapped, retained);
 if (mutex) {
-global_prof_mutex_ind_t i;
-for (i = 0; i < num_global_prof_mutexes; i++) {
+mutex_prof_global_ind_t i;
+for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
 mutex_stats_output(write_cb, cbopaque,
 global_mutex_names[i], mutex_stats[i],
 i == 0);
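
The read_*_mutex_stats() helpers above rebuild one mallctl name per counter, e.g. "stats.mutexes.ctl.num_wait", and fetch it through the generated plumbing. A minimal sketch of what the name construction amounts to, assuming gen_mutex_ctl_str() simply joins its arguments under the stats namespace (the real body may differ):

/* Assumed shape; matches the call sites
 * gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, prefix, mutex, counter).
 * malloc_snprintf() is jemalloc's own snprintf from malloc_io. */
static void
gen_mutex_ctl_str_sketch(char *str, size_t buf_len, const char *prefix,
    const char *mutex, const char *counter) {
	malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
}

/* e.g. ("mutexes", "ctl", "num_wait") -> "stats.mutexes.ctl.num_wait" */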