Header refactoring: ctl - unify and remove from catchall.

In order to do this, we introduce the mutex_prof module, which breaks a circular
dependency between ctl and prof.
This commit is contained in:
David Goldblatt
2017-04-24 17:09:56 -07:00
committed by David Goldblatt
parent c67c3e4a63
commit 89e2d3c12b
11 changed files with 257 additions and 264 deletions

View File

@@ -0,0 +1,130 @@
#ifndef JEMALLOC_INTERNAL_CTL_H
#define JEMALLOC_INTERNAL_CTL_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
/*
 * Common header embedded in every node of the ctl (mallctl) lookup tree;
 * it records which flavor of node this is.
 */
typedef struct ctl_node_s {
/* True for a ctl_named_node_t, false for a ctl_indexed_node_t. */
bool named;
} ctl_node_t;
/*
 * A node reached by name (e.g. "opt", "stats"): either an interior node with
 * children, or a terminal node with a ctl callback.
 */
typedef struct ctl_named_node_s {
ctl_node_t node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
size_t nchildren;
const ctl_node_t *children;
/* Read/write handler invoked for terminal nodes. */
int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
size_t);
} ctl_named_node_t;
typedef struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
} ctl_indexed_node_t;
/* Full statistics for one arena, as reported through the ctl interface. */
typedef struct ctl_arena_stats_s {
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
/* Per-bin stats for the NBINS small size classes. */
malloc_bin_stats_t bstats[NBINS];
/* Per-size-class stats for the large size classes. */
malloc_large_stats_t lstats[NSIZES - NBINS];
} ctl_arena_stats_t;
/* Process-wide statistics snapshot exposed via the "stats.*" mallctls. */
typedef struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
/* Contention data for each global (non-arena) profiled mutex. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
} ctl_stats_t;
typedef struct ctl_arena_s ctl_arena_t;
/* ctl's per-arena record: identity, basic counters, and optional stats. */
struct ctl_arena_s {
unsigned arena_ind;
bool initialized;
/* Linkage for ctl_arenas_t's list of destroyed arenas. */
ql_elm(ctl_arena_t) destroyed_link;
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_time;
ssize_t muzzy_decay_time;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;
};
/* Top-level registry of per-arena ctl records. */
typedef struct ctl_arenas_s {
/* Bumped by the "epoch" mallctl to refresh cached stats. */
uint64_t epoch;
unsigned narenas;
ql_head(ctl_arena_t) destroyed;
/*
 * Element 0 corresponds to merged stats for extant arenas (accessed via
 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
 * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
 * remaining MALLOCX_ARENA_MAX+1 elements correspond to arenas.
 */
ctl_arena_t *arenas[MALLOCX_ARENA_MAX + 3];
} ctl_arenas_t;
/* Backs the public mallctl(): look up *name* and read/write its value. */
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
/* Backs mallctlnametomib(): translate *name* into a MIB for reuse. */
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
/* Backs mallctlbymib(): operate on a previously translated MIB. */
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
/* One-time initialization of the ctl machinery; returns true on error. */
bool ctl_boot(void);
/* Fork handlers guarding ctl state across fork(). */
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
/*
 * "x" variants of the mallctl API: print a diagnostic and abort() on any
 * failure instead of returning an error to the caller.
 */
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
/* Abort-on-failure wrapper around je_mallctlnametomib(). */
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
/* Abort-on-failure wrapper around je_mallctlbymib(). */
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_H */

View File

@@ -1,48 +0,0 @@
#ifndef JEMALLOC_INTERNAL_CTL_EXTERNS_H
#define JEMALLOC_INTERNAL_CTL_EXTERNS_H
#include "jemalloc/internal/malloc_io.h"
/* Maximum ctl tree depth. */
#define CTL_MAX_DEPTH 7
/* Backs the public mallctl(): look up *name* and read/write its value. */
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
/* Backs mallctlnametomib(): translate *name* into a MIB for reuse. */
int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
size_t *miblenp);
/* Backs mallctlbymib(): operate on a previously translated MIB. */
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
/* One-time initialization of the ctl machinery; returns true on error. */
bool ctl_boot(void);
/* Fork handlers guarding ctl state across fork(). */
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
/*
 * "x" variants of the mallctl API: print a diagnostic and abort() on any
 * failure instead of returning an error to the caller.
 */
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
/* Abort-on-failure wrapper around je_mallctlnametomib(). */
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
/* Abort-on-failure wrapper around je_mallctlbymib(). */
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
#endif /* JEMALLOC_INTERNAL_CTL_EXTERNS_H */

View File

@@ -1,85 +0,0 @@
#ifndef JEMALLOC_INTERNAL_CTL_STRUCTS_H
#define JEMALLOC_INTERNAL_CTL_STRUCTS_H
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
/* Common header for all nodes in the ctl (mallctl) lookup tree. */
struct ctl_node_s {
/* True for a named node, false for an indexed node. */
bool named;
};
/* A node reached by name; terminal nodes carry the ctl callback. */
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
size_t nchildren;
const ctl_node_t *children;
/* Read/write handler invoked for terminal nodes. */
int (*ctl)(tsd_t *, const size_t *, size_t, void *,
size_t *, void *, size_t);
};
/* A node whose children are resolved dynamically by integer index. */
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
size_t);
};
/* Full statistics for one arena, as reported through the ctl interface. */
struct ctl_arena_stats_s {
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
/* Per-bin stats for the NBINS small size classes. */
malloc_bin_stats_t bstats[NBINS];
/* Per-size-class stats for the large size classes. */
malloc_large_stats_t lstats[NSIZES - NBINS];
};
/* Process-wide statistics snapshot exposed via the "stats.*" mallctls. */
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t resident;
size_t mapped;
size_t retained;
/* Contention data for each global (non-arena) profiled mutex. */
mutex_prof_data_t mutex_prof_data[num_global_prof_mutexes];
};
/* ctl's per-arena record: identity, basic counters, and optional stats. */
struct ctl_arena_s {
unsigned arena_ind;
bool initialized;
/* Linkage for ctl_arenas_t's list of destroyed arenas. */
ql_elm(ctl_arena_t) destroyed_link;
/* Basic stats, supported even if !config_stats. */
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_time;
ssize_t muzzy_decay_time;
size_t pactive;
size_t pdirty;
size_t pmuzzy;
/* NULL if !config_stats. */
ctl_arena_stats_t *astats;
};
/* Top-level registry of per-arena ctl records. */
struct ctl_arenas_s {
/* Bumped by the "epoch" mallctl to refresh cached stats. */
uint64_t epoch;
unsigned narenas;
ql_head(ctl_arena_t) destroyed;
/*
 * Element 0 corresponds to merged stats for extant arenas (accessed via
 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
 * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
 * remaining MALLOCX_ARENA_MAX+1 elements correspond to arenas.
 */
ctl_arena_t *arenas[MALLOCX_ARENA_MAX + 3];
};
#endif /* JEMALLOC_INTERNAL_CTL_STRUCTS_H */

View File

@@ -1,57 +0,0 @@
#ifndef JEMALLOC_INTERNAL_CTL_TYPES_H
#define JEMALLOC_INTERNAL_CTL_TYPES_H
/*
 * Global mutexes whose contention is profiled.  OP is an X-macro hook,
 * expanded below to generate the matching index enum.
 */
#define GLOBAL_PROF_MUTEXES \
OP(ctl) \
OP(prof)
/* Indices for the global profiled mutexes; the last member is the count. */
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
GLOBAL_PROF_MUTEXES
#undef OP
num_global_prof_mutexes
} global_prof_mutex_ind_t;
/*
 * Per-arena mutexes whose contention is profiled.  OP is an X-macro hook,
 * expanded below to generate the matching index enum.
 */
#define ARENA_PROF_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
/* Indices for the arena profiled mutexes; the last member is the count. */
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
ARENA_PROF_MUTEXES
#undef OP
num_arena_prof_mutexes
} arena_prof_mutex_ind_t;
/*
 * Counters exported for each profiled mutex.  OP(name, type) is an X-macro
 * hook, expanded below to generate the matching index enum.
 */
#define MUTEX_PROF_COUNTERS \
OP(num_ops, uint64_t) \
OP(num_wait, uint64_t) \
OP(num_spin_acq, uint64_t) \
OP(num_owner_switch, uint64_t) \
OP(total_wait_time, uint64_t) \
OP(max_wait_time, uint64_t) \
OP(max_num_thds, uint32_t)
/* Indices for the per-mutex counters; the last member is the count. */
typedef enum {
#define OP(counter, type) mutex_counter_##counter,
MUTEX_PROF_COUNTERS
#undef OP
num_mutex_prof_counters
} mutex_prof_counter_ind_t;
/* Forward typedefs for the ctl structs (definitions live in ctl_structs.h). */
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
typedef struct ctl_arena_s ctl_arena_t;
typedef struct ctl_arenas_s ctl_arenas_t;
#endif /* JEMALLOC_INTERNAL_CTL_TYPES_H */

View File

@@ -40,7 +40,6 @@
/* TYPES */
/******************************************************************************/
#include "jemalloc/internal/ctl_types.h"
#include "jemalloc/internal/witness_types.h"
#include "jemalloc/internal/mutex_types.h"
#include "jemalloc/internal/tsd_types.h"
@@ -59,7 +58,6 @@
#include "jemalloc/internal/witness_structs.h"
#include "jemalloc/internal/mutex_structs.h"
#include "jemalloc/internal/ctl_structs.h"
#include "jemalloc/internal/arena_structs_a.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/extent_dss_structs.h"
@@ -75,7 +73,6 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/ctl_externs.h"
#include "jemalloc/internal/witness_externs.h"
#include "jemalloc/internal/mutex_externs.h"
#include "jemalloc/internal/extent_externs.h"

View File

@@ -0,0 +1,84 @@
#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
#define JEMALLOC_INTERNAL_MUTEX_PROF_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
/*
 * Global (non-arena) mutexes whose contention is profiled.  OP is an
 * X-macro hook, expanded below to generate the matching index enum.
 */
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(ctl) \
OP(prof)
/* Indices for the global profiled mutexes; the last member is the count. */
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
mutex_prof_num_global_mutexes
} mutex_prof_global_ind_t;
/*
 * Per-arena mutexes whose contention is profiled.  OP is an X-macro hook,
 * expanded below to generate the matching index enum.
 */
#define MUTEX_PROF_ARENA_MUTEXES \
OP(large) \
OP(extent_avail) \
OP(extents_dirty) \
OP(extents_muzzy) \
OP(extents_retained) \
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
OP(tcache_list)
/* Indices for the arena profiled mutexes; the last member is the count. */
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
mutex_prof_num_arena_mutexes
} mutex_prof_arena_ind_t;
/*
 * Counters exported for each profiled mutex.  OP(name, type) is an X-macro
 * hook, expanded below to generate the matching index enum.
 */
#define MUTEX_PROF_COUNTERS \
OP(num_ops, uint64_t) \
OP(num_wait, uint64_t) \
OP(num_spin_acq, uint64_t) \
OP(num_owner_switch, uint64_t) \
OP(total_wait_time, uint64_t) \
OP(max_wait_time, uint64_t) \
OP(max_num_thds, uint32_t)
/* Indices for the per-mutex counters; the last member is the count. */
typedef enum {
#define OP(counter, type) mutex_counter_##counter,
MUTEX_PROF_COUNTERS
#undef OP
mutex_prof_num_counters
} mutex_prof_counter_ind_t;
/* Contention statistics tracked for a single mutex. */
typedef struct mutex_prof_data_s {
/*
 * Counters touched on the slow path, i.e. when there is lock
 * contention.  We update them once we have the lock.
 */
/* Total time (in nanoseconds) spent waiting on this mutex. */
nstime_t tot_wait_time;
/* Max time (in nanoseconds) spent on a single lock operation. */
nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */
uint64_t n_spin_acquired;
/* Max # of threads waiting for the mutex at the same time. */
uint32_t max_n_thds;
/* Current # of threads waiting on the lock.  Atomically synchronized. */
atomic_u32_t n_waiting_thds;
/*
 * Data touched on the fast path.  These are modified right after we
 * grab the lock, so it's placed closest to the end (i.e. right before
 * the lock) so that we have a higher chance of them being on the same
 * cacheline.
 */
/* # of times the mutex holder is different than the previous one. */
uint64_t n_owner_switches;
/* Previous mutex holder, to facilitate n_owner_switches. */
tsdn_t *prev_owner;
/* # of lock() operations in total. */
uint64_t n_lock_ops;
} mutex_prof_data_t;
#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */

View File

@@ -2,39 +2,7 @@
#define JEMALLOC_INTERNAL_MUTEX_STRUCTS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/nstime.h"
/* Contention statistics tracked for a single mutex. */
struct mutex_prof_data_s {
/*
 * Counters touched on the slow path, i.e. when there is lock
 * contention.  We update them once we have the lock.
 */
/* Total time (in nanoseconds) spent waiting on this mutex. */
nstime_t tot_wait_time;
/* Max time (in nanoseconds) spent on a single lock operation. */
nstime_t max_wait_time;
/* # of times have to wait for this mutex (after spinning). */
uint64_t n_wait_times;
/* # of times acquired the mutex through local spinning. */
uint64_t n_spin_acquired;
/* Max # of threads waiting for the mutex at the same time. */
uint32_t max_n_thds;
/* Current # of threads waiting on the lock.  Atomically synchronized. */
atomic_u32_t n_waiting_thds;
/*
 * Data touched on the fast path.  These are modified right after we
 * grab the lock, so it's placed closest to the end (i.e. right before
 * the lock) so that we have a higher chance of them being on the same
 * cacheline.
 */
/* # of times the mutex holder is different than the previous one. */
uint64_t n_owner_switches;
/* Previous mutex holder, to facilitate n_owner_switches. */
tsdn_t *prev_owner;
/* # of lock() operations in total. */
uint64_t n_lock_ops;
};
#include "jemalloc/internal/mutex_prof.h"
struct malloc_mutex_s {
union {

View File

@@ -139,7 +139,7 @@ typedef struct arena_stats_s {
/* Number of bytes cached in tcache associated with this arena. */
atomic_zu_t tcache_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[num_arena_prof_mutexes];
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
malloc_large_stats_t lstats[NSIZES - NBINS];