Add thread.peak.[read|reset] mallctls.

These can be used to track net allocator activity on a per-thread basis.
This commit is contained in:
David Goldblatt
2020-05-27 14:31:00 -07:00
committed by David Goldblatt
parent fe7108305a
commit d82a164d0d
13 changed files with 269 additions and 5 deletions

View File

@@ -0,0 +1,24 @@
#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
#define JEMALLOC_INTERNAL_PEAK_EVENT_H
/*
* While peak.h contains the simple helper struct that tracks state, this
* contains the allocator tie-ins (and knows about tsd, the event module, etc.).
*/
/* Update the peak with current tsd state. */
void peak_event_update(tsd_t *tsd);
/* Set current state to zero. */
void peak_event_zero(tsd_t *tsd);
/*
 * Highest net allocator activity observed so far for this thread.
 * NOTE(review): presumably net bytes (allocations minus deallocations),
 * per the commit description — confirm against peak.h.
 */
uint64_t peak_event_max(tsd_t *tsd);
/* Manual hooks. */
/*
 * The activity-triggered hooks, wired into the thread-event module:
 * *_new_event_wait / *_postponed_event_wait report how many bytes of
 * (de)allocation activity should elapse before the next event fires;
 * *_event_handler is invoked when that threshold is crossed.
 */
uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */

View File

@@ -53,10 +53,12 @@ void tsd_te_init(tsd_t *tsd);
* E(event, (condition), is_alloc_event)
*/
/*
 * X-macro listing every thread event. Each entry is
 * E(event, (condition), is_alloc_event): the event's name, the runtime
 * condition under which it is active, and whether it is driven by
 * allocation (true) or deallocation (false) activity.
 *
 * Note: the diff rendering had concatenated the pre-change (4-entry) and
 * post-change (6-entry) versions of this macro; only the final version —
 * with the two peak events added and a continuation backslash on every
 * non-final line — is kept here.
 */
#define ITERATE_OVER_ALL_EVENTS						\
	E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true)		\
	E(prof_sample, (config_prof && opt_prof), true)			\
	E(stats_interval, (opt_stats_interval >= 0), true)		\
	E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false)	\
	E(peak_alloc, config_stats, true)				\
	E(peak_dalloc, config_stats, false)
/*
 * Expand each event entry into C(<event>_event_wait); the condition and
 * is-alloc flag are unused here. C(...) is expected to be defined by the
 * including context before ITERATE_OVER_ALL_EVENTS is expanded — it is not
 * defined in this view.
 */
#define E(event, condition_unused, is_alloc_event_unused) \
C(event##_event_wait)

View File

@@ -5,6 +5,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/peak.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
@@ -69,6 +70,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(prof_sample_last_event, uint64_t, uint64_t) \
O(stats_interval_event_wait, uint64_t, uint64_t) \
O(stats_interval_last_event, uint64_t, uint64_t) \
O(peak_alloc_event_wait, uint64_t, uint64_t) \
O(peak_dalloc_event_wait, uint64_t, uint64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
O(prng_state, uint64_t, uint64_t) \
O(iarena, arena_t *, arena_t *) \
@@ -77,6 +80,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
O(binshards, tsd_binshards_t, tsd_binshards_t)\
O(tsd_link, tsd_link_t, tsd_link_t) \
O(in_hook, bool, bool) \
O(peak, peak_t, peak_t) \
O(tcache_slow, tcache_slow_t, tcache_slow_t) \
O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
@@ -95,6 +99,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
/* prof_sample_last_event */ 0, \
/* stats_interval_event_wait */ 0, \
/* stats_interval_last_event */ 0, \
/* peak_alloc_event_wait */ 0, \
/* peak_dalloc_event_wait */ 0, \
/* prof_tdata */ NULL, \
/* prng_state */ 0, \
/* iarena */ NULL, \
@@ -103,6 +109,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
/* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
/* tsd_link */ {NULL}, \
/* in_hook */ false, \
/* peak */ PEAK_INITIALIZER, \
/* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
/* rtree_ctx */ RTREE_CTX_ZERO_INITIALIZER,