Add logging for sampled allocations
- opt_prof_log flag starts logging automatically at runtime
- prof_log_{start,stop} mallctls for manual control
committed by David Goldblatt
parent eb261e53a6
commit b664bd7935
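Editor's note: the two control paths named in the commit message can be exercised roughly as follows. This is a hedged sketch against jemalloc's public API, not part of the diff; the dotted mallctl names ("prof.log_start"/"prof.log_stop") and the output filename are assumptions inferred from the prof_log_{start,stop} prototypes below, and it presumes a build configured with --enable-prof.

/*
 * Sketch: driving the new logging feature from application code.
 * Boot-time path: MALLOC_CONF="prof:true,prof_log:true" (opt_prof_log).
 */
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

static void
log_a_workload(void) {
	/* Manual path: start logging, run the workload, stop logging. */
	const char *filename = "prof_log.json"; /* hypothetical output path */
	mallctl("prof.log_start", NULL, NULL, (void *)&filename,
	    sizeof(const char *));

	void *p = malloc(1024 * 1024); /* sampled allocations get logged */
	free(p);

	mallctl("prof.log_stop", NULL, NULL, NULL, 0);
}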
@@ -78,6 +78,32 @@ arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
 	large_prof_tctx_reset(tsdn, extent);
 }
 
+JEMALLOC_ALWAYS_INLINE nstime_t
+arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
+    alloc_ctx_t *alloc_ctx) {
+	cassert(config_prof);
+	assert(ptr != NULL);
+
+	extent_t *extent = iealloc(tsdn, ptr);
+	/*
+	 * Unlike arena_prof_tctx_{get,set}, we only call this once we're
+	 * sure we have a sampled allocation.
+	 */
+	assert(!extent_slab_get(extent));
+	return large_prof_alloc_time_get(extent);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
+    nstime_t t) {
+	cassert(config_prof);
+	assert(ptr != NULL);
+
+	extent_t *extent = iealloc(tsdn, ptr);
+	assert(!extent_slab_get(extent));
+	large_prof_alloc_time_set(extent, t);
+}
+
 JEMALLOC_ALWAYS_INLINE void
 arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
 	tsd_t *tsd;
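A note on the assertion above: the allocation time lives only on large (non-slab) extents, so these helpers are only valid once the caller knows the allocation was sampled. A hedged sketch of the expected caller-side guard, reusing the tctx sentinel check that appears in prof_free further down (illustrative, not from this commit):

	prof_tctx_t *tctx = prof_tctx_get(tsdn, ptr, alloc_ctx);
	if ((uintptr_t)tctx > (uintptr_t)1U) {	/* sampled allocation? */
		nstime_t t = arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
		/* ... safe: the extent is large, so e_alloc_time exists ... */
	}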
@@ -177,6 +177,11 @@ extent_prof_tctx_get(const extent_t *extent) {
 	    ATOMIC_ACQUIRE);
 }
 
+static inline nstime_t
+extent_prof_alloc_time_get(const extent_t *extent) {
+	return extent->e_alloc_time;
+}
+
 static inline void
 extent_arena_set(extent_t *extent, arena_t *arena) {
 	unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
@@ -300,6 +305,11 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
 	atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
 }
 
+static inline void
+extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
+	nstime_copy(&extent->e_alloc_time, &t);
+}
+
 static inline void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
@@ -161,11 +161,13 @@ struct extent_s {
 		/* Small region slab metadata. */
 		arena_slab_data_t	e_slab_data;
 
-		/*
-		 * Profile counters, used for large objects. Points to a
-		 * prof_tctx_t.
-		 */
-		atomic_p_t		e_prof_tctx;
+		/* Profiling data, used for large objects. */
+		struct {
+			/* Time when this was allocated. */
+			nstime_t		e_alloc_time;
+			/* Points to a prof_tctx_t. */
+			atomic_p_t		e_prof_tctx;
+		};
 	};
 };
 
 typedef ql_head(extent_t) extent_list_t;
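The hunk above nests the profiling fields in an anonymous struct inside the existing union, so e_alloc_time and e_prof_tctx together overlay e_slab_data rather than growing every extent. A standalone illustration of the layout trick, with stand-in types rather than jemalloc's own (anonymous struct members are C11):

#include <stdio.h>

struct extent_like {
	union {
		int	slab_data;	/* stands in for arena_slab_data_t */
		struct {		/* anonymous: members used directly */
			long	alloc_time;	/* stands in for nstime_t */
			void	*prof_tctx;	/* stands in for atomic_p_t */
		};
	};
};

int
main(void) {
	struct extent_like e;
	e.alloc_time = 42;	/* addressed without naming the struct */
	printf("size: %zu, t=%ld\n", sizeof(e), e.alloc_time);
	return 0;
}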
@@ -26,4 +26,7 @@ prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
 void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
 void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
 
+nstime_t large_prof_alloc_time_get(const extent_t *extent);
+void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
+
 #endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
@@ -14,6 +14,7 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */
 extern bool opt_prof_final; /* Final profile dumping. */
 extern bool opt_prof_leak; /* Dump leak summary at exit. */
 extern bool opt_prof_accum; /* Report cumulative bytes. */
+extern bool opt_prof_log; /* Turn logging on at boot. */
 extern char opt_prof_prefix[
     /* Minimize memory bloat for non-prof builds. */
 #ifdef JEMALLOC_PROF
@@ -45,7 +46,8 @@ extern size_t lg_prof_sample;
 void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
 void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
     prof_tctx_t *tctx);
-void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
+void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
+    prof_tctx_t *tctx);
 void bt_init(prof_bt_t *bt, void **vec);
 void prof_backtrace(prof_bt_t *bt);
 prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
@@ -72,6 +74,8 @@ void prof_reset(tsd_t *tsd, size_t lg_sample);
 void prof_tdata_cleanup(tsd_t *tsd);
 bool prof_active_get(tsdn_t *tsdn);
 bool prof_active_set(tsdn_t *tsdn, bool active);
+bool prof_log_start(tsdn_t *tsdn, const char *filename);
+bool prof_log_stop(tsdn_t *tsdn);
 const char *prof_thread_name_get(tsd_t *tsd);
 int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
 bool prof_thread_active_get(tsd_t *tsd);
@@ -61,6 +61,23 @@ prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
 	arena_prof_tctx_reset(tsdn, ptr, tctx);
 }
 
+JEMALLOC_ALWAYS_INLINE nstime_t
+prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+	cassert(config_prof);
+	assert(ptr != NULL);
+
+	return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
+    nstime_t t) {
+	cassert(config_prof);
+	assert(ptr != NULL);
+
+	arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
+}
+
 JEMALLOC_ALWAYS_INLINE bool
 prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
     prof_tdata_t **tdata_out) {
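A hedged sketch of how these wrappers presumably pair up: the sampling path stamps the current time at allocation, and the free path (which now receives ptr, per the prof_free_sampled_object prototype change above) can read it back to compute the object's lifetime for a log record. nstime_init/nstime_update/nstime_subtract are existing jemalloc utilities; the two functions themselves are illustrative, not from this commit:

static void
stamp_sampled_alloc(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	nstime_t t;
	nstime_init(&t, 0);
	nstime_update(&t);	/* load the current time into t */
	prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
}

static nstime_t
sampled_lifetime(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	nstime_t now, alloc_time;
	nstime_init(&now, 0);
	nstime_update(&now);
	alloc_time = prof_alloc_time_get(tsdn, ptr, alloc_ctx);
	nstime_subtract(&now, &alloc_time);	/* now -= alloc_time */
	return now;
}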
@@ -187,7 +204,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 	 * counters.
 	 */
 	if (unlikely(old_sampled)) {
-		prof_free_sampled_object(tsd, old_usize, old_tctx);
+		prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
 	}
 }
 
@@ -199,7 +216,7 @@ prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 
 	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
-		prof_free_sampled_object(tsd, usize, tctx);
+		prof_free_sampled_object(tsd, ptr, usize, tctx);
 	}
 }
 
@@ -27,9 +27,9 @@
 #define WITNESS_RANK_PROF_BT2GCTX	6U
 #define WITNESS_RANK_PROF_TDATAS	7U
 #define WITNESS_RANK_PROF_TDATA		8U
-#define WITNESS_RANK_PROF_GCTX		9U
-
-#define WITNESS_RANK_BACKGROUND_THREAD	10U
+#define WITNESS_RANK_PROF_LOG		9U
+#define WITNESS_RANK_PROF_GCTX		10U
+#define WITNESS_RANK_BACKGROUND_THREAD	11U
 
 /*
  * Used as an argument to witness_assert_depth_to_rank() in order to validate
@@ -37,19 +37,19 @@
  * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
  * definition can have the same value as the minimally ranked core lock.
  */
-#define WITNESS_RANK_CORE		11U
+#define WITNESS_RANK_CORE		12U
 
-#define WITNESS_RANK_DECAY		11U
-#define WITNESS_RANK_TCACHE_QL		12U
-#define WITNESS_RANK_EXTENT_GROW	13U
-#define WITNESS_RANK_EXTENTS		14U
-#define WITNESS_RANK_EXTENT_AVAIL	15U
+#define WITNESS_RANK_DECAY		12U
+#define WITNESS_RANK_TCACHE_QL		13U
+#define WITNESS_RANK_EXTENT_GROW	14U
+#define WITNESS_RANK_EXTENTS		15U
+#define WITNESS_RANK_EXTENT_AVAIL	16U
 
-#define WITNESS_RANK_EXTENT_POOL	16U
-#define WITNESS_RANK_RTREE		17U
-#define WITNESS_RANK_BASE		18U
-#define WITNESS_RANK_ARENA_LARGE	19U
-#define WITNESS_RANK_HOOK		20U
+#define WITNESS_RANK_EXTENT_POOL	17U
+#define WITNESS_RANK_RTREE		18U
+#define WITNESS_RANK_BASE		19U
+#define WITNESS_RANK_ARENA_LARGE	20U
+#define WITNESS_RANK_HOOK		21U
 
 #define WITNESS_RANK_LEAF		0xffffffffU
 #define WITNESS_RANK_BIN		WITNESS_RANK_LEAF
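The renumbering above exists to slot WITNESS_RANK_PROF_LOG in at 9U, pushing every later rank up by one. Since the witness machinery requires locks to be acquired in increasing rank order, a mutex guarding the log state would be initialized with the new rank; a hedged sketch of that wiring (the mutex, its name, and the function are assumptions, with malloc_mutex_init used per jemalloc's internal API):

static malloc_mutex_t log_mtx;

static bool
prof_log_mtx_init(void) {
	/*
	 * PROF_LOG ranks below PROF_GCTX, so when both are needed the log
	 * lock must be taken first; the witness checker enforces this.
	 */
	return malloc_mutex_init(&log_mtx, "prof_log",
	    WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive);
}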