Record request size in prof recent entries

Yinan Zhang 2020-01-09 10:20:34 -08:00
parent 40a391408c
commit 2b604a3016
8 changed files with 24 additions and 23 deletions
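
The request size passed to an allocation call is generally smaller than the usable size ("usize") that jemalloc actually provides, because requests are rounded up to a size class. The diffs below switch the recent-allocation records from storing usize to storing the original request size, deriving usize on demand via sz_s2u() at dump time. A minimal standalone sketch of the size/usize distinction, using jemalloc's public mallocx()/sallocx() API (the 17-byte request is only an illustrative value, not taken from this commit):

#include <assert.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Request 17 bytes; jemalloc serves it from a larger size class. */
	size_t size = 17;
	void *p = mallocx(size, 0);
	assert(p != NULL);

	/* Usable size of the allocation, i.e. what "usize" refers to. */
	size_t usize = sallocx(p, 0);
	printf("request size = %zu, usable size = %zu\n", size, usize);
	assert(usize >= size);

	dallocx(p, 0);
	return 0;
}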

View File

@@ -54,8 +54,8 @@ prof_tdata_t *prof_tdata_init(tsd_t *tsd);
 prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
 
 void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize,
-    prof_tctx_t *tctx);
+void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
+    size_t usize, prof_tctx_t *tctx);
 void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
 prof_tctx_t *prof_tctx_create(tsd_t *tsd);
 #ifdef JEMALLOC_JET

View File

@@ -126,22 +126,22 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
-    prof_tctx_t *tctx) {
+prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
+    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
 
 	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
-		prof_malloc_sample_object(tsd, ptr, usize, tctx);
+		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
 	} else {
 		prof_tctx_reset(tsd, ptr, alloc_ctx);
 	}
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
-    bool prof_active, const void *old_ptr, size_t old_usize,
+prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
+    prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
     prof_info_t *old_prof_info) {
 	bool sampled, old_sampled, moved;
 
@@ -168,7 +168,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 	moved = (ptr != old_ptr);
 
 	if (unlikely(sampled)) {
-		prof_malloc_sample_object(tsd, ptr, usize, tctx);
+		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
 	} else if (moved) {
 		prof_tctx_reset(tsd, ptr, NULL);
 	} else if (unlikely(old_sampled)) {

View File

@@ -2,7 +2,7 @@
 #define JEMALLOC_INTERNAL_PROF_RECENT_EXTERNS_H
 
 bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
-void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t usize);
+void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size);
 void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
 bool prof_recent_init();
 void edata_prof_recent_alloc_init(edata_t *edata);

View File

@@ -213,7 +213,7 @@ struct prof_recent_s {
 	nstime_t dalloc_time;
 
 	prof_recent_t *next;
-	size_t usize;
+	size_t size;
 	prof_tctx_t *alloc_tctx;
 	edata_t *alloc_edata;	/* NULL means allocation has been freed. */
 	prof_tctx_t *dalloc_tctx;

View File

@@ -2175,7 +2175,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 			prof_alloc_rollback(tsd, tctx, true);
 			goto label_oom;
 		}
-		prof_malloc(tsd, allocation, usize, &alloc_ctx, tctx);
+		prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx);
 	} else {
 		assert(!opt_prof);
 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
@@ -3045,8 +3045,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
 		 */
 		*usize = isalloc(tsd_tsdn(tsd), p);
 	}
-	prof_realloc(tsd, p, *usize, tctx, prof_active, old_ptr, old_usize,
-	    &old_prof_info);
+	prof_realloc(tsd, p, size, *usize, tctx, prof_active, old_ptr,
+	    old_usize, &old_prof_info);
 
 	return p;
 }
@@ -3338,7 +3338,7 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
 		prof_alloc_rollback(tsd, tctx, false);
 	} else {
 		prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
-		prof_realloc(tsd, ptr, usize, tctx, prof_active, ptr,
+		prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr,
 		    old_usize, &prof_info);
 	}
 

View File

@@ -145,8 +145,8 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
 }
 
 void
-prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize,
-    prof_tctx_t *tctx) {
+prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
+    size_t usize, prof_tctx_t *tctx) {
 	edata_t *edata = iealloc(tsd_tsdn(tsd), ptr);
 	prof_info_set(tsd, edata, tctx);
 
@@ -162,7 +162,7 @@ prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize,
 	malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
 	if (record_recent) {
 		assert(tctx == edata_prof_tctx_get(edata));
-		prof_recent_alloc(tsd, edata, usize);
+		prof_recent_alloc(tsd, edata, size);
 	}
 }
 

View File

@@ -249,7 +249,7 @@ prof_recent_alloc_assert_count(tsd_t *tsd) {
 }
 
 void
-prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t usize) {
+prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size) {
 	assert(edata != NULL);
 	prof_tctx_t *tctx = edata_prof_tctx_get(edata);
 
@@ -312,7 +312,7 @@ prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t usize) {
 	{
 		/* Fill content into the dummy node. */
 		prof_recent_t *node = prof_recent_alloc_dummy;
-		node->usize = usize;
+		node->size = size;
 		nstime_copy(&node->alloc_time,
 		    edata_prof_alloc_time_get(edata));
 		node->alloc_tctx = tctx;
@@ -487,8 +487,9 @@ prof_recent_alloc_dump(tsd_t *tsd, void (*write_cb)(void *, const char *),
 	    n = prof_recent_alloc_next(tsd, n)) {
 		emitter_json_object_begin(&emitter);
 
-		emitter_json_kv(&emitter, "usize", emitter_type_size,
-		    &n->usize);
+		emitter_json_kv(&emitter, "size", emitter_type_size, &n->size);
+		size_t usize = sz_s2u(n->size);
+		emitter_json_kv(&emitter, "usize", emitter_type_size, &usize);
 		bool released = n->alloc_edata == NULL;
 		emitter_json_kv(&emitter, "released", emitter_type_bool,
 		    &released);

View File

@@ -116,8 +116,8 @@ static void confirm_malloc(tsd_t *tsd, void *p) {
 
 static void confirm_record_size(tsd_t *tsd, prof_recent_t *n, unsigned kth) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-	assert_zu_eq(n->usize, sz_s2u(NTH_REQ_SIZE(kth)),
-	    "Recorded allocation usize is wrong");
+	assert_zu_eq(n->size, NTH_REQ_SIZE(kth),
+	    "Recorded allocation size is wrong");
 }
 
 static void confirm_record_living(tsd_t *tsd, prof_recent_t *n) {