Fix off-by-one backtracing issues.

Rewrite prof_alloc_prep() as a cpp macro, PROF_ALLOC_PREP(), in order to
remove any doubt as to whether an additional stack frame is created.
Prior to this change, it was assumed that inlining would reduce the
total number of frames in the backtrace, but in practice behavior wasn't
completely predictable.
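
As an aside (not part of the commit), the unpredictability is easy to reproduce with a standalone snippet that uses glibc's backtrace(3) in place of prof_backtrace(); all names below are made up for illustration. The function-based probe may or may not contribute its own stack frame depending on whether the compiler inlines it, whereas the macro expands directly in the caller and can never add one:

/*
 * Standalone illustration only.  Compiled at -O0 the function-based path
 * typically reports one more frame than the macro-based path; at -O2 the
 * compiler may inline prep_fn() and the difference disappears, which is
 * exactly the unpredictability the macro avoids.
 */
#include <execinfo.h>
#include <stdio.h>

static int
frames_here(void)
{
        void *vec[32];

        /* Number of return addresses currently on the stack. */
        return (backtrace(vec, 32));
}

/* Function form: may or may not add its own frame, depending on inlining. */
static int
prep_fn(void)
{
        return (frames_here());
}

/* Macro form: always expands in the caller, so it can never add a frame. */
#define PREP_MACRO(ret) do { \
        ret = frames_here(); \
} while (0)

int
main(void)
{
        int depth_fn, depth_macro;

        depth_fn = prep_fn();
        PREP_MACRO(depth_macro);
        printf("function: %d frames, macro: %d frames\n", depth_fn,
            depth_macro);
        return (0);
}

In the diff below, PROF_ALLOC_PREP() follows the same principle: the caller passes the number of frames to ignore explicitly via nignore, receives the result through ret, and the do/while (0) wrapper lets the macro bail out with break when prof_tdata_init() fails.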

Create imemalign() and call it from posix_memalign(), memalign(), and
valloc(), so that all entry points require the same number of stack
frames to be ignored during backtracing.
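
The jemalloc.c side of the change is not reproduced in the excerpt below, so the following sketch only illustrates the intended shape (it delegates to C11 aligned_alloc() and leaves out the profiling hooks and error handling of the real imemalign()): every public entry point is exactly one call deep when it reaches imemalign(), so a backtrace captured inside the common path can always skip the same number of frames.

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

/* Common implementation; all public entry points are one frame above it. */
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
        void *ret;

        if (alignment < sizeof(void *) || (alignment & (alignment - 1)) != 0)
                return (EINVAL);
        /* Round size up so that aligned_alloc()'s C11 precondition holds. */
        ret = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
        if (ret == NULL)
                return (ENOMEM);
        *memptr = ret;
        return (0);
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
        return (imemalign(memptr, alignment, size));
}

void *
memalign(size_t alignment, size_t size)
{
        void *ret;

        if (imemalign(&ret, alignment, size) != 0)
                return (NULL);
        return (ret);
}

void *
valloc(size_t size)
{
        void *ret;

        if (imemalign(&ret, (size_t)sysconf(_SC_PAGESIZE), size) != 0)
                return (NULL);
        return (ret);
}

In the commit itself the profiling preparation happens inside imemalign(), which is what lets one fixed nignore value cover posix_memalign(), memalign(), and valloc() alike.
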
author Jason Evans
date   2011-08-12 13:48:27 -07:00
parent 745e30b157
commit a507004d29
3 changed files with 90 additions and 82 deletions

@@ -145,7 +145,6 @@
#define malloc_write JEMALLOC_N(malloc_write)
#define mb_write JEMALLOC_N(mb_write)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
-#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)

@@ -227,9 +227,60 @@ bool prof_boot2(void);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
+#define PROF_ALLOC_PREP(nignore, size, ret) do { \
+        prof_tdata_t *prof_tdata; \
+        prof_bt_t bt; \
+ \
+        assert(size == s2u(size)); \
+ \
+        prof_tdata = PROF_TCACHE_GET(); \
+        if (prof_tdata == NULL) { \
+                prof_tdata = prof_tdata_init(); \
+                if (prof_tdata == NULL) { \
+                        ret = NULL; \
+                        break; \
+                } \
+        } \
+ \
+        if (opt_prof_active == false) { \
+                /* Sampling is currently inactive, so avoid sampling. */\
+                ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+        } else if (opt_lg_prof_sample == 0) { \
+                /* Don't bother with sampling logic, since sampling */\
+                /* interval is 1. */\
+                bt_init(&bt, prof_tdata->vec); \
+                prof_backtrace(&bt, nignore, prof_bt_max); \
+                ret = prof_lookup(&bt); \
+        } else { \
+                if (prof_tdata->threshold == 0) { \
+                        /* Initialize. Seed the prng differently for */\
+                        /* each thread. */\
+                        prof_tdata->prn_state = \
+                            (uint64_t)(uintptr_t)&size; \
+                        prof_sample_threshold_update(prof_tdata); \
+                } \
+ \
+                /* Determine whether to capture a backtrace based on */\
+                /* whether size is enough for prof_accum to reach */\
+                /* prof_tdata->threshold. However, delay updating */\
+                /* these variables until prof_{m,re}alloc(), because */\
+                /* we don't know for sure that the allocation will */\
+                /* succeed. */\
+                /* */\
+                /* Use subtraction rather than addition to avoid */\
+                /* potential integer overflow. */\
+                if (size >= prof_tdata->threshold - \
+                    prof_tdata->accum) { \
+                        bt_init(&bt, prof_tdata->vec); \
+                        prof_backtrace(&bt, nignore, prof_bt_max); \
+                        ret = prof_lookup(&bt); \
+                } else \
+                        ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
+        } \
+} while (0)
#ifndef JEMALLOC_ENABLE_INLINE
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
-prof_thr_cnt_t *prof_alloc_prep(size_t size);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
@@ -272,71 +323,6 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
            + (uint64_t)1U;
}
-JEMALLOC_INLINE prof_thr_cnt_t *
-prof_alloc_prep(size_t size)
-{
-#ifdef JEMALLOC_ENABLE_INLINE
-   /* This function does not have its own stack frame, because it is inlined. */
-#  define NIGNORE 1
-#else
-#  define NIGNORE 2
-#endif
-        prof_thr_cnt_t *ret;
-        prof_tdata_t *prof_tdata;
-        prof_bt_t bt;
-        assert(size == s2u(size));
-        prof_tdata = PROF_TCACHE_GET();
-        if (prof_tdata == NULL) {
-                prof_tdata = prof_tdata_init();
-                if (prof_tdata == NULL)
-                        return (NULL);
-        }
-        if (opt_prof_active == false) {
-                /* Sampling is currently inactive, so avoid sampling. */
-                ret = (prof_thr_cnt_t *)(uintptr_t)1U;
-        } else if (opt_lg_prof_sample == 0) {
-                /*
-                 * Don't bother with sampling logic, since sampling interval is
-                 * 1.
-                 */
-                bt_init(&bt, prof_tdata->vec);
-                prof_backtrace(&bt, NIGNORE, prof_bt_max);
-                ret = prof_lookup(&bt);
-        } else {
-                if (prof_tdata->threshold == 0) {
-                        /*
-                         * Initialize. Seed the prng differently for each
-                         * thread.
-                         */
-                        prof_tdata->prn_state = (uint64_t)(uintptr_t)&size;
-                        prof_sample_threshold_update(prof_tdata);
-                }
-                /*
-                 * Determine whether to capture a backtrace based on whether
-                 * size is enough for prof_accum to reach
-                 * prof_tdata->threshold. However, delay updating these
-                 * variables until prof_{m,re}alloc(), because we don't know
-                 * for sure that the allocation will succeed.
-                 *
-                 * Use subtraction rather than addition to avoid potential
-                 * integer overflow.
-                 */
-                if (size >= prof_tdata->threshold - prof_tdata->accum) {
-                        bt_init(&bt, prof_tdata->vec);
-                        prof_backtrace(&bt, NIGNORE, prof_bt_max);
-                        ret = prof_lookup(&bt);
-                } else
-                        ret = (prof_thr_cnt_t *)(uintptr_t)1U;
-        }
-        return (ret);
-#undef NIGNORE
-}
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
@@ -415,7 +401,7 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
* always possible to tell in advance how large an
* object's usable size will be, so there should never
* be a difference between the size passed to
-* prof_alloc_prep() and prof_malloc().
+* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
}
@@ -459,7 +445,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if (prof_sample_accum_update(size)) {
/*
* Don't sample. The size passed to
-* prof_alloc_prep() was larger than what
+* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
* its actual size was insufficient to cross