#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active;
}
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}
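/*
 * Return the calling thread's profiling data.  If create is true, missing
 * tdata is lazily initialized and expired tdata is reinitialized; the result
 * may still be NULL (e.g. if the tsd is not in a nominal state), so callers
 * that need a usable tdata must check the result (see the
 * PROF_TDATA_STATE_MAX check in prof_sample_accum_update()).
 */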
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}
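/*
 * prof_tctx_get(), prof_tctx_set(), and prof_tctx_reset() are thin wrappers
 * around the corresponding arena_prof_tctx_*() accessors.  Throughout this
 * file a tctx value of (prof_tctx_t *)(uintptr_t)1U marks an unsampled
 * allocation.
 */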
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsdn, ptr, tctx);
}
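/*
 * Update the thread's bytes_until_sample counter for an allocation of usize
 * bytes.  Returns true if the allocation should not be sampled (no usable
 * tdata, the sample threshold has not been crossed, the thread is in a
 * reentrant allocation, or the tdata is inactive), and false if a sample
 * should be taken.  The counter and threshold are only modified when update
 * is true.  If tdata_out is non-NULL, it is set to the thread's tdata (or
 * NULL).
 */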
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	if (likely(tdata->bytes_until_sample >= usize)) {
		if (update) {
			tdata->bytes_until_sample -= usize;
		}
		return true;
	} else {
		if (tsd->reentrancy_level > 0) {
			return true;
		}
		/* Compute new sample threshold. */
		if (update) {
			prof_sample_threshold_update(tdata);
		}
		return !tdata->active;
	}
}
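/*
 * Decide whether an allocation of usize bytes is to be sampled.  usize must
 * already be the canonical usable size (s2u()).  Returns
 * (prof_tctx_t *)(uintptr_t)1U if the allocation is not sampled; otherwise
 * captures a backtrace and returns the corresponding tctx from prof_lookup().
 */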
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}
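/*
 * Record a newly allocated object: sampled allocations (tctx > 1U) are
 * registered via prof_malloc_sample_object(), while unsampled ones simply
 * have their tctx set to the 1U marker.
 *
 * Rough sketch of how a caller would pair this with prof_alloc_prep();
 * allocate_fn() and ctx are placeholders for the caller's allocation path
 * and alloc_ctx_t, not functions defined here:
 *
 *	prof_tctx_t *tctx = prof_alloc_prep(tsd, usize,
 *	    prof_active_get_unlocked(), true);
 *	void *p = allocate_fn(tsd, usize, tctx);
 *	if (p == NULL) {
 *		prof_alloc_rollback(tsd, tctx, true);
 *	} else {
 *		prof_malloc(tsd_tsdn(tsd), p, usize, &ctx, tctx);
 *	}
 */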
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}
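/*
 * Fix up sampling state across a reallocation.  tctx comes from
 * prof_alloc_prep() for the new size (updated indicates whether the sample
 * counter was already updated for it); old_ptr/old_usize/old_tctx describe
 * the original allocation.  The new object is registered as sampled or
 * marked unsampled as appropriate, and accounting for the old object is
 * released last if it was sampled.
 */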
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the
	 * tctx to be prematurely destroyed as a side effect of momentarily
	 * zeroed counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, old_usize, old_tctx);
	}
}
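/*
 * Release sampling accounting for ptr at deallocation time; this is a no-op
 * for unsampled allocations.
 */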
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, usize, tctx);
	}
}
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */