/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
#  define PROF_PREFIX_DEFAULT        "jeprof"
#else
#  define PROF_PREFIX_DEFAULT        ""
#endif
#define LG_PROF_SAMPLE_DEFAULT       19
#define LG_PROF_INTERVAL_DEFAULT     -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX                  128

/* Initial hash table size. */
#define PROF_CKH_MINITEMS            64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE            65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE          128

/*
 * Number of mutexes shared among all gctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS              1024

/*
 * Number of mutexes shared among all tdata's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NTDATA_LOCKS            256

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED    ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY       ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX             PROF_TDATA_STATE_PURGATORY
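
/*
 * Illustrative sketch (compiled out; the helper name is hypothetical and not
 * part of jemalloc): because the sentinels above share the pointer-sized slot
 * that normally holds a real prof_tdata_t pointer, consumers must range-check
 * the value before dereferencing it.  The same comparison appears inline in
 * prof_sample_accum_update() below.
 */
#if 0
static bool
prof_tdata_is_sentinel(prof_tdata_t *tdata)
{

    /* NULL and the *_STATE_* values all compare <= PROF_TDATA_STATE_MAX. */
    return ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX);
}
#endif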

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct prof_bt_s {
    /* Backtrace, stored as len program counters. */
    void        **vec;
    unsigned    len;
};
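
/*
 * Illustrative sketch (compiled out; hypothetical helper, not jemalloc code):
 * a prof_bt_t does not own its storage.  Callers point vec at a buffer of at
 * least PROF_BT_MAX entries (typically tdata->vec) before capturing.
 * bt_init() and prof_backtrace() are declared under JEMALLOC_H_EXTERNS below;
 * this mirrors how prof_alloc_prep() uses them.
 */
#if 0
static void
prof_bt_capture_example(prof_tdata_t *tdata)
{
    prof_bt_t bt;

    bt_init(&bt, tdata->vec);   /* bt.vec = tdata->vec, bt.len = 0. */
    prof_backtrace(&bt);        /* Fills vec[0..len), len <= PROF_BT_MAX. */
}
#endif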

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
    prof_bt_t   *bt;
    unsigned    max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
    /* Profiling counters. */
    uint64_t    curobjs;
    uint64_t    curbytes;
    uint64_t    accumobjs;
    uint64_t    accumbytes;
};
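
/*
 * Illustrative sketch (compiled out; hypothetical helpers, not jemalloc
 * code): cur{objs,bytes} track live sampled allocations and can return to 0,
 * while accum{objs,bytes} only grow.  Assumption worth checking against
 * prof.c: the accum counters are only maintained/reported when opt_prof_accum
 * is enabled.
 */
#if 0
static void
prof_cnt_alloc_example(prof_cnt_t *cnts, size_t usize)
{

    cnts->curobjs++;
    cnts->curbytes += usize;
    cnts->accumobjs++;
    cnts->accumbytes += usize;
}

static void
prof_cnt_dalloc_example(prof_cnt_t *cnts, size_t usize)
{

    cnts->curobjs--;
    cnts->curbytes -= usize;
    /* accum{objs,bytes} are never decremented. */
}
#endif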

typedef enum {
    prof_tctx_state_initializing,
    prof_tctx_state_nominal,
    prof_tctx_state_dumping,
    prof_tctx_state_purgatory /* Dumper must finish destroying. */
} prof_tctx_state_t;

struct prof_tctx_s {
    /* Thread data for thread that performed the allocation. */
    prof_tdata_t        *tdata;

    /*
     * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
     * defunct during teardown.
     */
    uint64_t            thr_uid;
    uint64_t            thr_discrim;

    /* Profiling counters, protected by tdata->lock. */
    prof_cnt_t          cnts;

    /* Associated global context. */
    prof_gctx_t         *gctx;

    /*
     * UID that distinguishes multiple tctx's created by the same thread,
     * but coexisting in gctx->tctxs.  There are two ways that such
     * coexistence can occur:
     * - A dumper thread can cause a tctx to be retained in the purgatory
     *   state.
     * - Although a single "producer" thread must create all tctx's which
     *   share the same thr_uid, multiple "consumers" can each concurrently
     *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
     *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
     *   threshold can be hit again before the first consumer finishes
     *   executing prof_tctx_destroy().
     */
    uint64_t            tctx_uid;

    /* Linkage into gctx's tctxs. */
    rb_node(prof_tctx_t) tctx_link;

    /*
     * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
     * sample vs destroy race.
     */
    bool                prepared;

    /* Current dump-related state, protected by gctx->lock. */
    prof_tctx_state_t   state;

    /*
     * Copy of cnts snapshotted during early dump phase, protected by
     * dump_mtx.
     */
    prof_cnt_t          dump_cnts;
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;

struct prof_gctx_s {
    /* Protects nlimbo, cnt_summed, and tctxs. */
    malloc_mutex_t      *lock;

    /*
     * Number of threads that currently cause this gctx to be in a state of
     * limbo due to one of:
     *   - Initializing this gctx.
     *   - Initializing per thread counters associated with this gctx.
     *   - Preparing to destroy this gctx.
     *   - Dumping a heap profile that includes this gctx.
     * nlimbo must be 1 (single destroyer) in order to safely destroy the
     * gctx.
     */
    unsigned            nlimbo;

    /*
     * Tree of profile counters, one for each thread that has allocated in
     * this context.
     */
    prof_tctx_tree_t    tctxs;

    /* Linkage for tree of contexts to be dumped. */
    rb_node(prof_gctx_t) dump_link;

    /* Temporary storage for summation during dump. */
    prof_cnt_t          cnt_summed;

    /* Associated backtrace. */
    prof_bt_t           bt;

    /* Backtrace vector, variable size, referred to by bt. */
    void                *vec[1];
};
typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
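
/*
 * Illustrative sketch (compiled out; an assumption about how the trailing
 * vec[1] member is sized -- prof_gctx_create() in prof.c is authoritative):
 * the gctx is allocated with extra trailing space so the backtrace vector can
 * be stored inline, and gctx->bt.vec then points at gctx->vec.
 */
#if 0
static size_t
prof_gctx_size_example(const prof_bt_t *bt)
{

    /* One allocation covers the struct plus bt->len program counters. */
    return (offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)));
}
#endif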

struct prof_tdata_s {
    malloc_mutex_t      *lock;

    /* Monotonically increasing unique thread identifier. */
    uint64_t            thr_uid;

    /*
     * Monotonically increasing discriminator among tdata structures
     * associated with the same thr_uid.
     */
    uint64_t            thr_discrim;

    /* Included in heap profile dumps if non-NULL. */
    char                *thread_name;

    bool                attached;
    bool                expired;

    rb_node(prof_tdata_t) tdata_link;

    /*
     * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
     * necessary when incrementing this field, because only one thread ever
     * does so.
     */
    uint64_t            tctx_uid_next;

    /*
     * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
     * backtraces for which it has non-zero allocation/deallocation counters
     * associated with thread-specific prof_tctx_t objects.  Other threads
     * may write to prof_tctx_t contents when freeing associated objects.
     */
    ckh_t               bt2tctx;

    /* Sampling state. */
    uint64_t            prng_state;
    uint64_t            bytes_until_sample;

    /* State used to avoid dumping while operating on prof internals. */
    bool                enq;
    bool                enq_idump;
    bool                enq_gdump;

    /*
     * Set to true during an early dump phase for tdata's which are
     * currently being dumped.  New threads' tdata's have this initialized
     * to false so that they aren't accidentally included in later dump
     * phases.
     */
    bool                dumping;

    /*
     * True if profiling is active for this tdata's thread
     * (thread.prof.active mallctl).
     */
    bool                active;

    /* Temporary storage for summation during dump. */
    prof_cnt_t          cnt_summed;

    /* Backtrace vector, used for calls to prof_backtrace(). */
    void                *vec[PROF_BT_MAX];
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
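
/*
 * Illustrative sketch (compiled out; hypothetical helper -- tctx creation in
 * prof_lookup()/prof.c is authoritative): tctx_uid_next can be read and
 * incremented without holding tdata->lock because only the owning thread
 * creates tctx's for its own tdata.
 */
#if 0
static uint64_t
prof_tctx_uid_alloc_example(prof_tdata_t *tdata)
{

    return (tdata->tctx_uid_next++);
}
#endif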

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool     opt_prof;
extern bool     opt_prof_active;
extern bool     opt_prof_thread_active_init;
extern size_t   opt_lg_prof_sample;    /* lg(mean bytes between samples). */
extern ssize_t  opt_lg_prof_interval;  /* lg(prof_interval). */
extern bool     opt_prof_gdump;        /* High-water memory dumping. */
extern bool     opt_prof_final;        /* Final profile dumping. */
extern bool     opt_prof_leak;         /* Dump leak summary at exit. */
extern bool     opt_prof_accum;        /* Report cumulative bytes. */
extern char     opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/* Accessed via prof_active_[gs]et{_unlocked,}(). */
extern bool     prof_active;

/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
extern bool     prof_gdump_val;

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;
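
/*
 * Illustrative sketch (compiled out; an assumption about how prof_interval is
 * derived -- the authoritative code is in prof_boot*() in prof.c):
 * opt_lg_prof_interval is a base-2 exponent, with a negative value meaning
 * interval-triggered dumping is disabled.  LG_PROF_INTERVAL_DEFAULT is -1, so
 * dumps are off by default; e.g. lg_prof_interval:30 would dump roughly every
 * 1 GiB allocated per arena.
 */
#if 0
static uint64_t
prof_interval_example(ssize_t lg_prof_interval)
{

    if (lg_prof_interval < 0)
        return (0);    /* Interval-triggered dumping disabled. */
    return (((uint64_t)1U) << lg_prof_interval);
}
#endif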

/*
 * Initialized as opt_lg_prof_sample, and potentially modified during profiling
 * resets.
 */
extern size_t   lg_prof_sample;

void    prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
void    prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void    prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void    bt_init(prof_bt_t *bt, void **vec);
void    prof_backtrace(prof_bt_t *bt);
prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t  prof_tdata_count(void);
size_t  prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
void    prof_idump(tsdn_t *tsdn);
bool    prof_mdump(tsd_t *tsd, const char *filename);
void    prof_gdump(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void    prof_reset(tsd_t *tsd, size_t lg_sample);
void    prof_tdata_cleanup(tsd_t *tsd);
bool    prof_active_get(tsdn_t *tsdn);
bool    prof_active_set(tsdn_t *tsdn, bool active);
const char *prof_thread_name_get(tsd_t *tsd);
int     prof_thread_name_set(tsd_t *tsd, const char *thread_name);
bool    prof_thread_active_get(tsd_t *tsd);
bool    prof_thread_active_set(tsd_t *tsd, bool active);
bool    prof_thread_active_init_get(tsdn_t *tsdn);
bool    prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
bool    prof_gdump_get(tsdn_t *tsdn);
bool    prof_gdump_set(tsdn_t *tsdn, bool active);
void    prof_boot0(void);
void    prof_boot1(void);
bool    prof_boot2(tsd_t *tsd);
void    prof_prefork0(tsdn_t *tsdn);
void    prof_prefork1(tsdn_t *tsdn);
void    prof_postfork_parent(tsdn_t *tsdn);
void    prof_postfork_child(tsdn_t *tsdn);
void    prof_sample_threshold_update(prof_tdata_t *tdata);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
bool    prof_active_get_unlocked(void);
bool    prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void    prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void    prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *tctx);
bool    prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
    bool update);
void    prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
    prof_tctx_t *tctx);
void    prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
    size_t old_usize, prof_tctx_t *old_tctx);
void    prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{

    /*
     * Even if opt_prof is true, sampling can be temporarily disabled by
     * setting prof_active to false.  No locking is used when reading
     * prof_active in the fast path, so there are no guarantees regarding
     * how long it will take for all threads to notice state changes.
     */
    return (prof_active);
}
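
/*
 * Illustrative sketch (compiled out; simplified -- the real fast-path logic
 * lives in the allocation paths outside this header): callers gate profiling
 * work on the compile-time config_prof flag and the opt_prof option before
 * consulting this racily-read runtime toggle, typically by passing its result
 * as the prof_active argument to prof_alloc_prep().
 */
#if 0
static bool
prof_should_sample_example(void)
{

    return (config_prof && opt_prof && prof_active_get_unlocked());
}
#endif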

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{

    /*
     * No locking is used when reading prof_gdump_val in the fast path, so
     * there are no guarantees regarding how long it will take for all
     * threads to notice state changes.
     */
    return (prof_gdump_val);
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = tsd_prof_tdata_get(tsd);
    if (create) {
        if (unlikely(tdata == NULL)) {
            if (tsd_nominal(tsd)) {
                tdata = prof_tdata_init(tsd);
                tsd_prof_tdata_set(tsd, tdata);
            }
        } else if (unlikely(tdata->expired)) {
            tdata = prof_tdata_reinit(tsd, tdata);
            tsd_prof_tdata_set(tsd, tdata);
        }
        assert(tdata == NULL || tdata->attached);
    }

    return (tdata);
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{

    cassert(config_prof);
    assert(ptr != NULL);

    return (arena_prof_tctx_get(tsdn, ptr));
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
    const void *old_ptr, prof_tctx_t *old_tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);

    arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}

JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out)
{
    prof_tdata_t *tdata;

    cassert(config_prof);

    tdata = prof_tdata_get(tsd, true);
    if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
        tdata = NULL;

    if (tdata_out != NULL)
        *tdata_out = tdata;

    if (unlikely(tdata == NULL))
        return (true);

    if (likely(tdata->bytes_until_sample >= usize)) {
        if (update)
            tdata->bytes_until_sample -= usize;
        return (true);
    } else {
        /* Compute new sample threshold. */
        if (update)
            prof_sample_threshold_update(tdata);
        return (!tdata->active);
    }
}
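
/*
 * Illustrative usage sketch (compiled out; numbers are made up): with
 * lg_prof_sample = 19 the expected gap between samples is 2^19 = 512 KiB.
 * If bytes_until_sample is 300000 and a 100000-byte allocation arrives, the
 * likely branch fires, the counter drops to 200000, and true ("don't sample")
 * is returned.  Once an allocation exhausts the counter, the else branch
 * recomputes the threshold via prof_sample_threshold_update() and returns
 * false only if this thread's profiling is active, i.e. the allocation gets
 * sampled.
 */
#if 0
static void
prof_sample_accum_update_usage_example(tsd_t *tsd, size_t usize)
{
    prof_tdata_t *tdata;

    if (prof_sample_accum_update(tsd, usize, true, &tdata)) {
        /* true: below threshold (or no usable tdata); don't sample. */
    } else {
        /*
         * false: threshold crossed and thread active; sample via
         * prof_lookup()/prof_malloc_sample_object().
         */
    }
}
#endif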

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
    prof_tctx_t *ret;
    prof_tdata_t *tdata;
    prof_bt_t bt;

    assert(usize == s2u(usize));

    if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
        &tdata)))
        ret = (prof_tctx_t *)(uintptr_t)1U;
    else {
        bt_init(&bt, tdata->vec);
        prof_backtrace(&bt);
        ret = prof_lookup(tsd, &bt);
    }

    return (ret);
}
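
/*
 * Illustrative sketch (compiled out; simplified -- the authoritative callers
 * are the allocation paths outside this header, and hypothetical_alloc() is a
 * stand-in, not a real function): the (prof_tctx_t *)1U sentinel means "not
 * sampled", so prof_malloc() can be called unconditionally with whatever
 * prof_alloc_prep() returned, and a failed allocation must be rolled back so
 * the bytes_until_sample accounting stays consistent.
 */
#if 0
static void *
prof_alloc_path_example(tsd_t *tsd, size_t usize)
{
    void *ret;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    ret = hypothetical_alloc(tsd, usize);   /* Stand-in for the allocator. */
    if (unlikely(ret == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(tsd_tsdn(tsd), ret, usize, tctx);
    return (ret);
}
#endif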

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{

    cassert(config_prof);
    assert(ptr != NULL);
    assert(usize == isalloc(tsdn, ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_malloc_sample_object(tsdn, ptr, usize, tctx);
    else
        prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}

JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx)
{
    bool sampled, old_sampled;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

    if (prof_active && !updated && ptr != NULL) {
        assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
        if (prof_sample_accum_update(tsd, usize, true, NULL)) {
            /*
             * Don't sample.  The usize passed to prof_alloc_prep()
             * was larger than what actually got allocated, so a
             * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
             * sample threshold.
             */
            prof_alloc_rollback(tsd, tctx, true);
            tctx = (prof_tctx_t *)(uintptr_t)1U;
        }
    }

    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);

    if (unlikely(sampled))
        prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
    else
        prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);

    if (unlikely(old_sampled))
        prof_free_sampled_object(tsd, old_usize, old_tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
    prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);

    cassert(config_prof);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));

    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
        prof_free_sampled_object(tsd, usize, tctx);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/