Add lock to protect prof last-N dumping

This commit is contained in:
Yinan Zhang 2020-04-13 12:05:51 -07:00
parent a835d9cf85
commit 3e19ebd2ea
4 changed files with 33 additions and 17 deletions

View File

@@ -1,6 +1,8 @@
#ifndef JEMALLOC_INTERNAL_PROF_RECENT_EXTERNS_H
#define JEMALLOC_INTERNAL_PROF_RECENT_EXTERNS_H
extern malloc_mutex_t prof_recent_dump_mtx;
bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size);
void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);

View File

@@ -29,7 +29,8 @@
#define WITNESS_RANK_PROF_TDATA 8U
#define WITNESS_RANK_PROF_LOG 9U
#define WITNESS_RANK_PROF_GCTX 10U
#define WITNESS_RANK_BACKGROUND_THREAD 11U
#define WITNESS_RANK_PROF_RECENT_DUMP 11U
#define WITNESS_RANK_BACKGROUND_THREAD 12U
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
@@ -37,19 +38,19 @@
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 12U
#define WITNESS_RANK_CORE 13U
#define WITNESS_RANK_DECAY 12U
#define WITNESS_RANK_TCACHE_QL 13U
#define WITNESS_RANK_EXTENT_GROW 14U
#define WITNESS_RANK_EXTENTS 15U
#define WITNESS_RANK_EDATA_CACHE 16U
#define WITNESS_RANK_DECAY 13U
#define WITNESS_RANK_TCACHE_QL 14U
#define WITNESS_RANK_EXTENT_GROW 15U
#define WITNESS_RANK_EXTENTS 16U
#define WITNESS_RANK_EDATA_CACHE 17U
#define WITNESS_RANK_EMAP 17U
#define WITNESS_RANK_RTREE 18U
#define WITNESS_RANK_BASE 19U
#define WITNESS_RANK_ARENA_LARGE 20U
#define WITNESS_RANK_HOOK 21U
#define WITNESS_RANK_EMAP 18U
#define WITNESS_RANK_RTREE 19U
#define WITNESS_RANK_BASE 20U
#define WITNESS_RANK_ARENA_LARGE 21U
#define WITNESS_RANK_HOOK 22U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
@@ -60,8 +61,8 @@
#define WITNESS_RANK_PROF_DUMP_FILENAME WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_RECENT_ALLOC WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
/******************************************************************************/
/* PER-WITNESS DATA */

View File

@@ -1116,6 +1116,7 @@ prof_prefork0(tsdn_t *tsdn) {
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
}
malloc_mutex_prefork(tsdn, &prof_recent_dump_mtx);
}
}
@@ -1145,6 +1146,7 @@ prof_postfork_parent(tsdn_t *tsdn) {
malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
counter_postfork_parent(tsdn, &prof_idump_accumulated);
malloc_mutex_postfork_parent(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
}
@@ -1170,6 +1172,7 @@ prof_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
counter_postfork_child(tsdn, &prof_idump_accumulated);
malloc_mutex_postfork_child(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
}

View File

@@ -18,6 +18,8 @@ static
#endif
prof_recent_list_t prof_recent_alloc_list;
malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
static void
prof_recent_alloc_max_init() {
atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
@@ -433,6 +435,7 @@ prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
static void
prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
while (!ql_empty(to_delete)) {
prof_recent_t *node = ql_first(to_delete);
@@ -507,6 +510,7 @@ prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
#define PROF_RECENT_PRINT_BUFSIZE 65536
void
prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
buf_writer_t buf_writer;
buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
PROF_RECENT_PRINT_BUFSIZE);
@@ -543,8 +547,10 @@ prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
prof_recent_alloc_restore_locked(tsd, &temp_list);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
prof_recent_alloc_async_cleanup(tsd, &temp_list);
buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
prof_recent_alloc_async_cleanup(tsd, &temp_list);
}
#undef PROF_RECENT_PRINT_BUFSIZE
@@ -552,9 +558,13 @@ bool
prof_recent_init() {
prof_recent_alloc_max_init();
if (malloc_mutex_init(&prof_recent_alloc_mtx,
"prof_recent_alloc", WITNESS_RANK_PROF_RECENT_ALLOC,
malloc_mutex_rank_exclusive)) {
if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
return true;
}
if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
return true;
}