diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index d82fbc4f..96db4c3e 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
  */
 #define	PROF_BT_MAX		128
 
-/* Maximum number of backtraces to store in each per thread LRU cache. */
-#define	PROF_TCMAX		1024
-
 /* Initial hash table size. */
 #define	PROF_CKH_MINITEMS	64
 
@@ -87,9 +84,6 @@ struct prof_thr_cnt_s {
 	/* Linkage into prof_ctx_t's cnts_ql. */
 	ql_elm(prof_thr_cnt_t)	cnts_link;
 
-	/* Linkage into thread's LRU. */
-	ql_elm(prof_thr_cnt_t)	lru_link;
-
 	/*
 	 * Associated context.  If a thread frees an object that it did not
 	 * allocate, it is possible that the context is not cached in the
@@ -157,10 +151,11 @@ typedef ql_head(prof_ctx_t) prof_ctx_list_t;
 
 struct prof_tdata_s {
 	/*
-	 * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *).  Each thread keeps a
-	 * cache of backtraces, with associated thread-specific prof_thr_cnt_t
-	 * objects.  Other threads may read the prof_thr_cnt_t contents, but no
-	 * others will ever write them.
+	 * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *).  Each thread tracks
+	 * backtraces for which it has non-zero allocation/deallocation counters
+	 * associated with thread-specific prof_thr_cnt_t objects.  Other
+	 * threads may read the prof_thr_cnt_t contents, but no others will ever
+	 * write them.
 	 *
 	 * Upon thread exit, the thread must merge all the prof_thr_cnt_t
 	 * counter data into the associated prof_ctx_t objects, and unlink/free
@@ -168,12 +163,6 @@ struct prof_tdata_s {
 	 */
 	ckh_t			bt2cnt;
 
-	/* LRU for contents of bt2cnt. */
-	ql_head(prof_thr_cnt_t)	lru_ql;
-
-	/* Backtrace vector, used for calls to prof_backtrace(). */
-	void			**vec;
-
 	/* Sampling state. */
 	uint64_t		prng_state;
 	uint64_t		bytes_until_sample;
@@ -182,6 +171,9 @@ struct prof_tdata_s {
 	bool			enq;
 	bool			enq_idump;
 	bool			enq_gdump;
+
+	/* Backtrace vector, used for calls to prof_backtrace(). */
+	void			*vec[PROF_BT_MAX];
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
diff --git a/src/prof.c b/src/prof.c
index 0eb7dbdb..4f95fdb9 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -567,33 +567,13 @@ prof_lookup(prof_bt_t *bt)
 			return (NULL);
 
 		/* Link a prof_thd_cnt_t into ctx for this thread. */
-		if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
-			assert(ckh_count(&prof_tdata->bt2cnt) > 0);
-			/*
-			 * Flush the least recently used cnt in order to keep
-			 * bt2cnt from becoming too large.
-			 */
-			ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
-			assert(ret.v != NULL);
-			if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
-			    NULL, NULL))
-				not_reached();
-			ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
-			prof_ctx_merge(ret.p->ctx, ret.p);
-			/* ret can now be re-used. */
-		} else {
-			assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
-			/* Allocate and partially initialize a new cnt. */
-			ret.v = imalloc(sizeof(prof_thr_cnt_t));
-			if (ret.p == NULL) {
-				if (new_ctx)
-					prof_ctx_destroy(ctx);
-				return (NULL);
-			}
-			ql_elm_new(ret.p, cnts_link);
-			ql_elm_new(ret.p, lru_link);
+		ret.v = imalloc(sizeof(prof_thr_cnt_t));
+		if (ret.p == NULL) {
+			if (new_ctx)
+				prof_ctx_destroy(ctx);
+			return (NULL);
 		}
-		/* Finish initializing ret. */
+		ql_elm_new(ret.p, cnts_link);
 		ret.p->ctx = ctx;
 		ret.p->epoch = 0;
 		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
@@ -603,15 +583,10 @@ prof_lookup(prof_bt_t *bt)
 			idalloc(ret.v);
 			return (NULL);
 		}
-		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
 		malloc_mutex_lock(ctx->lock);
 		ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
 		ctx->nlimbo--;
 		malloc_mutex_unlock(ctx->lock);
-	} else {
-		/* Move ret to the front of the LRU. */
-		ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
-		ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
 	}
 
 	return (ret.p);
@@ -1247,14 +1222,6 @@ prof_tdata_init(void)
 		idalloc(prof_tdata);
 		return (NULL);
 	}
-	ql_new(&prof_tdata->lru_ql);
-
-	prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
-	if (prof_tdata->vec == NULL) {
-		ckh_delete(&prof_tdata->bt2cnt);
-		idalloc(prof_tdata);
-		return (NULL);
-	}
 
 	prof_tdata->prng_state = (uint64_t)(uintptr_t)prof_tdata;
 	prof_sample_threshold_update(prof_tdata);
@@ -1271,7 +1238,6 @@
 void
 prof_tdata_cleanup(void *arg)
 {
-	prof_thr_cnt_t *cnt;
 	prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
 
 	cassert(config_prof);
@@ -1292,21 +1258,22 @@ prof_tdata_cleanup(void *arg)
 		 * nothing, so that the destructor will not be called again.
 		 */
 	} else if (prof_tdata != NULL) {
-		/*
-		 * Delete the hash table.  All of its contents can still be
-		 * iterated over via the LRU.
-		 */
-		ckh_delete(&prof_tdata->bt2cnt);
+		union {
+			prof_thr_cnt_t	*p;
+			void		*v;
+		} cnt;
+		size_t tabind;
+
 		/*
 		 * Iteratively merge cnt's into the global stats and delete
 		 * them.
 		 */
-		while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
-			ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
-			prof_ctx_merge(cnt->ctx, cnt);
-			idalloc(cnt);
+		for (tabind = 0; ckh_iter(&prof_tdata->bt2cnt, &tabind, NULL,
+		    &cnt.v) == false;) {
+			prof_ctx_merge(cnt.p->ctx, cnt.p);
+			idalloc(cnt.v);
 		}
-		idalloc(prof_tdata->vec);
+		ckh_delete(&prof_tdata->bt2cnt);
 		idalloc(prof_tdata);
 		prof_tdata = PROF_TDATA_STATE_PURGATORY;
 		prof_tdata_tsd_set(&prof_tdata);
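
Note on the new cleanup loop: prof_tdata_cleanup() now walks the per-thread bt2cnt table directly via ckh_iter() instead of draining the deleted lru_ql, so every prof_thr_cnt_t can be merged and freed before ckh_delete() tears the table down. For readers unfamiliar with the iterator's contract, the sketch below is a minimal standalone analogy, not jemalloc code; the toy table (toy_tab, toy_iter(), NCELLS) is invented for illustration. The caller owns a cursor (tabind) initialized to 0; each call yields one occupied cell and returns false while items remain, and returns true once the table is exhausted.

/*
 * Toy open-addressed table whose iterator mirrors ckh_iter()'s contract:
 * scan cells starting at *tabind, return false and advance *tabind when an
 * occupied cell is found, return true when the table is exhausted.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NCELLS 8

static void *toy_tab[NCELLS];	/* NULL means the cell is empty. */

static bool
toy_iter(size_t *tabind, void **data)
{
	size_t i;

	for (i = *tabind; i < NCELLS; i++) {
		if (toy_tab[i] != NULL) {
			*data = toy_tab[i];
			*tabind = i + 1;	/* Resume after this cell. */
			return (false);		/* An item was produced. */
		}
	}
	return (true);				/* Iteration complete. */
}

int
main(void)
{
	static int a = 1, b = 2;
	size_t tabind;
	void *v;

	toy_tab[2] = &a;
	toy_tab[5] = &b;

	/* Same loop shape as the prof_tdata_cleanup() hunk above. */
	for (tabind = 0; toy_iter(&tabind, &v) == false;)
		printf("visited %d\n", *(int *)v);

	return (0);
}

Because exit-time traversal no longer depends on the LRU list, the PROF_TCMAX bound and its eviction path in prof_lookup() become unnecessary; the trade-off is that bt2cnt now grows with the number of distinct sampled backtraces a thread encounters, with each counter persisting until the thread exits.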