Use ql for prof last-N list

commit a5ddfa7d91
parent 8da6676a02
Author: Yinan Zhang
Date:   2020-04-02 10:48:58 -07:00

2 changed files with 61 additions and 66 deletions


@@ -203,7 +203,7 @@ struct prof_recent_s {
 	nstime_t alloc_time;
 	nstime_t dalloc_time;
-	prof_recent_t *next;
+	ql_elm(prof_recent_t) link;
 	size_t size;
 	prof_tctx_t *alloc_tctx;
 	edata_t *alloc_edata;	/* NULL means allocation has been freed. */
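The struct change swaps a bare intrusive next pointer for jemalloc's ql linkage macros, so the list head needs no heap-allocated sentinel. A minimal sketch of the declaration pattern (widget_t and its fields are hypothetical; assumes the in-tree include/jemalloc/internal/ql.h):

#include "jemalloc/internal/ql.h"

typedef struct widget_s widget_t;
struct widget_s {
	size_t payload;
	ql_elm(widget_t) link;	/* embedded linkage, replacing a bare next pointer */
};

/* The head is a plain struct, so it can live in static storage. */
typedef ql_head(widget_t) widget_list_t;
static widget_list_t widgets = ql_head_initializer(widgets);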


@@ -14,11 +14,13 @@
 # define STATIC_INLINE_IF_NOT_TEST
 #endif
 
+typedef ql_head(prof_recent_t) prof_recent_list_t;
+
 ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
 malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
 static atomic_zd_t prof_recent_alloc_max;
 static ssize_t prof_recent_alloc_count = 0;
-static prof_recent_t *prof_recent_alloc_dummy = NULL;
+static prof_recent_list_t prof_recent_alloc_list;
 
 static void
 prof_recent_alloc_max_init() {
@@ -204,29 +206,26 @@ prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent) {
 STATIC_INLINE_IF_NOT_TEST prof_recent_t *
 prof_recent_alloc_begin(tsd_t *tsd) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-	assert(prof_recent_alloc_dummy != NULL);
-	return prof_recent_alloc_dummy->next;
+	return ql_first(&prof_recent_alloc_list);
 }
 
 STATIC_INLINE_IF_NOT_TEST prof_recent_t *
 prof_recent_alloc_end(tsd_t *tsd) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-	assert(prof_recent_alloc_dummy != NULL);
-	return prof_recent_alloc_dummy;
+	return NULL;
 }
 
 STATIC_INLINE_IF_NOT_TEST prof_recent_t *
 prof_recent_alloc_next(tsd_t *tsd, prof_recent_t *node) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-	assert(prof_recent_alloc_dummy != NULL);
-	assert(node != NULL && node != prof_recent_alloc_dummy);
-	return node->next;
+	assert(node != NULL);
+	return ql_next(&prof_recent_alloc_list, node, link);
 }
 
 static bool
 prof_recent_alloc_is_empty(tsd_t *tsd) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-	if (prof_recent_alloc_begin(tsd) == prof_recent_alloc_end(tsd)) {
+	if (ql_empty(&prof_recent_alloc_list)) {
 		assert(prof_recent_alloc_count == 0);
 		return true;
 	} else {
@@ -238,17 +237,17 @@ prof_recent_alloc_is_empty(tsd_t *tsd) {
 static void
 prof_recent_alloc_assert_count(tsd_t *tsd) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-	if (config_debug) {
-		ssize_t count = 0;
-		prof_recent_t *n = prof_recent_alloc_begin(tsd);
-		while (n != prof_recent_alloc_end(tsd)) {
-			++count;
-			n = prof_recent_alloc_next(tsd, n);
-		}
-		assert(count == prof_recent_alloc_count);
-		assert(prof_recent_alloc_max_get(tsd) == -1 ||
-		    count <= prof_recent_alloc_max_get(tsd));
+	if (!config_debug) {
+		return;
 	}
+	ssize_t count = 0;
+	prof_recent_t *n;
+	ql_foreach(n, &prof_recent_alloc_list, link) {
+		++count;
+	}
+	assert(count == prof_recent_alloc_count);
+	assert(prof_recent_alloc_max_get(tsd) == -1 ||
+	    count <= prof_recent_alloc_max_get(tsd));
 }
 
 void
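With the sentinel gone, the iteration accessors map directly onto ql primitives: begin is ql_first(), the end marker is simply NULL, and stepping is ql_next(), which yields NULL past the last element. The two traversal styles below are equivalent (a sketch against the same static list as in the diff):

/* Explicit begin/end/next traversal, mirroring prof_recent_alloc_begin/end/next... */
for (prof_recent_t *n = ql_first(&prof_recent_alloc_list); n != NULL;
    n = ql_next(&prof_recent_alloc_list, n, link)) {
	/* visit n */
}

/* ...or the ql_foreach form the new code prefers. */
prof_recent_t *n;
ql_foreach(n, &prof_recent_alloc_list, link) {
	/* visit n */
}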
@@ -311,45 +310,42 @@ prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size) {
 		goto label_rollback;
 	}
 
-	assert(prof_recent_alloc_dummy != NULL);
-	{
-		/* Fill content into the dummy node. */
-		prof_recent_t *node = prof_recent_alloc_dummy;
-		node->size = size;
-		nstime_copy(&node->alloc_time,
-		    edata_prof_alloc_time_get(edata));
-		node->alloc_tctx = tctx;
-		edata_prof_recent_alloc_set(tsd, edata, node);
-		nstime_init_zero(&node->dalloc_time);
-		node->dalloc_tctx = NULL;
-	}
-
 	prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
 	if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
-		/* If upper limit is reached, simply shift the dummy. */
+		/* If upper limit is reached, rotate the head. */
 		assert(prof_recent_alloc_max_get(tsd) != -1);
 		assert(!prof_recent_alloc_is_empty(tsd));
-		prof_recent_alloc_dummy = prof_recent_alloc_dummy->next;
-		old_alloc_tctx = prof_recent_alloc_dummy->alloc_tctx;
+		prof_recent_t *head = ql_first(&prof_recent_alloc_list);
+		old_alloc_tctx = head->alloc_tctx;
 		assert(old_alloc_tctx != NULL);
-		old_dalloc_tctx = prof_recent_alloc_dummy->dalloc_tctx;
-		prof_recent_alloc_evict_edata(tsd, prof_recent_alloc_dummy);
+		old_dalloc_tctx = head->dalloc_tctx;
+		prof_recent_alloc_evict_edata(tsd, head);
+		ql_rotate(&prof_recent_alloc_list, link);
 	} else {
-		/* Otherwise use the new node as the dummy. */
+		/* Otherwise make use of the new node. */
 		assert(prof_recent_alloc_max_get(tsd) == -1 ||
 		    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
 		if (reserve == NULL) {
 			goto label_rollback;
 		}
-		reserve->next = prof_recent_alloc_dummy->next;
-		prof_recent_alloc_dummy->next = reserve;
-		prof_recent_alloc_dummy = reserve;
+		ql_elm_new(reserve, link);
+		ql_tail_insert(&prof_recent_alloc_list, reserve, link);
 		reserve = NULL;
 		old_alloc_tctx = NULL;
 		old_dalloc_tctx = NULL;
 		++prof_recent_alloc_count;
 	}
 
+	/* Fill content into the tail node. */
+	prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
+	assert(tail != NULL);
+	tail->size = size;
+	nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
+	tail->alloc_tctx = tctx;
+	edata_prof_recent_alloc_set(tsd, edata, tail);
+	nstime_init_zero(&tail->dalloc_time);
+	tail->dalloc_tctx = NULL;
+
 	assert(!prof_recent_alloc_is_empty(tsd));
 	prof_recent_alloc_assert_count(tsd);
 	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
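The at-capacity branch is the heart of the change: rather than advancing a dummy pointer around a hand-rolled ring, the oldest node is recycled in place. ql_rotate() moves the head to the tail in constant time, and the tail is then overwritten with the new record. A sketch of the effect (assumes the in-tree ql.h):

/* List holds [oldest, ..., newest] and is at capacity. */
prof_recent_t *oldest = ql_first(&prof_recent_alloc_list);
/* ... release whatever the oldest record still owns ... */
ql_rotate(&prof_recent_alloc_list, link);
/* The same node is now the tail, ready to be refilled as the newest record. */
assert(ql_last(&prof_recent_alloc_list, link) == oldest);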
@@ -403,19 +399,27 @@ prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
 		return old_max;
 	}
 
-	prof_recent_t *begin = prof_recent_alloc_dummy->next;
 	/* For verification purpose only. */
 	ssize_t count = prof_recent_alloc_count - max;
-	do {
-		assert(!prof_recent_alloc_is_empty(tsd));
-		prof_recent_t *node = prof_recent_alloc_dummy->next;
-		assert(node != prof_recent_alloc_dummy);
+	prof_recent_t *node;
+	ql_foreach(node, &prof_recent_alloc_list, link) {
+		if (prof_recent_alloc_count == max) {
+			break;
+		}
 		prof_recent_alloc_evict_edata(tsd, node);
-		prof_recent_alloc_dummy->next = node->next;
 		--prof_recent_alloc_count;
-	} while (prof_recent_alloc_count > max);
-	prof_recent_t *end = prof_recent_alloc_dummy->next;
-	assert(begin != end);
+	}
+
+	assert(prof_recent_alloc_count == max);
+	prof_recent_list_t old_list;
+	ql_move(&old_list, &prof_recent_alloc_list);
+	if (max == 0) {
+		assert(node == NULL);
+	} else {
+		assert(node != NULL);
+		ql_split(&old_list, node, &prof_recent_alloc_list, link);
+	}
+	assert(!ql_empty(&old_list));
 
 	prof_recent_alloc_assert_count(tsd);
 	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
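Trimming now happens in two phases: the walk evicts the oldest records until only max survive, leaving node pointed at the first survivor, and the list is then snapped in two. ql_move() transfers the whole list into old_list and empties the live head; ql_split() gives the suffix beginning at the split element back to the live head, so old_list retains exactly the evicted prefix. A condensed sketch, with first_survivor standing in (hypothetically) for the loop's final node value:

prof_recent_list_t old_list;
ql_move(&old_list, &prof_recent_alloc_list);	/* live list is now empty */
if (first_survivor != NULL) {
	/*
	 * Return [first_survivor, ..., tail] to the live list; the evicted
	 * prefix stays behind in old_list, to be freed outside the mutex.
	 */
	ql_split(&old_list, first_survivor, &prof_recent_alloc_list, link);
}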
@@ -432,15 +436,15 @@ prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
 	 * to and controlled by application.
 	 */
 	do {
-		prof_recent_t *node = begin;
+		node = ql_first(&old_list);
+		ql_remove(&old_list, node, link);
 		decrement_recent_count(tsd, node->alloc_tctx);
 		if (node->dalloc_tctx != NULL) {
 			decrement_recent_count(tsd, node->dalloc_tctx);
 		}
-		begin = node->next;
 		idalloctm(tsd_tsdn(tsd), node, NULL, NULL, true, true);
 		--count;
-	} while (begin != end);
+	} while (!ql_empty(&old_list));
 	assert(count == 0);
 
 	return old_max;
@@ -482,9 +486,8 @@ prof_recent_alloc_dump(tsd_t *tsd, void (*write_cb)(void *, const char *),
 	emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize, &max);
 	emitter_json_array_kv_begin(&emitter, "recent_alloc");
-	for (prof_recent_t *n = prof_recent_alloc_begin(tsd);
-	    n != prof_recent_alloc_end(tsd);
-	    n = prof_recent_alloc_next(tsd, n)) {
+	prof_recent_t *n;
+	ql_foreach(n, &prof_recent_alloc_list, link) {
 		emitter_json_object_begin(&emitter);
 		emitter_json_kv(&emitter, "size", emitter_type_size, &n->size);
@@ -541,15 +544,7 @@ prof_recent_init() {
 		return true;
 	}
 
-	assert(prof_recent_alloc_dummy == NULL);
-	prof_recent_alloc_dummy = (prof_recent_t *)iallocztm(
-	    TSDN_NULL, sizeof(prof_recent_t),
-	    sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
-	    arena_get(TSDN_NULL, 0, true), true);
-	if (prof_recent_alloc_dummy == NULL) {
-		return true;
-	}
-	prof_recent_alloc_dummy->next = prof_recent_alloc_dummy;
+	ql_new(&prof_recent_alloc_list);
 
 	return false;
 }
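A side benefit shows up in initialization: the old scheme heap-allocated its sentinel and could therefore fail at init time, whereas a ql head is plain storage that ql_new() merely resets, with no allocation involved. Sketch:

static prof_recent_list_t list;	/* plain static storage; no sentinel allocation */

ql_new(&list);	/* resets the head; cannot fail */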