Get rid of custom iterator for last-N records

Yinan Zhang 2020-04-02 13:40:22 -07:00
parent a5ddfa7d91
commit 2deabac079
3 changed files with 25 additions and 62 deletions
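
Summary of the change: the test-only iterator wrappers prof_recent_alloc_begin() / prof_recent_alloc_end() / prof_recent_alloc_next() are removed, and the recent-allocation list itself is exposed to the unit test (under JEMALLOC_JET), which now walks it with the generic ql list macros. A rough before/after sketch of the iteration pattern follows; it is illustrative only, not a verbatim excerpt: `n` is a prof_recent_t * cursor, and prof_recent_alloc_mtx must be held, exactly as in the test hunks below.

    /* Before: custom, test-only iterator. */
    for (n = prof_recent_alloc_begin(tsd); n != prof_recent_alloc_end(tsd);
        n = prof_recent_alloc_next(tsd, n)) {
        /* visit record n */
    }

    /* After: walk the exported list directly with the generic ql macros;
     * ql_empty() / ql_first() / ql_next() cover the non-loop uses. */
    malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
    ql_foreach(n, &prof_recent_alloc_list, link) {
        /* visit record n */
    }
    malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);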

include/jemalloc/internal/prof_recent.h

@@ -7,9 +7,8 @@ void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
 bool prof_recent_init();
 void edata_prof_recent_alloc_init(edata_t *edata);
 
 #ifdef JEMALLOC_JET
-prof_recent_t *prof_recent_alloc_begin(tsd_t *tsd);
-prof_recent_t *prof_recent_alloc_end(tsd_t *tsd);
-prof_recent_t *prof_recent_alloc_next(tsd_t *tsd, prof_recent_t *node);
+typedef ql_head(prof_recent_t) prof_recent_list_t;
+extern prof_recent_list_t prof_recent_alloc_list;
 prof_recent_t *edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata);
 #endif

src/prof_recent.c

@@ -8,19 +8,15 @@
 #include "jemalloc/internal/prof_data.h"
 #include "jemalloc/internal/prof_recent.h"
 
-#ifndef JEMALLOC_JET
-# define STATIC_INLINE_IF_NOT_TEST static inline
-#else
-# define STATIC_INLINE_IF_NOT_TEST
-#endif
-
-typedef ql_head(prof_recent_t) prof_recent_list_t;
-
 ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
 malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
 static atomic_zd_t prof_recent_alloc_max;
 static ssize_t prof_recent_alloc_count = 0;
-static prof_recent_list_t prof_recent_alloc_list;
+#ifndef JEMALLOC_JET
+typedef ql_head(prof_recent_t) prof_recent_list_t;
+static
+#endif
+prof_recent_list_t prof_recent_alloc_list;
 
 static void
 prof_recent_alloc_max_init() {
@@ -102,7 +98,10 @@ edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
 return edata_prof_recent_alloc_get_dont_call_directly(edata);
 }
 
-STATIC_INLINE_IF_NOT_TEST prof_recent_t *
+#ifndef JEMALLOC_JET
+static inline
+#endif
+prof_recent_t *
 edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
 malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
 prof_recent_t *recent_alloc =
@@ -203,25 +202,6 @@ prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent) {
 }
 }
 
-STATIC_INLINE_IF_NOT_TEST prof_recent_t *
-prof_recent_alloc_begin(tsd_t *tsd) {
-malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-return ql_first(&prof_recent_alloc_list);
-}
-
-STATIC_INLINE_IF_NOT_TEST prof_recent_t *
-prof_recent_alloc_end(tsd_t *tsd) {
-malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-return NULL;
-}
-
-STATIC_INLINE_IF_NOT_TEST prof_recent_t *
-prof_recent_alloc_next(tsd_t *tsd, prof_recent_t *node) {
-malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-assert(node != NULL);
-return ql_next(&prof_recent_alloc_list, node, link);
-}
-
 static bool
 prof_recent_alloc_is_empty(tsd_t *tsd) {
 malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);

test/unit/prof_recent.c

@@ -172,8 +172,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 if (i < OPT_ALLOC_MAX - 1) {
 malloc_mutex_lock(tsd_tsdn(tsd),
 &prof_recent_alloc_mtx);
-assert_ptr_ne(prof_recent_alloc_begin(tsd),
-prof_recent_alloc_end(tsd),
+assert_false(ql_empty(&prof_recent_alloc_list),
 "Empty recent allocation");
 malloc_mutex_unlock(tsd_tsdn(tsd),
 &prof_recent_alloc_mtx);
@@ -187,9 +186,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 }
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 ++c;
 confirm_record_size(tsd, n, i + c - OPT_ALLOC_MAX);
 if (c == OPT_ALLOC_MAX) {
@@ -220,9 +217,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 assert_ptr_not_null(p, "malloc failed unexpectedly");
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 confirm_record_size(tsd, n, c + OPT_ALLOC_MAX);
 confirm_record_released(tsd, n);
 ++c;
@@ -251,9 +246,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 confirm_malloc(tsd, p);
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 ++c;
 confirm_record_size(tsd, n,
 /* Is the allocation from the third batch? */
@@ -283,9 +276,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 confirm_record_size(tsd, n, c + 3 * OPT_ALLOC_MAX);
 confirm_record_released(tsd, n);
 ++c;
@@ -303,9 +294,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 confirm_record_size(tsd, n, c + 3 * OPT_ALLOC_MAX);
 confirm_record_released(tsd, n);
 ++c;
@@ -323,9 +312,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 ++c;
 confirm_record_size(tsd, n, c + 3 * OPT_ALLOC_MAX);
 confirm_record_released(tsd, n);
@@ -340,9 +327,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 c = 0;
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-for (n = prof_recent_alloc_begin(tsd);
-n != prof_recent_alloc_end(tsd);
-n = prof_recent_alloc_next(tsd, n)) {
+ql_foreach(n, &prof_recent_alloc_list, link) {
 ++c;
 confirm_record_size(tsd, n, c + 3 * OPT_ALLOC_MAX);
 confirm_record_released(tsd, n);
@@ -356,13 +341,12 @@ TEST_BEGIN(test_prof_recent_alloc) {
 assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-n = prof_recent_alloc_begin(tsd);
-assert_ptr_ne(n, prof_recent_alloc_end(tsd), "Recent list is empty");
+assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
+n = ql_first(&prof_recent_alloc_list);
 confirm_record_size(tsd, n, 4 * OPT_ALLOC_MAX - 1);
 confirm_record_released(tsd, n);
-n = prof_recent_alloc_next(tsd, n);
-assert_ptr_eq(n, prof_recent_alloc_end(tsd),
-"Recent list should be empty");
+n = ql_next(&prof_recent_alloc_list, n, link);
+assert_ptr_null(n, "Recent list should only contain one record");
 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
 
 /* Completely turn off. */
@@ -370,7 +354,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-assert_ptr_eq(prof_recent_alloc_begin(tsd), prof_recent_alloc_end(tsd),
+assert_true(ql_empty(&prof_recent_alloc_list),
 "Recent list should be empty");
 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
@@ -379,7 +363,7 @@ TEST_BEGIN(test_prof_recent_alloc) {
 assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
 NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
 malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
-assert_ptr_eq(prof_recent_alloc_begin(tsd), prof_recent_alloc_end(tsd),
+assert_true(ql_empty(&prof_recent_alloc_list),
 "Recent list should be empty");
 malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);