Add stats for stashed bytes in tcache.

Qi Wang 2021-11-30 14:39:34 -08:00 committed by Qi Wang
parent b75822bc6e
commit e491cef9ab
9 changed files with 112 additions and 36 deletions
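
For reference, not part of the commit's diff: a minimal sketch of how the new counter can be read from application code through jemalloc's public mallctl() interface, assuming a stats-enabled build with the unprefixed API. As the updated mallctl test below does, it reads the merged per-arena values via MALLCTL_ARENAS_ALL; stashed bytes are now reported separately from tcache_bytes rather than folded into it, so summing the two reproduces the old tcache_bytes total.

#include <stdio.h>
#include <jemalloc/jemalloc.h>	/* mallctl(), MALLCTL_ARENAS_ALL (unprefixed build assumed) */

int
main(void) {
	/* Refresh the stats epoch so the counters read below are current. */
	uint64_t epoch = 1;
	size_t epoch_sz = sizeof(epoch);
	mallctl("epoch", (void *)&epoch, &epoch_sz, (void *)&epoch,
	    sizeof(epoch));

	char name[128];
	size_t cached, stashed;
	size_t sz = sizeof(size_t);

	/* MALLCTL_ARENAS_ALL merges the stat across all arenas. */
	snprintf(name, sizeof(name), "stats.arenas.%d.tcache_bytes",
	    MALLCTL_ARENAS_ALL);
	if (mallctl(name, (void *)&cached, &sz, NULL, 0) != 0) {
		return 1;	/* stats disabled in this build */
	}
	snprintf(name, sizeof(name), "stats.arenas.%d.tcache_stashed_bytes",
	    MALLCTL_ARENAS_ALL);
	if (mallctl(name, (void *)&stashed, &sz, NULL, 0) != 0) {
		return 1;
	}

	/*
	 * Stashed (sanitizer-quarantined) bytes are reported separately;
	 * cached + stashed matches the pre-change tcache_bytes total.
	 */
	printf("tcache cached: %zu bytes, stashed: %zu bytes\n",
	    cached, stashed);
	return 0;
}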

View File

@@ -73,6 +73,7 @@ struct arena_stats_s {
/* Number of bytes cached in tcache associated with this arena. */
size_t tcache_bytes; /* Derived. */
size_t tcache_stashed_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

View File

@@ -223,18 +223,6 @@ cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
return n;
}
/*
* Obtain a racy view of the number of items currently in the cache bin, in the
* presence of possible concurrent modifications.
*/
static inline cache_bin_sz_t
cache_bin_ncached_get_remote(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
/* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
return n;
}
/*
* Internal.
*
@@ -436,15 +424,49 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) {
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
bool racy) {
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
void **full = cache_bin_full_position_get(bin, info);
uint16_t nstashed = cache_bin_diff(bin, (uint16_t)(uintptr_t)full,
cache_bin_sz_t n = cache_bin_diff(bin, (uint16_t)(uintptr_t)full,
bin->low_bits_full) / sizeof(void *);
assert(nstashed <= ncached_max);
assert(n <= ncached_max);
return nstashed;
/* Below are for assertions only. */
void *stashed = *(full + n - 1);
bool aligned = cache_bin_nonfast_aligned(stashed);
#ifdef JEMALLOC_JET
/* Allow arbitrary pointers to be stashed in tests. */
aligned = true;
#endif
assert(n == 0 || (stashed != NULL && aligned) || racy);
return n;
}
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info, false);
assert(n <= cache_bin_info_ncached_max(info));
return n;
}
/*
* Obtain a racy view of the number of items currently in the cache bin, in the
* presence of possible concurrent modifications.
*/
static inline void
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
*ncached = n;
n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
assert(n <= cache_bin_info_ncached_max(info));
*nstashed = n;
/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
}
/*
@@ -538,7 +560,7 @@ cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
cache_bin_sz_t nstashed) {
assert(nstashed > 0);
assert(cache_bin_nstashed_get(bin, info) == nstashed);
assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
void **full = cache_bin_full_position_get(bin, info);
arr->ptr = full;
@@ -551,7 +573,7 @@ cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
/* Reset the bin local full position. */
bin->low_bits_full = (uint16_t)(uintptr_t)full;
assert(cache_bin_nstashed_get(bin, info) == 0);
assert(cache_bin_nstashed_get_local(bin, info) == 0);
}
/*

View File

@@ -148,18 +148,21 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
/* tcache_bytes counts currently cached bytes. */
/* Currently cached bytes and sanitizer-stashed bytes in tcache. */
astats->tcache_bytes = 0;
astats->tcache_stashed_bytes = 0;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
for (szind_t i = 0; i < nhbins; i++) {
cache_bin_t *cache_bin = &descriptor->bins[i];
astats->tcache_bytes +=
cache_bin_ncached_get_remote(cache_bin,
&tcache_bin_info[i]) * sz_index2size(i) +
cache_bin_nstashed_get(cache_bin,
&tcache_bin_info[i]) * sz_index2size(i);
cache_bin_sz_t ncached, nstashed;
cache_bin_nitems_get_remote(cache_bin,
&tcache_bin_info[i], &ncached, &nstashed);
astats->tcache_bytes += ncached * sz_index2size(i);
astats->tcache_stashed_bytes += nstashed *
sz_index2size(i);
}
}
malloc_mutex_prof_read(tsdn,

View File

@@ -290,6 +290,7 @@ CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
@@ -787,6 +788,8 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
{NAME("tcache_stashed_bytes"),
CTL(stats_arenas_i_tcache_stashed_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
{NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
@@ -1169,6 +1172,8 @@ MUTEX_PROF_ARENA_MUTEXES
&astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
sdstats->astats.tcache_stashed_bytes +=
astats->astats.tcache_stashed_bytes;
if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime;
@@ -3503,6 +3508,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
arenas_i(mib[2])->astats->astats.resident,
size_t)

View File

@@ -1055,7 +1055,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
large_nflushes;
size_t tcache_bytes, abandoned_vm;
size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm;
uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
@@ -1344,6 +1344,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
GET_AND_EMIT_MEM_STAT(internal)
GET_AND_EMIT_MEM_STAT(metadata_thp)
GET_AND_EMIT_MEM_STAT(tcache_bytes)
GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
GET_AND_EMIT_MEM_STAT(resident)
GET_AND_EMIT_MEM_STAT(abandoned_vm)
GET_AND_EMIT_MEM_STAT(extent_avail)

View File

@@ -553,7 +553,7 @@ tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
info);
cache_bin_sz_t nstashed = cache_bin_nstashed_get(cache_bin, info);
cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info);
assert(orig_cached + nstashed <= cache_bin_info_ncached_max(info));
if (nstashed == 0) {
return;
@@ -567,7 +567,7 @@ tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
is_small);
cache_bin_finish_flush_stashed(cache_bin, info);
assert(cache_bin_nstashed_get(cache_bin, info) == 0);
assert(cache_bin_nstashed_get_local(cache_bin, info) == 0);
assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
assert(head_content == *cache_bin->stack_head);
}

View File

@@ -266,7 +266,8 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Bin not empty");
expect_true(cache_bin_nstashed_get(bin, info) == 0, "Bin not empty");
expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
"Bin not empty");
expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");
bool ret;
@@ -283,7 +284,7 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
ret = cache_bin_stash(bin, &ptrs[i + nfill]);
expect_true(ret, "Unexpected stash failure");
}
expect_true(cache_bin_nstashed_get(bin, info) == nstash,
expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
"Wrong stashed count");
if (nfill + nstash == info->ncached_max) {
@@ -303,7 +304,7 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
}
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get(bin, info) == nstash,
expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
"Wrong stashed count");
cache_bin_alloc(bin, &ret);
@@ -313,7 +314,7 @@ do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_finish_flush_stashed(bin, info);
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get(bin, info) == 0,
expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
"Wrong stashed count");
cache_bin_alloc(bin, &ret);
@@ -338,7 +339,7 @@ TEST_BEGIN(test_cache_bin_stash) {
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_ncached_get_local(&bin, &info) ==
(i / 2 + i % 2), "Wrong ncached value");
expect_true(cache_bin_nstashed_get(&bin, &info) == i / 2,
expect_true(cache_bin_nstashed_get_local(&bin, &info) == i / 2,
"Wrong nstashed value");
if (i % 2 == 0) {
cache_bin_dalloc_easy(&bin, &ptrs[i]);
@@ -361,7 +362,7 @@ TEST_BEGIN(test_cache_bin_stash) {
expect_true(diff % 2 == 0, "Should be able to alloc");
} else {
expect_false(ret, "Should not alloc stashed");
expect_true(cache_bin_nstashed_get(&bin, &info) ==
expect_true(cache_bin_nstashed_get_local(&bin, &info) ==
ncached_max / 2, "Wrong nstashed value");
}
}

View File

@@ -367,7 +367,7 @@ TEST_END
static void
test_tcache_bytes_for_usize(size_t usize) {
uint64_t epoch;
size_t tcache_bytes;
size_t tcache_bytes, tcache_stashed_bytes;
size_t sz = sizeof(tcache_bytes);
void *ptr = mallocx(usize, 0);
@@ -377,7 +377,11 @@ test_tcache_bytes_for_usize(size_t usize) {
assert_d_eq(mallctl(
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
&tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
size_t tcache_bytes_before = tcache_bytes;
assert_d_eq(mallctl(
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes;
dallocx(ptr, 0);
expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
@@ -385,7 +389,11 @@ test_tcache_bytes_for_usize(size_t usize) {
assert_d_eq(mallctl(
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
&tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
size_t tcache_bytes_after = tcache_bytes;
assert_d_eq(mallctl(
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes;
assert_zu_eq(tcache_bytes_after - tcache_bytes_before,
usize, "Incorrectly attributed a free");
}

View File

@@ -53,6 +53,26 @@ uaf_detection_enabled(void) {
return true;
}
static size_t
read_tcache_stashed_bytes(unsigned arena_ind) {
if (!config_stats) {
return 0;
}
uint64_t epoch;
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
size_t tcache_stashed_bytes;
size_t sz = sizeof(tcache_stashed_bytes);
assert_d_eq(mallctl(
"stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
return tcache_stashed_bytes;
}
static void
test_use_after_free(size_t alloc_size, bool write_after_free) {
void *ptr = (void *)(uintptr_t)san_uaf_align;
@@ -95,6 +115,7 @@ test_use_after_free(size_t alloc_size, bool write_after_free) {
while (iter-- != 0) {
char *volatile mem = items[iter];
assert_c_eq(*mem, magic, "Unexpected memory content");
size_t stashed_before = read_tcache_stashed_bytes(arena_ind);
free(mem);
if (*mem != magic) {
junked = true;
@@ -103,6 +124,18 @@ test_use_after_free(size_t alloc_size, bool write_after_free) {
if (write_after_free) {
*(char *)mem = magic + 1;
}
size_t stashed_after = read_tcache_stashed_bytes(
arena_ind);
/*
* An edge case is the deallocation above triggering the
* tcache GC event, in which case the stashed pointers
* may get flushed immediately, before returning from
* free(). Treat these cases as checked already.
*/
if (stashed_after <= stashed_before) {
fake_abort_called = true;
}
}
/* Flush tcache (including stashed). */
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),