Move junking out of arena/tcache code.

Junk filling is debug-only functionality, and we keep it off the fast path.
Moving it out of the arena/tcache code simplifies the internal logic.

With this change, we never try to junk regions that were shrunk via xallocx.  I
think this is fine, for two reasons:
- The shrunk-with-xallocx case is rare.
- We didn't reliably junk such regions before this diff anyway (whether we did
  depended on the opt settings and the extent hooks in effect).
Author:       David Goldblatt
Date:         2020-02-28 11:37:39 -08:00
Committed by: David Goldblatt
Parent:       b428dceeaf
Commit:       79f1ee2fc0
9 changed files with 249 additions and 248 deletions
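
A note for orientation before the diff: the old test intercepted the
allocator-internal junking functions (arena_dalloc_junk_small,
large_dalloc_junk, large_dalloc_maybe_junk); the new test instead installs the
junk_alloc_callback / junk_free_callback function pointers that this commit
introduces, and records what they report.  The sketch below shows the general
callback pattern; every name in it is a hypothetical stand-in except the two
callback globals, and it is an illustration, not jemalloc's actual internals.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical junk bytes, standing in for JEMALLOC_ALLOC_JUNK and
 * JEMALLOC_FREE_JUNK. */
#define ALLOC_JUNK 0xa5
#define FREE_JUNK 0x5a

typedef void (junk_cb_t)(void *ptr, size_t usize);

/* Default behavior: fill the usable region with a recognizable pattern. */
static void
default_junk_alloc(void *ptr, size_t usize) {
	memset(ptr, ALLOC_JUNK, usize);
}

static void
default_junk_free(void *ptr, size_t usize) {
	memset(ptr, FREE_JUNK, usize);
}

/* The single indirection point; a test overwrites these pointers (as main()
 * does at the end of the diff) to observe exactly what got junked. */
junk_cb_t *junk_alloc_callback = &default_junk_alloc;
junk_cb_t *junk_free_callback = &default_junk_free;

/* In the allocator, deallocation-time junking then collapses to one
 * off-fast-path call site instead of per-arena/per-tcache logic. */
static void
dalloc_slow_path(void *ptr, size_t usize, bool junk_enabled) {
	if (junk_enabled) {
		junk_free_callback(ptr, usize);
	}
	/* ... hand the region back to the arena or tcache ... */
}

In the diff, the test-side half of this pattern is test_junk(), which simply
records its arguments into last_junked_ptr / last_junked_usize for the
assertions to inspect.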


@@ -1,141 +1,191 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/util.h"
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static large_dalloc_junk_t *large_dalloc_junk_orig;
static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig;
static void *watch_for_junking;
static bool saw_junking;
#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
static size_t ptr_ind;
static void *volatile ptrs[100];
static void *last_junked_ptr;
static size_t last_junked_usize;
 static void
-watch_junking(void *p) {
-	watch_for_junking = p;
-	saw_junking = false;
+reset() {
+	ptr_ind = 0;
+	last_junked_ptr = NULL;
+	last_junked_usize = 0;
 }

 static void
-arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
-	size_t i;
-
-	arena_dalloc_junk_small_orig(ptr, bin_info);
-	for (i = 0; i < bin_info->reg_size; i++) {
-		expect_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
-		    "Missing junk fill for byte %zu/%zu of deallocated region",
-		    i, bin_info->reg_size);
-	}
-	if (ptr == watch_for_junking) {
-		saw_junking = true;
-	}
+test_junk(void *ptr, size_t usize) {
+	last_junked_ptr = ptr;
+	last_junked_usize = usize;
 }

 static void
-large_dalloc_junk_intercept(void *ptr, size_t usize) {
-	size_t i;
-
-	large_dalloc_junk_orig(ptr, usize);
-	for (i = 0; i < usize; i++) {
-		expect_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
-		    "Missing junk fill for byte %zu/%zu of deallocated region",
-		    i, usize);
+do_allocs(size_t size, bool zero, size_t lg_align) {
+#define JUNK_ALLOC(...)						\
+	do {							\
+		assert(ptr_ind + 1 < arraylen(ptrs));		\
+		void *ptr = __VA_ARGS__;			\
+		assert_ptr_not_null(ptr, "");			\
+		ptrs[ptr_ind++] = ptr;				\
+		if (opt_junk_alloc && !zero) {			\
+			expect_ptr_eq(ptr, last_junked_ptr, "");	\
+			expect_zu_eq(last_junked_usize,		\
+			    malloc_usable_size(ptr), "");	\
+		}						\
+	} while (0)
+	if (!zero && lg_align == 0) {
+		JUNK_ALLOC(malloc(size));
 	}
-	if (ptr == watch_for_junking) {
-		saw_junking = true;
+	if (!zero) {
+		JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
 	}
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+	if (!zero) {
+		JUNK_ALLOC(je_memalign(1 << lg_align, size));
+	}
+#endif
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+	if (!zero && lg_align == LG_PAGE) {
+		JUNK_ALLOC(je_valloc(size));
+	}
+#endif
+	int zero_flag = zero ? MALLOCX_ZERO : 0;
+	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
+	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
+	    | MALLOCX_TCACHE_NONE));
+	if (lg_align >= LG_SIZEOF_PTR) {
+		void *memalign_result;
+		int err = posix_memalign(&memalign_result, (1 << lg_align),
+		    size);
+		assert_d_eq(err, 0, "");
+		JUNK_ALLOC(memalign_result);
+	}
 }

-static void
-large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) {
-	large_dalloc_maybe_junk_orig(ptr, usize);
-	if (ptr == watch_for_junking) {
-		saw_junking = true;
-	}
-}
+TEST_BEGIN(test_junk_alloc_free) {
+	bool zerovals[] = {false, true};
+	size_t sizevals[] = {
+		1, 8, 100, 1000, 100*1000
+		/*
+		 * Memory allocation failure is a real possibility in 32-bit
+		 * mode.  Rather than try to check in the face of resource
+		 * exhaustion, we just rely more on the 64-bit tests.  This is
+		 * a little bit white-box-y in the sense that this is only a
+		 * good test strategy if we know that the junk pathways don't
+		 * interact with the allocation selection mechanisms; but this
+		 * is in fact the case.
+		 */
+#if LG_SIZEOF_PTR == 3
+		, 10 * 1000 * 1000
+#endif
+	};
+	size_t lg_alignvals[] = {
+		0, 4, 10, 15, 16, LG_PAGE
+#if LG_SIZEOF_PTR == 3
+		, 20, 24
+#endif
+	};

-static void
-test_junk(size_t sz_min, size_t sz_max) {
-	uint8_t *s;
-	size_t sz_prev, sz, i;
-
+#define JUNK_FREE(...)						\
+	do {							\
+		do_allocs(size, zero, lg_align);		\
+		for (size_t n = 0; n < ptr_ind; n++) {		\
+			void *ptr = ptrs[n];			\
+			__VA_ARGS__;				\
+			if (opt_junk_free) {			\
+				assert_ptr_eq(ptr, last_junked_ptr,	\
+				    "");			\
+				assert_zu_eq(usize, last_junked_usize,	\
+				    "");			\
+			}					\
+			reset();				\
+		}						\
+	} while (0)
+	for (size_t i = 0; i < arraylen(zerovals); i++) {
+		for (size_t j = 0; j < arraylen(sizevals); j++) {
+			for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
+				bool zero = zerovals[i];
+				size_t size = sizevals[j];
+				size_t lg_align = lg_alignvals[k];
+				size_t usize = nallocx(size,
+				    MALLOCX_LG_ALIGN(lg_align));
-	if (opt_junk_free) {
-		arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
-		arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
-		large_dalloc_junk_orig = large_dalloc_junk;
-		large_dalloc_junk = large_dalloc_junk_intercept;
-		large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
-		large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
-	}
-
-	sz_prev = 0;
-	s = (uint8_t *)mallocx(sz_min, 0);
-	expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
-
-	for (sz = sallocx(s, 0); sz <= sz_max;
-	    sz_prev = sz, sz = sallocx(s, 0)) {
-		if (sz_prev > 0) {
-			expect_u_eq(s[0], 'a',
-			    "Previously allocated byte %zu/%zu is corrupted",
-			    ZU(0), sz_prev);
-			expect_u_eq(s[sz_prev-1], 'a',
-			    "Previously allocated byte %zu/%zu is corrupted",
-			    sz_prev-1, sz_prev);
-		}
-
-		for (i = sz_prev; i < sz; i++) {
-			if (opt_junk_alloc) {
-				expect_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
-				    "Newly allocated byte %zu/%zu isn't "
-				    "junk-filled", i, sz);
+				JUNK_FREE(free(ptr));
+				JUNK_FREE(dallocx(ptr, 0));
+				JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
+				JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
+				    lg_align)));
+				JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
+				    lg_align)));
+				JUNK_FREE(sdallocx(ptr, usize,
+				    MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
+				if (opt_zero_realloc_action
+				    == zero_realloc_action_free) {
+					JUNK_FREE(realloc(ptr, 0));
+				}
 			}
-			s[i] = 'a';
 		}
-
-		if (xallocx(s, sz+1, 0, 0) == sz) {
-			uint8_t *t;
-			watch_junking(s);
-			t = (uint8_t *)rallocx(s, sz+1, 0);
-			expect_ptr_not_null((void *)t,
-			    "Unexpected rallocx() failure");
-			expect_zu_ge(sallocx(t, 0), sz+1,
-			    "Unexpectedly small rallocx() result");
-			if (!background_thread_enabled()) {
-				expect_ptr_ne(s, t,
-				    "Unexpected in-place rallocx()");
-				expect_true(!opt_junk_free || saw_junking,
-				    "Expected region of size %zu to be "
-				    "junk-filled", sz);
-			}
-			s = t;
-		}
 	}
-
-	watch_junking(s);
-	dallocx(s, 0);
-	expect_true(!opt_junk_free || saw_junking,
-	    "Expected region of size %zu to be junk-filled", sz);
-
-	if (opt_junk_free) {
-		arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
-		large_dalloc_junk = large_dalloc_junk_orig;
-		large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
-	}
 }
-
-TEST_BEGIN(test_junk_small) {
-	test_skip_if(!config_fill);
-	test_junk(1, SC_SMALL_MAXCLASS - 1);
-}
 TEST_END

-TEST_BEGIN(test_junk_large) {
-	test_skip_if(!config_fill);
-	test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1)));
+TEST_BEGIN(test_realloc_expand) {
+	char *volatile ptr;
+	char *volatile expanded;
+
+	test_skip_if(!opt_junk_alloc);
+
+	/* Realloc */
+	ptr = malloc(SC_SMALL_MAXCLASS);
+	expanded = realloc(ptr, SC_LARGE_MINCLASS);
+	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+	expect_zu_eq(last_junked_usize,
+	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+	free(expanded);
+
+	/* rallocx(..., 0) */
+	ptr = malloc(SC_SMALL_MAXCLASS);
+	expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
+	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+	expect_zu_eq(last_junked_usize,
+	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+	free(expanded);
+
+	/* rallocx(..., nonzero) */
+	ptr = malloc(SC_SMALL_MAXCLASS);
+	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+	expect_zu_eq(last_junked_usize,
+	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+	free(expanded);
+
+	/* rallocx(..., MALLOCX_ZERO) */
+	ptr = malloc(SC_SMALL_MAXCLASS);
+	last_junked_ptr = (void *)-1;
+	last_junked_usize = (size_t)-1;
+	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
+	expect_ptr_eq(last_junked_ptr, (void *)-1, "");
+	expect_zu_eq(last_junked_usize, (size_t)-1, "");
+	free(expanded);
+
+	/*
+	 * Unfortunately, testing xallocx reliably is difficult to do portably
+	 * (since allocations can be expanded / not expanded differently on
+	 * different platforms).  We rely on manual inspection there -- the
+	 * xallocx pathway is easy to inspect, though.
+	 *
+	 * Likewise, we don't test the shrinking pathways.  It's difficult to
+	 * do so consistently (because of the risk of split failure or memory
+	 * exhaustion, in which case no junking should happen).  This is fine
+	 * -- junking is a best-effort debug mechanism in the first place.
+	 */
 }
 TEST_END

 int
 main(void) {
+	junk_alloc_callback = &test_junk;
+	junk_free_callback = &test_junk;
 	return test(
-	    test_junk_small,
-	    test_junk_large);
+	    test_junk_alloc_free,
+	    test_realloc_expand);
 }
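
A closing note on test_realloc_expand: when a region is expanded from an old
usable size to a new one, only the newly exposed tail should be junk-filled.
That is exactly what the assertions encode: last_junked_ptr should be
&expanded[SC_SMALL_MAXCLASS], and last_junked_usize should be
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS.  A small sketch of that expectation
follows; the helper name and parameters are hypothetical, and only the SC_*
relationship comes from the test itself.

#include <assert.h>
#include <stddef.h>

/* Expected junk report after an expanding realloc: the tail
 * [old_usize, new_usize) is the only newly exposed memory. */
static void
expected_alloc_junk(char *expanded, size_t old_usize, size_t new_usize,
    void **junk_ptr, size_t *junk_len) {
	assert(new_usize > old_usize);
	*junk_ptr = (void *)&expanded[old_usize];
	*junk_len = new_usize - old_usize;
}

With MALLOCX_ZERO, no junking should happen at all; the test checks this by
pre-setting the sentinels to (void *)-1 and (size_t)-1 and verifying that they
are untouched after the rallocx() call.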