Move junking out of arena/tcache code.

This is debug-only and we keep it off the fast path.  Moving it here (out of the
arena/tcache internals and into the top-level allocation paths) simplifies the
internal logic.

This never tries to junk regions that were shrunk via xallocx.  I think this is
fine for two reasons:
- The shrunk-with-xallocx case is rare.
- We didn't always junk those regions before this diff anyway (it depended on
  the opt settings and the extent hooks in effect).
David Goldblatt, 2020-02-28 11:37:39 -08:00 (committed by David Goldblatt)
parent b428dceeaf
commit 79f1ee2fc0
9 changed files with 249 additions and 248 deletions
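
For orientation before the per-file hunks: this change deletes the module-local junk helpers (arena_alloc_junk_small, arena_dalloc_junk_small, large_dalloc_junk, large_dalloc_maybe_junk) and routes all junk filling through two overridable callbacks, junk_alloc_callback and junk_free_callback, invoked only from the top-level slow paths. The standalone C sketch below shows the shape of that hook mechanism; the toy_* functions and option stand-ins are illustrative only, not jemalloc code, and the real call sites additionally check config_fill, opt_zero, and the slow-path flag (see the hunks below).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The documented fill bytes (also defined in the hunks below). */
static const unsigned char junk_alloc_byte = 0xa5;
static const unsigned char junk_free_byte = 0x5a;

static void default_junk_alloc(void *ptr, size_t usize) {
    memset(ptr, junk_alloc_byte, usize);
}
static void default_junk_free(void *ptr, size_t usize) {
    memset(ptr, junk_free_byte, usize);
}

/* Overridable hooks, as in the new externs; a test can point these at its
 * own recorder. */
void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;

/* Stand-ins for the runtime options (not the real opt_* globals). */
static int opt_junk_alloc = 1;
static int opt_junk_free = 1;

/* Toy slow-path wrappers for illustration; the fast path never reaches
 * these calls, which is why junking can live entirely up here. */
static void *toy_malloc_slow(size_t usize) {
    void *p = malloc(usize);
    if (p != NULL && opt_junk_alloc) {
        junk_alloc_callback(p, usize);
    }
    return p;
}
static void toy_free_slow(void *p, size_t usize) {
    if (opt_junk_free) {
        junk_free_callback(p, usize);
    }
    free(p);
}

int main(void) {
    unsigned char *p = toy_malloc_slow(16);
    if (p == NULL) {
        return 1;
    }
    printf("first byte after alloc junking: 0x%02x\n", p[0]);
    toy_free_slow(p, 16);
    return 0;
}

Keeping the hooks as plain function pointers is what lets the rewritten junk test at the bottom of this diff intercept them, instead of shadowing JET_MUTABLE symbols as the old test did.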

View File

@@ -50,11 +50,6 @@ void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind);
void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
bool zero);
typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
@@ -63,9 +58,9 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
bool arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, edata_t *edata, void *ptr);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
bool arena_dalloc_bin_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, edata_t *edata, void *ptr);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);

View File

@@ -14,6 +14,8 @@ extern bool opt_confirm_conf;
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
extern void (*junk_free_callback)(void *ptr, size_t size);
extern void (*junk_alloc_callback)(void *ptr, size_t size);
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;

View File

@@ -12,13 +12,7 @@ void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
typedef void (large_dalloc_junk_t)(void *, size_t);
extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
void large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
void large_dalloc(tsdn_t *tsdn, edata_t *edata);
size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);

View File

@@ -61,23 +61,9 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
usize = sz_index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &bin_infos[binind],
false);
} else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
}
}
} else {
if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &bin_infos[binind], true);
}
if (unlikely(zero)) {
memset(ret, 0, usize);
}
if (config_stats) {
bin->tstats.nrequests++;
}
@@ -119,16 +105,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(usize <= tcache_maxclass);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
memset(ret, JEMALLOC_ALLOC_JUNK,
usize);
} else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
}
}
} else {
if (unlikely(zero)) {
memset(ret, 0, usize);
}
@@ -148,10 +125,6 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
<= SC_SMALL_MAXCLASS);
if (slow_path && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, &bin_infos[binind]);
}
bin = tcache_small_bin_get(tcache, binind);
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
unsigned remain = cache_bin_ncached_max_get(binind) >> 1;
@@ -170,10 +143,6 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
> SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
if (slow_path && config_fill && unlikely(opt_junk_free)) {
large_dalloc_junk(ptr, sz_index2size(binind));
}
bin = tcache_large_bin_get(tcache, binind);
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
unsigned remain = cache_bin_ncached_max_get(binind) >> 1;

View File

@@ -1446,30 +1446,10 @@ label_refill:
fresh_slab = NULL;
}
if (config_fill && unlikely(opt_junk_alloc)) {
for (unsigned i = 0; i < filled; i++) {
void *ptr = *(empty_position - nfill + filled + i);
arena_alloc_junk_small(ptr, bin_info, true);
}
}
cache_bin_ncached_set(tbin, binind, filled);
arena_decay_tick(tsdn, arena);
}
void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
if (!zero) {
memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
}
}
static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
arena_dalloc_junk_small_impl;
/*
* Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
* bin->slabcur if necessary.
@@ -1528,18 +1508,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
if (fresh_slab != NULL) {
arena_slab_dalloc(tsdn, arena, fresh_slab);
}
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, bin_info, false);
} else if (unlikely(opt_zero)) {
memset(ret, 0, usize);
}
}
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, bin_info, true);
}
if (zero) {
memset(ret, 0, usize);
}
arena_decay_tick(tsdn, arena);
@@ -1706,11 +1675,8 @@ arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
/* Returns true if arena_slab_dalloc must be called on slab */
static bool
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, edata_t *slab, void *ptr, bool junked) {
szind_t binind, edata_t *slab, void *ptr) {
const bin_info_t *bin_info = &bin_infos[binind];
if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
}
arena_slab_reg_dalloc(slab, edata_slab_data_get(slab), ptr);
bool ret = false;
@@ -1733,10 +1699,10 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
}
bool
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, edata_t *edata, void *ptr) {
arena_dalloc_bin_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
szind_t binind, edata_t *edata, void *ptr) {
return arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata,
ptr, true);
ptr);
}
static void
@@ -1747,7 +1713,7 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
malloc_mutex_lock(tsdn, &bin->lock);
bool ret = arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata,
ptr, false);
ptr);
malloc_mutex_unlock(tsdn, &bin->lock);
if (ret) {

View File

@@ -81,6 +81,24 @@ const char *zero_realloc_mode_names[] = {
"abort",
};
/*
* These are the documented values for junk fill debugging facilities -- see the
* man page.
*/
static const uint8_t junk_alloc_byte = 0xa5;
static const uint8_t junk_free_byte = 0x5a;
static void default_junk_alloc(void *ptr, size_t usize) {
memset(ptr, junk_alloc_byte, usize);
}
static void default_junk_free(void *ptr, size_t usize) {
memset(ptr, junk_free_byte, usize);
}
void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
@@ -2210,6 +2228,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
assert(usize == isalloc(tsd_tsdn(tsd), allocation));
if (config_fill && sopts->slow && !dopts->zero) {
if (unlikely(opt_junk_alloc)) {
junk_alloc_callback(allocation, usize);
} else if (unlikely(opt_zero)) {
memset(allocation, 0, usize);
}
}
if (sopts->slow) {
UTRACE(0, size, allocation);
}
@@ -2582,6 +2608,9 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
false);
} else {
if (config_fill && slow_path && opt_junk_free) {
junk_free_callback(ptr, usize);
}
idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
true);
}
@@ -2648,6 +2677,9 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
false);
} else {
if (config_fill && slow_path && opt_junk_free) {
junk_free_callback(ptr, usize);
}
isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
true);
}
@@ -2745,6 +2777,14 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
tcache_t *tcache = tsd_tcachep_get(tsd);
cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
/*
* If junking were enabled, this is where we would do it. It's not
* though, since we ensured above that we're on the fast path. Assert
* that to double-check.
*/
assert(!opt_junk_free);
if (!cache_bin_dalloc_easy(bin, ptr)) {
return false;
}
@@ -3180,6 +3220,16 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
UTRACE(ptr, size, p);
check_entry_exit_locking(tsd_tsdn(tsd));
if (config_fill && malloc_slow && !zero && usize > old_usize) {
size_t excess_len = usize - old_usize;
void *excess_start = (void *)((uintptr_t)p + old_usize);
if (unlikely(opt_junk_alloc)) {
junk_alloc_callback(excess_start, excess_len);
} else if (unlikely(opt_zero)) {
memset(excess_start, 0, excess_len);
}
}
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -3465,6 +3515,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
goto label_not_resized;
}
thread_dalloc_event(tsd, old_usize);
if (config_fill && malloc_slow) {
if (usize > old_usize && !zero) {
size_t excess_len = usize - old_usize;
void *excess_start = (void *)((uintptr_t)ptr + old_usize);
if (unlikely(opt_junk_alloc)) {
junk_alloc_callback(excess_start, excess_len);
} else if (unlikely(opt_zero)) {
memset(excess_start, 0, excess_len);
}
}
}
label_not_resized:
if (unlikely(!tsd_fast(tsd))) {
uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
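
The do_rallocx() and je_xallocx() hunks above junk only the newly exposed tail when a region grows in place without MALLOCX_ZERO: the fill starts old_usize bytes into the region and covers usize - old_usize bytes, so previously live bytes are never overwritten. The standalone program below just demonstrates that arithmetic; junk_expanded_tail() is a helper invented for this illustration, not a jemalloc function.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The documented alloc-junk byte (matches default_junk_alloc above). */
static const unsigned char junk_alloc_byte = 0xa5;

/* Fill only the bytes exposed by an in-place expansion from old_usize to
 * usize, i.e. the half-open range [old_usize, usize). */
static void junk_expanded_tail(void *p, size_t old_usize, size_t usize) {
    if (usize > old_usize) {
        size_t excess_len = usize - old_usize;
        void *excess_start = (void *)((uintptr_t)p + old_usize);
        memset(excess_start, junk_alloc_byte, excess_len);
    }
}

int main(void) {
    size_t old_usize = 64, usize = 160;
    unsigned char *p = calloc(1, usize); /* pretend this grew in place */
    if (p == NULL) {
        return 1;
    }
    junk_expanded_tail(p, old_usize, usize);
    /* Bytes below old_usize stay 0x00; bytes from old_usize on are 0xa5. */
    printf("p[63]=0x%02x p[64]=0x%02x p[159]=0x%02x\n",
        p[63], p[64], p[159]);
    free(p);
    return 0;
}

This mirrors what the rewritten test_realloc_expand test asserts further down: last_junked_ptr points at the old usable-size boundary and last_junked_usize is the difference.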

View File

@@ -38,8 +38,8 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
}
/*
* Copy zero into is_zeroed and pass the copy when allocating the
* extent, so that it is possible to make correct junk/zero fill
* decisions below, even if is_zeroed ends up true when zero is false.
* extent, so that it is possible to make correct zero fill decisions
* below, even if is_zeroed ends up true when zero is false.
*/
is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) {
@@ -60,36 +60,12 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (zero) {
assert(is_zeroed);
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset(edata_addr_get(edata), JEMALLOC_ALLOC_JUNK,
edata_usize_get(edata));
}
arena_decay_tick(tsdn, arena);
return edata_addr_get(edata);
}
static void
large_dalloc_junk_impl(void *ptr, size_t size) {
memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the extent isn't about to be
* unmapped.
*/
if (opt_retain || (have_dss && extent_in_dss(ptr))) {
large_dalloc_junk(ptr, size);
}
}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
large_dalloc_maybe_junk_impl;
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
arena_t *arena = arena_get_from_edata(edata);
@@ -112,11 +88,6 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
return true;
}
if (config_fill && unlikely(opt_junk_free)) {
large_dalloc_maybe_junk(edata_addr_get(trail),
edata_size_get(trail));
}
arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
}
@@ -142,9 +113,8 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
}
/*
* Copy zero into is_zeroed_trail and pass the copy when allocating the
* extent, so that it is possible to make correct junk/zero fill
* decisions below, even if is_zeroed_trail ends up true when zero is
* false.
* extent, so that it is possible to make correct zero fill decisions
* below, even if is_zeroed_trail ends up true when zero is false.
*/
bool is_zeroed_trail = zero;
edata_t *trail;
@@ -201,11 +171,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
memset(zbase, 0, nzero);
}
assert(is_zeroed_trail);
} else if (config_fill && unlikely(opt_junk_alloc)) {
memset((void *)((uintptr_t)edata_addr_get(edata) + oldusize),
JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
return false;
@@ -310,21 +276,18 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
}
/*
* junked_locked indicates whether the extent's data have been junk-filled, and
* whether the arena's large_mtx is currently held.
* locked indicates whether the arena's large_mtx is currently held.
*/
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
bool junked_locked) {
if (!junked_locked) {
bool locked) {
if (!locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
edata_list_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
large_dalloc_maybe_junk(edata_addr_get(edata),
edata_usize_get(edata));
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
@@ -342,7 +305,7 @@ large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
}
void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) {
large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}

View File

@@ -176,7 +176,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
*/
VARIABLE_ARRAY(edata_t *, item_edata, nflush + 1);
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
/* Look up edata once per item. */
if (config_opt_safety_checks) {
tbin_edatas_lookup_size_check(tsd, tbin, binind, nflush,
@@ -262,7 +262,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
if (tcache_bin_flush_match(edata, cur_arena_ind,
cur_binshard, small)) {
large_dalloc_prep_junked_locked(tsdn,
large_dalloc_prep_locked(tsdn,
edata);
}
}
@@ -291,8 +291,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
continue;
}
if (small) {
if (arena_dalloc_bin_junked_locked(tsdn,
cur_arena, cur_bin, binind, edata, ptr)) {
if (arena_dalloc_bin_locked(tsdn, cur_arena,
cur_bin, binind, edata, ptr)) {
dalloc_slabs[dalloc_count] = edata;
dalloc_count++;
}

View File

@@ -1,141 +1,191 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/util.h"
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static large_dalloc_junk_t *large_dalloc_junk_orig;
static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig;
static void *watch_for_junking;
static bool saw_junking;
#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
static size_t ptr_ind;
static void *volatile ptrs[100];
static void *last_junked_ptr;
static size_t last_junked_usize;
static void
watch_junking(void *p) {
watch_for_junking = p;
saw_junking = false;
reset() {
ptr_ind = 0;
last_junked_ptr = NULL;
last_junked_usize = 0;
}
static void
arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
size_t i;
arena_dalloc_junk_small_orig(ptr, bin_info);
for (i = 0; i < bin_info->reg_size; i++) {
expect_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
if (ptr == watch_for_junking) {
saw_junking = true;
}
test_junk(void *ptr, size_t usize) {
last_junked_ptr = ptr;
last_junked_usize = usize;
}
static void
large_dalloc_junk_intercept(void *ptr, size_t usize) {
size_t i;
large_dalloc_junk_orig(ptr, usize);
for (i = 0; i < usize; i++) {
expect_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
do_allocs(size_t size, bool zero, size_t lg_align) {
#define JUNK_ALLOC(...) \
do { \
assert(ptr_ind + 1 < arraylen(ptrs)); \
void *ptr = __VA_ARGS__; \
assert_ptr_not_null(ptr, ""); \
ptrs[ptr_ind++] = ptr; \
if (opt_junk_alloc && !zero) { \
expect_ptr_eq(ptr, last_junked_ptr, ""); \
expect_zu_eq(last_junked_usize, \
malloc_usable_size(ptr), ""); \
} \
} while (0)
if (!zero && lg_align == 0) {
JUNK_ALLOC(malloc(size));
}
if (ptr == watch_for_junking) {
saw_junking = true;
if (!zero) {
JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
}
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
if (!zero) {
JUNK_ALLOC(je_memalign(1 << lg_align, size));
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
if (!zero && lg_align == LG_PAGE) {
JUNK_ALLOC(je_valloc(size));
}
#endif
int zero_flag = zero ? MALLOCX_ZERO : 0;
JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
| MALLOCX_TCACHE_NONE));
if (lg_align >= LG_SIZEOF_PTR) {
void *memalign_result;
int err = posix_memalign(&memalign_result, (1 << lg_align),
size);
assert_d_eq(err, 0, "");
JUNK_ALLOC(memalign_result);
}
}
static void
large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) {
large_dalloc_maybe_junk_orig(ptr, usize);
if (ptr == watch_for_junking) {
saw_junking = true;
}
}
TEST_BEGIN(test_junk_alloc_free) {
bool zerovals[] = {false, true};
size_t sizevals[] = {
1, 8, 100, 1000, 100*1000
/*
* Memory allocation failure is a real possibility in 32-bit mode.
* Rather than try to check in the face of resource exhaustion, we just
* rely more on the 64-bit tests. This is a little bit white-box-y in
* the sense that this is only a good test strategy if we know that the
* junk pathways don't interact with the allocation selection
* mechanisms; but this is in fact the case.
*/
#if LG_SIZEOF_PTR == 3
, 10 * 1000 * 1000
#endif
};
size_t lg_alignvals[] = {
0, 4, 10, 15, 16, LG_PAGE
#if LG_SIZEOF_PTR == 3
, 20, 24
#endif
};
static void
test_junk(size_t sz_min, size_t sz_max) {
uint8_t *s;
size_t sz_prev, sz, i;
#define JUNK_FREE(...) \
do { \
do_allocs(size, zero, lg_align); \
for (size_t n = 0; n < ptr_ind; n++) { \
void *ptr = ptrs[n]; \
__VA_ARGS__; \
if (opt_junk_free) { \
assert_ptr_eq(ptr, last_junked_ptr, \
""); \
assert_zu_eq(usize, last_junked_usize, \
""); \
} \
reset(); \
} \
} while (0)
for (size_t i = 0; i < arraylen(zerovals); i++) {
for (size_t j = 0; j < arraylen(sizevals); j++) {
for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
bool zero = zerovals[i];
size_t size = sizevals[j];
size_t lg_align = lg_alignvals[k];
size_t usize = nallocx(size,
MALLOCX_LG_ALIGN(lg_align));
if (opt_junk_free) {
arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
large_dalloc_junk_orig = large_dalloc_junk;
large_dalloc_junk = large_dalloc_junk_intercept;
large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
}
sz_prev = 0;
s = (uint8_t *)mallocx(sz_min, 0);
expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
expect_u_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
expect_u_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
if (opt_junk_alloc) {
expect_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
"Newly allocated byte %zu/%zu isn't "
"junk-filled", i, sz);
JUNK_FREE(free(ptr));
JUNK_FREE(dallocx(ptr, 0));
JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
lg_align)));
JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
lg_align)));
JUNK_FREE(sdallocx(ptr, usize,
MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
if (opt_zero_realloc_action
== zero_realloc_action_free) {
JUNK_FREE(realloc(ptr, 0));
}
}
s[i] = 'a';
}
if (xallocx(s, sz+1, 0, 0) == sz) {
uint8_t *t;
watch_junking(s);
t = (uint8_t *)rallocx(s, sz+1, 0);
expect_ptr_not_null((void *)t,
"Unexpected rallocx() failure");
expect_zu_ge(sallocx(t, 0), sz+1,
"Unexpectedly small rallocx() result");
if (!background_thread_enabled()) {
expect_ptr_ne(s, t,
"Unexpected in-place rallocx()");
expect_true(!opt_junk_free || saw_junking,
"Expected region of size %zu to be "
"junk-filled", sz);
}
s = t;
}
}
watch_junking(s);
dallocx(s, 0);
expect_true(!opt_junk_free || saw_junking,
"Expected region of size %zu to be junk-filled", sz);
if (opt_junk_free) {
arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
large_dalloc_junk = large_dalloc_junk_orig;
large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
}
}
TEST_BEGIN(test_junk_small) {
test_skip_if(!config_fill);
test_junk(1, SC_SMALL_MAXCLASS - 1);
}
TEST_END
TEST_BEGIN(test_junk_large) {
test_skip_if(!config_fill);
test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1)));
TEST_BEGIN(test_realloc_expand) {
char *volatile ptr;
char *volatile expanded;
test_skip_if(!opt_junk_alloc);
/* Realloc */
ptr = malloc(SC_SMALL_MAXCLASS);
expanded = realloc(ptr, SC_LARGE_MINCLASS);
expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
expect_zu_eq(last_junked_usize,
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
free(expanded);
/* rallocx(..., 0) */
ptr = malloc(SC_SMALL_MAXCLASS);
expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
expect_zu_eq(last_junked_usize,
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
free(expanded);
/* rallocx(..., nonzero) */
ptr = malloc(SC_SMALL_MAXCLASS);
expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
expect_zu_eq(last_junked_usize,
SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
free(expanded);
/* rallocx(..., MALLOCX_ZERO) */
ptr = malloc(SC_SMALL_MAXCLASS);
last_junked_ptr = (void *)-1;
last_junked_usize = (size_t)-1;
expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
expect_ptr_eq(last_junked_ptr, (void *)-1, "");
expect_zu_eq(last_junked_usize, (size_t)-1, "");
free(expanded);
/*
* Unfortunately, testing xallocx reliably is difficult to do portably
* (since allocations can be expanded / not expanded differently on
* different platforms). We rely on manual inspection there -- the
* xallocx pathway is easy to inspect, though.
*
* Likewise, we don't test the shrinking pathways. It's difficult to do
* so consistently (because of the risk of split failure or memory
* exhaustion, in which case no junking should happen). This is fine
* -- junking is a best-effort debug mechanism in the first place.
*/
}
TEST_END
int
main(void) {
junk_alloc_callback = &test_junk;
junk_free_callback = &test_junk;
return test(
test_junk_small,
test_junk_large);
test_junk_alloc_free,
test_realloc_expand);
}
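
For completeness, here is a self-contained mock of the interception pattern the test above relies on: main() redirects junk_alloc_callback/junk_free_callback to test_junk(), so the harness can assert on the last junked region without ever reading freed memory. Everything prefixed mock_ below is stand-in scaffolding written for this sketch, not jemalloc internals.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hook as declared by the allocator; NULL until a test installs one. */
void (*junk_free_callback)(void *ptr, size_t size);

static void *last_junked_ptr;
static size_t last_junked_usize;

/* Recorder, mirroring test_junk() above. */
static void test_junk(void *ptr, size_t usize) {
    last_junked_ptr = ptr;
    last_junked_usize = usize;
}

/* Stand-in for the allocator's slow-path free, which invokes the hook
 * before actually releasing the memory. */
static void mock_free_slow(void *ptr, size_t usize) {
    if (junk_free_callback != NULL) {
        junk_free_callback(ptr, usize);
    }
    free(ptr);
}

int main(void) {
    junk_free_callback = &test_junk; /* as in the test's main() */
    void *p = malloc(128);
    if (p == NULL) {
        return 1;
    }
    mock_free_slow(p, 128);
    printf("last junked: %p, %zu bytes\n", last_junked_ptr,
        last_junked_usize);
    return 0;
}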