Move junking out of arena/tcache code.

Junk filling is debug-only, and we keep it off the fast path.  Moving it out to
the slow-path wrappers simplifies the arena and tcache internals.

This never tries to junk regions that were shrunk via xallocx.  I think this is
fine for two reasons:
- The shrunk-with-xallocx case is rare.
- We didn't consistently junk shrunk regions before this diff anyway (it
  depends on the opt settings and extent hooks in effect).
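
For reference, here is a minimal standalone sketch of the pattern this change
adopts: junk filling funnels through an overridable callback and only runs on
the slow path.  The fill byte and the callback/default names mirror the diff
below; the surrounding harness (slow_path_alloc, the opt flags passed as
parameters, main) is made up for illustration and is not jemalloc code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Documented allocation fill byte (0xa5), matching junk_alloc_byte below. */
static const uint8_t junk_alloc_byte = 0xa5;

static void default_junk_alloc(void *ptr, size_t usize) {
	memset(ptr, junk_alloc_byte, usize);
}

/* Tests or interposers can swap in their own hook. */
static void (*junk_alloc_callback)(void *ptr, size_t size) = default_junk_alloc;

/* Hypothetical slow-path wrapper: junking happens here, never on the fast path. */
static void *slow_path_alloc(size_t usize, bool opt_junk_alloc, bool zero) {
	void *p = zero ? calloc(1, usize) : malloc(usize);
	if (p != NULL && !zero && opt_junk_alloc) {
		junk_alloc_callback(p, usize);
	}
	return p;
}

int main(void) {
	unsigned char *p = slow_path_alloc(16, /* opt_junk_alloc */ true, /* zero */ false);
	if (p != NULL) {
		printf("first byte after junk fill: 0x%02x\n", p[0]); /* expect 0xa5 */
	}
	free(p);
	return 0;
}
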
Author:     David Goldblatt
Committer:  David Goldblatt
Date:       2020-02-28 11:37:39 -08:00
Commit:     79f1ee2fc0
Parent:     b428dceeaf

9 changed files with 249 additions and 248 deletions

src/arena.c

@@ -1446,30 +1446,10 @@ label_refill:
 		fresh_slab = NULL;
 	}
-	if (config_fill && unlikely(opt_junk_alloc)) {
-		for (unsigned i = 0; i < filled; i++) {
-			void *ptr = *(empty_position - nfill + filled + i);
-			arena_alloc_junk_small(ptr, bin_info, true);
-		}
-	}
 	cache_bin_ncached_set(tbin, binind, filled);
 	arena_decay_tick(tsdn, arena);
 }
-void
-arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
-	if (!zero) {
-		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
-	}
-}
-static void
-arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
-	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
-}
-arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
-    arena_dalloc_junk_small_impl;
 /*
  * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
  * bin->slabcur if necessary.
@@ -1528,18 +1508,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 	if (fresh_slab != NULL) {
 		arena_slab_dalloc(tsdn, arena, fresh_slab);
 	}
-	if (!zero) {
-		if (config_fill) {
-			if (unlikely(opt_junk_alloc)) {
-				arena_alloc_junk_small(ret, bin_info, false);
-			} else if (unlikely(opt_zero)) {
-				memset(ret, 0, usize);
-			}
-		}
-	} else {
-		if (config_fill && unlikely(opt_junk_alloc)) {
-			arena_alloc_junk_small(ret, bin_info, true);
-		}
+	if (zero) {
 		memset(ret, 0, usize);
 	}
 	arena_decay_tick(tsdn, arena);
@@ -1706,11 +1675,8 @@ arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
 /* Returns true if arena_slab_dalloc must be called on slab */
 static bool
 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, edata_t *slab, void *ptr, bool junked) {
+    szind_t binind, edata_t *slab, void *ptr) {
 	const bin_info_t *bin_info = &bin_infos[binind];
-	if (!junked && config_fill && unlikely(opt_junk_free)) {
-		arena_dalloc_junk_small(ptr, bin_info);
-	}
 	arena_slab_reg_dalloc(slab, edata_slab_data_get(slab), ptr);
 	bool ret = false;
@@ -1733,10 +1699,10 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
 }
 bool
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind, edata_t *edata, void *ptr) {
+arena_dalloc_bin_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind, edata_t *edata, void *ptr) {
 	return arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata,
-	    ptr, true);
+	    ptr);
 }
 static void
@@ -1747,7 +1713,7 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
 	malloc_mutex_lock(tsdn, &bin->lock);
 	bool ret = arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, edata,
-	    ptr, false);
+	    ptr);
 	malloc_mutex_unlock(tsdn, &bin->lock);
 	if (ret) {

src/jemalloc.c

@@ -81,6 +81,24 @@ const char *zero_realloc_mode_names[] = {
"abort",
};
/*
* These are the documented values for junk fill debugging facilities -- see the
* man page.
*/
static const uint8_t junk_alloc_byte = 0xa5;
static const uint8_t junk_free_byte = 0x5a;
static void default_junk_alloc(void *ptr, size_t usize) {
memset(ptr, junk_alloc_byte, usize);
}
static void default_junk_free(void *ptr, size_t usize) {
memset(ptr, junk_free_byte, usize);
}
void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
@@ -2210,6 +2228,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 	assert(usize == isalloc(tsd_tsdn(tsd), allocation));
+	if (config_fill && sopts->slow && !dopts->zero) {
+		if (unlikely(opt_junk_alloc)) {
+			junk_alloc_callback(allocation, usize);
+		} else if (unlikely(opt_zero)) {
+			memset(allocation, 0, usize);
+		}
+	}
 	if (sopts->slow) {
 		UTRACE(0, size, allocation);
 	}
@@ -2582,6 +2608,9 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
 		    false);
 	} else {
+		if (config_fill && slow_path && opt_junk_free) {
+			junk_free_callback(ptr, usize);
+		}
 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
 		    true);
 	}
@@ -2648,6 +2677,9 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
 		    false);
 	} else {
+		if (config_fill && slow_path && opt_junk_free) {
+			junk_free_callback(ptr, usize);
+		}
 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
 		    true);
 	}
@@ -2745,6 +2777,14 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
 	tcache_t *tcache = tsd_tcachep_get(tsd);
 	cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
+	/*
+	 * If junking were enabled, this is where we would do it. It's not
+	 * though, since we ensured above that we're on the fast path. Assert
+	 * that to double-check.
+	 */
+	assert(!opt_junk_free);
 	if (!cache_bin_dalloc_easy(bin, ptr)) {
 		return false;
 	}
@@ -3180,6 +3220,16 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
 	UTRACE(ptr, size, p);
 	check_entry_exit_locking(tsd_tsdn(tsd));
+	if (config_fill && malloc_slow && !zero && usize > old_usize) {
+		size_t excess_len = usize - old_usize;
+		void *excess_start = (void *)((uintptr_t)p + old_usize);
+		if (unlikely(opt_junk_alloc)) {
+			junk_alloc_callback(excess_start, excess_len);
+		} else if (unlikely(opt_zero)) {
+			memset(excess_start, 0, excess_len);
+		}
+	}
 	return p;
 label_oom:
 	if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -3465,6 +3515,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 		goto label_not_resized;
 	}
 	thread_dalloc_event(tsd, old_usize);
+	if (config_fill && malloc_slow) {
+		if (usize > old_usize && !zero) {
+			size_t excess_len = usize - old_usize;
+			void *excess_start = (void *)((uintptr_t)ptr + old_usize);
+			if (unlikely(opt_junk_alloc)) {
+				junk_alloc_callback(excess_start, excess_len);
+			} else if (unlikely(opt_zero)) {
+				memset(excess_start, 0, excess_len);
+			}
+		}
+	}
 label_not_resized:
 	if (unlikely(!tsd_fast(tsd))) {
 		uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
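
The do_rallocx and je_xallocx hunks above share one detail worth calling out:
when an in-place resize grows the allocation, only the newly exposed tail
[old_usize, usize) gets junked (or zeroed), never the caller's existing bytes.
A standalone sketch of that arithmetic, using a toy buffer in place of the
resized allocation (junk_grown_tail and the fill constant are illustrative,
not jemalloc API):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static const uint8_t junk_alloc_byte = 0xa5;

/* Fill only the bytes that the grow exposed, leaving old contents intact. */
static void junk_grown_tail(void *p, size_t old_usize, size_t usize) {
	if (usize > old_usize) {
		size_t excess_len = usize - old_usize;
		void *excess_start = (void *)((uintptr_t)p + old_usize);
		memset(excess_start, junk_alloc_byte, excess_len);
	}
}

int main(void) {
	unsigned char buf[32] = {0};
	junk_grown_tail(buf, 8, sizeof(buf));  /* pretend usize grew from 8 to 32 */
	assert(buf[7] == 0);                   /* old region untouched */
	assert(buf[8] == junk_alloc_byte);     /* first new byte junked */
	assert(buf[31] == junk_alloc_byte);    /* last new byte junked */
	return 0;
}
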

src/large.c

@@ -38,8 +38,8 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	}
 	/*
 	 * Copy zero into is_zeroed and pass the copy when allocating the
-	 * extent, so that it is possible to make correct junk/zero fill
-	 * decisions below, even if is_zeroed ends up true when zero is false.
+	 * extent, so that it is possible to make correct zero fill decisions
+	 * below, even if is_zeroed ends up true when zero is false.
 	 */
 	is_zeroed = zero;
 	if (likely(!tsdn_null(tsdn))) {
@@ -60,36 +60,12 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	if (zero) {
 		assert(is_zeroed);
-	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset(edata_addr_get(edata), JEMALLOC_ALLOC_JUNK,
-		    edata_usize_get(edata));
 	}
 	arena_decay_tick(tsdn, arena);
 	return edata_addr_get(edata);
 }
-static void
-large_dalloc_junk_impl(void *ptr, size_t size) {
-	memset(ptr, JEMALLOC_FREE_JUNK, size);
-}
-large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
-static void
-large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
-	if (config_fill && have_dss && unlikely(opt_junk_free)) {
-		/*
-		 * Only bother junk filling if the extent isn't about to be
-		 * unmapped.
-		 */
-		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
-			large_dalloc_junk(ptr, size);
-		}
-	}
-}
-large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
-    large_dalloc_maybe_junk_impl;
 static bool
 large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
 	arena_t *arena = arena_get_from_edata(edata);
@@ -112,11 +88,6 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
 			return true;
 		}
-		if (config_fill && unlikely(opt_junk_free)) {
-			large_dalloc_maybe_junk(edata_addr_get(trail),
-			    edata_size_get(trail));
-		}
 		arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
 	}
@@ -142,9 +113,8 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 	}
 	/*
 	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
-	 * extent, so that it is possible to make correct junk/zero fill
-	 * decisions below, even if is_zeroed_trail ends up true when zero is
-	 * false.
+	 * extent, so that it is possible to make correct zero fill decisions
+	 * below, even if is_zeroed_trail ends up true when zero is false.
 	 */
 	bool is_zeroed_trail = zero;
 	edata_t *trail;
@@ -201,11 +171,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 			memset(zbase, 0, nzero);
 		}
 		assert(is_zeroed_trail);
-	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset((void *)((uintptr_t)edata_addr_get(edata) + oldusize),
-		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
 	}
 	arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
 	return false;
@@ -310,21 +276,18 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
 }
 /*
- * junked_locked indicates whether the extent's data have been junk-filled, and
- * whether the arena's large_mtx is currently held.
+ * locked indicates whether the arena's large_mtx is currently held.
  */
 static void
 large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
-    bool junked_locked) {
-	if (!junked_locked) {
+    bool locked) {
+	if (!locked) {
 		/* See comments in arena_bin_slabs_full_insert(). */
 		if (!arena_is_auto(arena)) {
 			malloc_mutex_lock(tsdn, &arena->large_mtx);
 			edata_list_remove(&arena->large, edata);
 			malloc_mutex_unlock(tsdn, &arena->large_mtx);
 		}
-		large_dalloc_maybe_junk(edata_addr_get(edata),
-		    edata_usize_get(edata));
 	} else {
 		/* Only hold the large_mtx if necessary. */
 		if (!arena_is_auto(arena)) {
@@ -342,7 +305,7 @@ large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
 }
 void
-large_dalloc_prep_junked_locked(tsdn_t *tsdn, edata_t *edata) {
+large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) {
 	large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
 }

src/tcache.c

@@ -176,7 +176,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
*/
VARIABLE_ARRAY(edata_t *, item_edata, nflush + 1);
void **bottom_item = cache_bin_bottom_item_get(tbin, binind);
/* Look up edata once per item. */
if (config_opt_safety_checks) {
tbin_edatas_lookup_size_check(tsd, tbin, binind, nflush,
@@ -262,7 +262,7 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 			if (tcache_bin_flush_match(edata, cur_arena_ind,
 			    cur_binshard, small)) {
-				large_dalloc_prep_junked_locked(tsdn,
+				large_dalloc_prep_locked(tsdn,
 				    edata);
 			}
 		}
@@ -291,8 +291,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 				continue;
 			}
 			if (small) {
-				if (arena_dalloc_bin_junked_locked(tsdn,
-				    cur_arena, cur_bin, binind, edata, ptr)) {
+				if (arena_dalloc_bin_locked(tsdn, cur_arena,
+				    cur_bin, binind, edata, ptr)) {
 					dalloc_slabs[dalloc_count] = edata;
 					dalloc_count++;
 				}