Let opt.zero propagate to core allocation.

I.e. set dopts->zero early on when opt.zero is true, rather than leaving
dopts->zero to the entry-point function (malloc, calloc, etc.) and handling
opt.zero afterwards with a memset.  This avoids situations where we zero once
in the large-alloc pathway and then zero again via memset.
David Goldblatt, 2020-04-29 09:05:57 -07:00 (committed by David Goldblatt)
parent 2c09d43494
commit f1f8a75496
2 changed files with 23 additions and 27 deletions
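
The redundant work the message describes can be pictured with a small
standalone sketch.  The helpers below (core_alloc, alloc_before, alloc_after)
are illustrative stand-ins, not jemalloc internals; only opt_zero and the
dopts->zero flag correspond to names that appear in the diff.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for opt.zero; in jemalloc this is a startup option. */
static bool opt_zero = true;

/* Stand-in for the core allocator: returns zeroed memory when asked to. */
static void *
core_alloc(size_t usize, bool zero) {
	return zero ? calloc(1, usize) : malloc(usize);
}

/* Old shape: the slow-path entry point memsets after the fact. */
static void *
alloc_before(size_t usize, bool dopts_zero) {
	void *p = core_alloc(usize, dopts_zero);
	if (p != NULL && opt_zero && !dopts_zero) {
		memset(p, 0, usize);	/* may re-zero pages the core path already zeroed */
	}
	return p;
}

/* New shape: fold opt.zero into the zero request before the core allocation. */
static void *
alloc_after(size_t usize, bool dopts_zero) {
	if (opt_zero) {
		dopts_zero = true;
	}
	return core_alloc(usize, dopts_zero);	/* zeroing happens at most once */
}

int
main(void) {
	free(alloc_before(4096, false));
	free(alloc_after(4096, false));
	return 0;
}

With opt.zero folded in early, the remaining slow-path fixup blocks in the
entry points only have to handle junk filling, which is what the hunks below
reduce them to.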


@@ -2165,7 +2165,9 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 	}
 
 	/* This is the beginning of the "core" algorithm. */
-
+	if (config_fill && sopts->slow && opt_zero) {
+		dopts->zero = true;
+	}
 	if (dopts->alignment == 0) {
 		ind = sz_size2index(size);
 		if (unlikely(ind >= SC_NSIZES)) {
@@ -2263,12 +2265,9 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 	assert(usize == isalloc(tsd_tsdn(tsd), allocation));
 
-	if (config_fill && sopts->slow && !dopts->zero) {
-		if (unlikely(opt_junk_alloc)) {
-			junk_alloc_callback(allocation, usize);
-		} else if (unlikely(opt_zero)) {
-			memset(allocation, 0, usize);
-		}
+	if (config_fill && sopts->slow && !dopts->zero
+	    && unlikely(opt_junk_alloc)) {
+		junk_alloc_callback(allocation, usize);
 	}
 
 	if (sopts->slow) {
@@ -3210,7 +3209,6 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
 	size_t usize;
 	size_t old_usize;
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
-	bool zero = flags & MALLOCX_ZERO;
 	arena_t *arena;
 	tcache_t *tcache;
@@ -3220,6 +3218,11 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
 	tsd = tsd_fetch();
 	check_entry_exit_locking(tsd_tsdn(tsd));
 
+	bool zero = flags & MALLOCX_ZERO;
+	if (config_fill && unlikely(opt_zero)) {
+		zero = true;
+	}
+
 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
@@ -3275,14 +3278,11 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
 	UTRACE(ptr, size, p);
 	check_entry_exit_locking(tsd_tsdn(tsd));
 
-	if (config_fill && malloc_slow && !zero && usize > old_usize) {
+	if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
+	    && !zero) {
 		size_t excess_len = usize - old_usize;
 		void *excess_start = (void *)((uintptr_t)p + old_usize);
-		if (unlikely(opt_junk_alloc)) {
-			junk_alloc_callback(excess_start, excess_len);
-		} else if (unlikely(opt_zero)) {
-			memset(excess_start, 0, excess_len);
-		}
+		junk_alloc_callback(excess_start, excess_len);
 	}
 
 	return p;
@@ -3497,7 +3497,11 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	tsd_t *tsd;
 	size_t usize, old_usize;
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
 	bool zero = flags & MALLOCX_ZERO;
+	if (config_fill && unlikely(opt_zero)) {
+		zero = true;
+	}
 
 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
 	    "flags: %d", ptr, size, extra, flags);
@@ -3561,16 +3565,11 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	thread_alloc_event(tsd, usize);
 	thread_dalloc_event(tsd, old_usize);
 
-	if (config_fill && malloc_slow) {
-		if (usize > old_usize && !zero) {
-			size_t excess_len = usize - old_usize;
-			void *excess_start = (void *)((uintptr_t)ptr + old_usize);
-			if (unlikely(opt_junk_alloc)) {
-				junk_alloc_callback(excess_start, excess_len);
-			} else if (unlikely(opt_zero)) {
-				memset(excess_start, 0, excess_len);
-			}
-		}
+	if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
+	    !zero) {
+		size_t excess_len = usize - old_usize;
+		void *excess_start = (void *)((uintptr_t)ptr + old_usize);
+		junk_alloc_callback(excess_start, excess_len);
 	}
 label_not_resized:
 	if (unlikely(!tsd_fast(tsd))) {


@@ -32,9 +32,6 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		return NULL;
 	}
 
-	if (config_fill && unlikely(opt_zero)) {
-		zero = true;
-	}
 	if (likely(!tsdn_null(tsdn))) {
 		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
 	}
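
As a usage note: opt.zero is a startup option (e.g. MALLOC_CONF="zero:true" on
a build configured with --enable-fill), and it can be read back through the
mallctl interface.  A minimal check, assuming the public API is unprefixed
(some builds expose it as je_mallctl):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	bool zero;
	size_t sz = sizeof(zero);

	/* "opt.zero" is read-only; it reflects the configuration at startup. */
	if (mallctl("opt.zero", &zero, &sz, NULL, 0) == 0) {
		printf("opt.zero: %s\n", zero ? "true" : "false");
	} else {
		printf("opt.zero is not available in this build\n");
	}
	return 0;
}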