Remove Valgrind support.
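jemalloc's Valgrind integration had two pieces: a family of JEMALLOC_VALGRIND_* annotation macros used at call sites throughout the allocator (visible in the removed lines below), and the thin wrappers in src/valgrind.c, which was the only code to include <valgrind/memcheck.h>. The sketch below shows the assumed shape of that shim layer (illustrative, not the verbatim jemalloc_internal.h contents): when built with --enable-valgrind the macro forwards to the wrapper only if the process is actually running under Valgrind, and without that option it compiles to nothing, which is why removing the feature is almost entirely a matter of deleting annotation lines.

/*
 * Sketch of the shim layer being removed (assumed shape, not the verbatim
 * jemalloc header).  in_valgrind was set at startup via RUNNING_ON_VALGRIND.
 */
#ifdef JEMALLOC_VALGRIND
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {      \
    if (unlikely(in_valgrind))                                      \
        valgrind_make_mem_undefined(ptr, usize);                    \
} while (0)
#else
/* Without --enable-valgrind the annotation disappears entirely. */
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#endif
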
src/arena.c | 41

@@ -350,27 +350,16 @@ JEMALLOC_INLINE_C void
 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
 {
 
-    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-        (run_ind << LG_PAGE)), (npages << LG_PAGE));
     memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
         (npages << LG_PAGE));
 }
 
-JEMALLOC_INLINE_C void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
-    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
-        << LG_PAGE)), PAGE);
-}
-
 JEMALLOC_INLINE_C void
 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
     size_t i;
     UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
 
-    arena_run_page_mark_zeroed(chunk, run_ind);
     for (i = 0; i < PAGE / sizeof(size_t); i++)
         assert(p[i] == 0);
 }
@@ -471,12 +460,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
     }
 
     if (zero) {
-        if (flag_decommitted != 0) {
-            /* The run is untouched, and therefore zeroed. */
-            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
-                *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
-                (need_pages << LG_PAGE));
-        } else if (flag_dirty != 0) {
+        if (flag_decommitted != 0)
+            ; /* The run is untouched, and therefore zeroed. */
+        else if (flag_dirty != 0) {
             /* The run is dirty, so all pages must be zeroed. */
             arena_run_zero(chunk, run_ind, need_pages);
         } else {
@@ -492,15 +478,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
                 else if (config_debug) {
                     arena_run_page_validate_zeroed(chunk,
                         run_ind+i);
-                } else {
-                    arena_run_page_mark_zeroed(chunk,
-                        run_ind+i);
                 }
             }
         }
-    } else {
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-            (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
     }
 
     /*
@@ -564,8 +544,6 @@ arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
         if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
             arena_run_page_validate_zeroed(chunk, run_ind+i);
     }
-    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-        (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
     return (false);
 }
 
@@ -700,19 +678,9 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
      * the chunk is not zeroed.
      */
     if (!zero) {
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-            (void *)arena_bitselm_get_const(chunk, map_bias+1),
-            (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
-            chunk_npages-1) -
-            (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
         for (i = map_bias+1; i < chunk_npages-1; i++)
             arena_mapbits_internal_set(chunk, i, flag_unzeroed);
     } else {
-        JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
-            *)arena_bitselm_get_const(chunk, map_bias+1),
-            (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
-            chunk_npages-1) -
-            (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
         if (config_debug) {
             for (i = map_bias+1; i < chunk_npages-1; i++) {
                 assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -2571,13 +2539,11 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
             } else if (unlikely(opt_zero))
                 memset(ret, 0, usize);
         }
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
     } else {
         if (config_fill && unlikely(opt_junk_alloc)) {
             arena_alloc_junk_small(ret, &arena_bin_info[binind],
                 true);
         }
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
         memset(ret, 0, usize);
     }
 
@@ -3311,7 +3277,6 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
          */
 
         copysize = (usize < oldsize) ? usize : oldsize;
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
         memcpy(ret, ptr, copysize);
         isqalloc(tsd, ptr, oldsize, tcache, true);
     } else {

src/base.c

@@ -24,7 +24,6 @@ base_node_try_alloc(tsdn_t *tsdn)
         return (NULL);
     node = base_nodes;
     base_nodes = *(extent_node_t **)node;
-    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
     return (node);
 }
 
@@ -34,7 +33,6 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
 
     malloc_mutex_assert_owner(tsdn, &base_mtx);
 
-    JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
     *(extent_node_t **)node = base_nodes;
     base_nodes = node;
 }
@@ -123,7 +121,6 @@ base_alloc(tsdn_t *tsdn, size_t size)
         base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
             PAGE_CEILING((uintptr_t)ret);
     }
-    JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
 label_return:
     malloc_mutex_unlock(tsdn, &base_mtx);
     return (ret);

src/chunk.c | 10

@@ -316,7 +316,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         size_t i;
         size_t *p = (size_t *)(uintptr_t)ret;
 
-        JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
         for (i = 0; i < size / sizeof(size_t); i++)
             assert(p[i] == 0);
     }
@@ -376,8 +375,6 @@ chunk_alloc_base(size_t size)
     ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
     if (ret == NULL)
         return (NULL);
-    if (config_valgrind)
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
     return (ret);
 }
@@ -401,8 +398,6 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     if (ret == NULL)
         return (NULL);
     assert(commit);
-    if (config_valgrind)
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
     return (ret);
 }
 
@@ -434,8 +429,6 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
         commit, arena->dss_prec);
     if (ret == NULL)
         return (NULL);
-    if (config_valgrind)
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
     return (ret);
 }
@@ -478,8 +471,6 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         return (NULL);
     }
 
-    if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
     return (ret);
 }
 
@@ -494,7 +485,6 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     assert(!cache || !zeroed);
     unzeroed = cache || !zeroed;
-    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
     malloc_mutex_lock(tsdn, &arena->chunks_mtx);
     chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);

src/chunk_dss.c

@@ -138,11 +138,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
                     &chunk_hooks, cpad, cpad_size,
                     false, true);
             }
-            if (*zero) {
-                JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-                    ret, size);
+            if (*zero)
                 memset(ret, 0, size);
-            }
             if (!*commit)
                 *commit = pages_decommit(ret, size);
             return (ret);

src/ctl.c

@@ -86,7 +86,6 @@ CTL_PROTO(config_stats)
 CTL_PROTO(config_tcache)
 CTL_PROTO(config_tls)
 CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
@@ -260,7 +259,6 @@ static const ctl_named_node_t config_node[] = {
     {NAME("tcache"),    CTL(config_tcache)},
     {NAME("tls"),       CTL(config_tls)},
     {NAME("utrace"),    CTL(config_utrace)},
-    {NAME("valgrind"),  CTL(config_valgrind)},
     {NAME("xmalloc"),   CTL(config_xmalloc)}
 };
 
@@ -1270,7 +1268,6 @@ CTL_RO_CONFIG_GEN(config_stats, bool)
 CTL_RO_CONFIG_GEN(config_tcache, bool)
 CTL_RO_CONFIG_GEN(config_tls, bool)
 CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
 CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 
 /******************************************************************************/
@@ -1622,8 +1619,7 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     READONLY();
     WRITEONLY();
 
-    if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
-        unlikely(opt_quarantine))) {
+    if (config_fill && unlikely(opt_quarantine)) {
         ret = EFAULT;
         goto label_return;
     }

src/jemalloc.c

@@ -42,9 +42,6 @@ bool opt_xmalloc = false;
 bool     opt_zero = false;
 unsigned opt_narenas = 0;
 
-/* Initialized to true if the process is running inside Valgrind. */
-bool     in_valgrind;
-
 unsigned ncpus;
 
 /* Protects arenas initialization. */
@@ -80,8 +77,7 @@ enum {
     flag_opt_quarantine = (1U << 2),
     flag_opt_zero       = (1U << 3),
     flag_opt_utrace     = (1U << 4),
-    flag_in_valgrind    = (1U << 5),
-    flag_opt_xmalloc    = (1U << 6)
+    flag_opt_xmalloc    = (1U << 5)
 };
 static uint8_t malloc_slow_flags;
 
@@ -894,9 +890,6 @@ malloc_slow_flag_init(void)
         | (opt_utrace ? flag_opt_utrace : 0)
         | (opt_xmalloc ? flag_opt_xmalloc : 0);
 
-    if (config_valgrind)
-        malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
     malloc_slow = (malloc_slow_flags != 0);
 }
 
@@ -908,24 +901,6 @@ malloc_conf_init(void)
     const char *opts, *k, *v;
     size_t klen, vlen;
 
-    /*
-     * Automatically configure valgrind before processing options.  The
-     * valgrind option remains in jemalloc 3.x for compatibility reasons.
-     */
-    if (config_valgrind) {
-        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
-        if (config_fill && unlikely(in_valgrind)) {
-            opt_junk = "false";
-            opt_junk_alloc = false;
-            opt_junk_free = false;
-            assert(!opt_zero);
-            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
-            opt_redzone = true;
-        }
-        if (config_tcache && unlikely(in_valgrind))
-            opt_tcache = false;
-    }
-
     for (i = 0; i < 4; i++) {
         /* Get runtime configuration. */
         switch (i) {
@@ -1183,19 +1158,7 @@
             CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
             }
             if (config_tcache) {
-                CONF_HANDLE_BOOL(opt_tcache, "tcache",
-                    !config_valgrind || !in_valgrind)
-                if (CONF_MATCH("tcache")) {
-                    assert(config_valgrind && in_valgrind);
-                    if (opt_tcache) {
-                        opt_tcache = false;
-                        malloc_conf_error(
-                            "tcache cannot be enabled "
-                            "while running inside Valgrind",
-                            k, klen, v, vlen);
-                    }
-                    continue;
-                }
+                CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
                 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                     "lg_tcache_max", -1,
                     (sizeof(size_t) << 3) - 1)
@@ -1508,8 +1471,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
     if (unlikely(ind >= NSIZES))
         return (NULL);
 
-    if (config_stats || (config_prof && opt_prof) || (slow_path &&
-        config_valgrind && unlikely(in_valgrind))) {
+    if (config_stats || (config_prof && opt_prof)) {
         *usize = index2size(ind);
         assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
     }
@@ -1562,7 +1524,6 @@ je_malloc(size_t size)
         ret = ialloc_body(size, false, &tsdn, &usize, true);
         ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
         UTRACE(0, size, ret);
-        JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
     }
 
     return (ret);
@@ -1664,8 +1625,6 @@ label_return:
         *tsd_thread_allocatedp_get(tsd) += usize;
     }
     UTRACE(0, size, result);
-    JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
-        false);
     witness_assert_lockless(tsd_tsdn(tsd));
     return (ret);
 label_oom:
@@ -1684,11 +1643,8 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 JEMALLOC_ATTR(nonnull(1))
 je_posix_memalign(void **memptr, size_t alignment, size_t size)
 {
-    int ret;
-
-    ret = imemalign(memptr, alignment, size, sizeof(void *));
-
-    return (ret);
+    return (imemalign(memptr, alignment, size, sizeof(void *)));
 }
 
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1703,7 +1659,6 @@ je_aligned_alloc(size_t alignment, size_t size)
         ret = NULL;
         set_errno(err);
     }
 
     return (ret);
 }
 
@@ -1739,7 +1694,6 @@ je_calloc(size_t num, size_t size)
         ret = ialloc_body(num_size, true, &tsdn, &usize, true);
         ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
         UTRACE(0, num_size, ret);
-        JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
     }
 
     return (ret);
@@ -1792,7 +1746,6 @@ JEMALLOC_INLINE_C void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 {
     size_t usize;
-    UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
     witness_assert_lockless(tsd_tsdn(tsd));
 
@@ -1802,25 +1755,20 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
     if (config_prof && opt_prof) {
         usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
         prof_free(tsd, ptr, usize);
-    } else if (config_stats || config_valgrind)
+    } else if (config_stats)
         usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
     if (config_stats)
         *tsd_thread_deallocatedp_get(tsd) += usize;
 
     if (likely(!slow_path))
         iqalloc(tsd, ptr, tcache, false);
-    else {
-        if (config_valgrind && unlikely(in_valgrind))
-            rzsize = p2rz(tsd_tsdn(tsd), ptr);
+    else
         iqalloc(tsd, ptr, tcache, true);
-        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-    }
 }
 
 JEMALLOC_INLINE_C void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
 {
-    UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
     witness_assert_lockless(tsd_tsdn(tsd));
 
@@ -1831,10 +1779,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
         prof_free(tsd, ptr, usize);
     if (config_stats)
         *tsd_thread_deallocatedp_get(tsd) += usize;
-    if (config_valgrind && unlikely(in_valgrind))
-        rzsize = p2rz(tsd_tsdn(tsd), ptr);
     isqalloc(tsd, ptr, usize, tcache, slow_path);
-    JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }
 
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1846,7 +1791,6 @@ je_realloc(void *ptr, size_t size)
     tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
     size_t usize JEMALLOC_CC_SILENCE_INIT(0);
     size_t old_usize = 0;
-    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
     if (unlikely(size == 0)) {
         if (ptr != NULL) {
@@ -1871,18 +1815,13 @@ je_realloc(void *ptr, size_t size)
         witness_assert_lockless(tsd_tsdn(tsd));
 
         old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-        if (config_valgrind && unlikely(in_valgrind)) {
-            old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
-                u2rz(old_usize);
-        }
 
         if (config_prof && opt_prof) {
             usize = s2u(size);
             ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
                 NULL : irealloc_prof(tsd, ptr, old_usize, usize);
         } else {
-            if (config_stats || (config_valgrind &&
-                unlikely(in_valgrind)))
+            if (config_stats)
                 usize = s2u(size);
             ret = iralloc(tsd, ptr, old_usize, size, 0, false);
         }
@@ -1913,8 +1852,6 @@ je_realloc(void *ptr, size_t size)
         *tsd_thread_deallocatedp_get(tsd) += old_usize;
     }
     UTRACE(ptr, size, ret);
-    JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
-        old_rzsize, true, false);
     witness_assert_lockless(tsdn);
     return (ret);
 }
@@ -2143,8 +2080,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
     szind_t ind = size2index(size);
     if (unlikely(ind >= NSIZES))
         return (NULL);
-    if (config_stats || (config_prof && opt_prof) || (slow_path &&
-        config_valgrind && unlikely(in_valgrind))) {
+    if (config_stats || (config_prof && opt_prof)) {
         *usize = index2size(ind);
         assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
     }
@@ -2181,8 +2117,6 @@ je_mallocx(size_t size, int flags)
         p = imallocx_body(size, flags, &tsdn, &usize, true);
         ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
         UTRACE(0, size, p);
-        JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
-            MALLOCX_ZERO_GET(flags));
     }
 
     return (p);
@@ -2261,7 +2195,6 @@ je_rallocx(void *ptr, size_t size, int flags)
     tsd_t *tsd;
     size_t usize;
     size_t old_usize;
-    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
     size_t alignment = MALLOCX_ALIGN_GET(flags);
     bool zero = flags & MALLOCX_ZERO;
     arena_t *arena;
@@ -2291,8 +2224,6 @@ je_rallocx(void *ptr, size_t size, int flags)
         tcache = tcache_get(tsd, true);
 
     old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-    if (config_valgrind && unlikely(in_valgrind))
-        old_rzsize = u2rz(old_usize);
 
     if (config_prof && opt_prof) {
         usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2307,7 +2238,7 @@ je_rallocx(void *ptr, size_t size, int flags)
             tcache, arena);
         if (unlikely(p == NULL))
             goto label_oom;
-        if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+        if (config_stats)
             usize = isalloc(tsd_tsdn(tsd), p, config_prof);
     }
     assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2317,8 +2248,6 @@ je_rallocx(void *ptr, size_t size, int flags)
         *tsd_thread_deallocatedp_get(tsd) += old_usize;
     }
     UTRACE(ptr, size, p);
-    JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
-        old_usize, old_rzsize, false, zero);
     witness_assert_lockless(tsd_tsdn(tsd));
     return (p);
 label_oom:
@@ -2413,7 +2342,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 {
     tsd_t *tsd;
     size_t usize, old_usize;
-    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
     size_t alignment = MALLOCX_ALIGN_GET(flags);
     bool zero = flags & MALLOCX_ZERO;
 
@@ -2443,9 +2371,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
     if (unlikely(HUGE_MAXCLASS - size < extra))
         extra = HUGE_MAXCLASS - size;
 
-    if (config_valgrind && unlikely(in_valgrind))
-        old_rzsize = u2rz(old_usize);
-
     if (config_prof && opt_prof) {
         usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
             alignment, zero);
@@ -2460,8 +2385,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
         *tsd_thread_allocatedp_get(tsd) += usize;
         *tsd_thread_deallocatedp_get(tsd) += old_usize;
     }
-    JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
-        old_usize, old_rzsize, false, zero);
 label_not_resized:
     UTRACE(ptr, size, ptr);
     witness_assert_lockless(tsd_tsdn(tsd));

src/quarantine.c

@@ -150,12 +150,7 @@ quarantine(tsd_t *tsd, void *ptr)
         quarantine->curbytes += usize;
         quarantine->curobjs++;
         if (config_fill && unlikely(opt_junk_free)) {
-            /*
-             * Only do redzone validation if Valgrind isn't in
-             * operation.
-             */
-            if ((!config_valgrind || likely(!in_valgrind))
-                && usize <= SMALL_MAXCLASS)
+            if (usize <= SMALL_MAXCLASS)
                 arena_quarantine_junk_small(ptr, usize);
             else
                 memset(ptr, JEMALLOC_FREE_JUNK, usize);

src/stats.c

@@ -517,7 +517,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     OPT_WRITE_BOOL(redzone)
     OPT_WRITE_BOOL(zero)
     OPT_WRITE_BOOL(utrace)
-    OPT_WRITE_BOOL(valgrind)
     OPT_WRITE_BOOL(xmalloc)
     OPT_WRITE_BOOL(tcache)
     OPT_WRITE_SSIZE_T(lg_tcache_max)

src/valgrind.c (deleted)

@@ -1,34 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_VALGRIND
-#  error "This source file is for Valgrind integration."
-#endif
-
-#include <valgrind/memcheck.h>
-
-void
-valgrind_make_mem_noaccess(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
-}
-
-void
-valgrind_make_mem_undefined(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
-}
-
-void
-valgrind_make_mem_defined(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
-}
-
-void
-valgrind_freelike_block(void *ptr, size_t usize)
-{
-
-	VALGRIND_FREELIKE_BLOCK(ptr, usize);
-}
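
The wrappers deleted above existed so that <valgrind/memcheck.h> was included in a single translation unit; every other file went through the JEMALLOC_VALGRIND_* macros instead. What those annotations bought is easiest to see in the arena_run_zero() hunk at the top of this diff: before recycling and zeroing a page run, the allocator re-marked the bytes as undefined so Memcheck would not carry definedness state over from the previous user of that memory. A standalone illustration of the pattern (not jemalloc code, and assuming the Valgrind headers are installed):

#include <string.h>
#include <valgrind/memcheck.h>  /* client requests; effectively no-ops outside Valgrind */

static void
recycle_and_zero(void *run, size_t len)
{
    /* Tell Memcheck the recycled bytes are undefined again ... */
    VALGRIND_MAKE_MEM_UNDEFINED(run, len);
    /* ... then zero them; the stores mark the range defined. */
    memset(run, 0, len);
}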