Optimize Valgrind integration.

Forcefully disable tcache if running inside Valgrind, and remove
Valgrind calls in tcache-specific code.

Restructure Valgrind-related code to move most Valgrind calls out of the
fast path functions.

Take advantage of static knowledge to elide some branches in
JEMALLOC_VALGRIND_REALLOC().
Author: Jason Evans
Date:   2014-04-15 16:35:08 -07:00
Commit: bd87b01999 (parent: ecd3e59ca3)

12 changed files with 231 additions and 136 deletions
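
Most of the diff serves the fast-path restructuring described above. As a minimal sketch (using the names defined later in this commit, in the new include/jemalloc/internal/valgrind.h and src/valgrind.c), each annotation now costs a single test of a global on the fast path, with the Valgrind client request moved out of line:

/* Sketch of the new pattern; the real definition appears in valgrind.h below. */
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {		\
	if (in_valgrind)	/* false in ordinary (non-Valgrind) runs */	\
		valgrind_make_mem_undefined(ptr, usize);	/* out of line, in src/valgrind.c */	\
} while (0)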

Makefile.in

@@ -48,6 +48,7 @@ cfgoutputs_in := @cfgoutputs_in@
cfgoutputs_out := @cfgoutputs_out@
enable_autogen := @enable_autogen@
enable_code_coverage := @enable_code_coverage@
enable_valgrind := @enable_valgrind@
enable_zone_allocator := @enable_zone_allocator@
DSO_LDFLAGS = @DSO_LDFLAGS@
SOREV = @SOREV@
@@ -82,6 +83,9 @@ C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \
$(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \
$(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \
$(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c
ifeq ($(enable_valgrind), 1)
C_SRCS += $(srcroot)src/valgrind.c
endif
ifeq ($(enable_zone_allocator), 1)
C_SRCS += $(srcroot)src/zone.c
endif

doc/jemalloc.xml.in

@@ -979,7 +979,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
option for related tuning information. This option is enabled by
default unless running inside <ulink
url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
url="http://valgrind.org/">Valgrind</ulink>, in which case it is
forcefully disabled.</para></listitem>
</varlistentry>
<varlistentry id="opt.lg_tcache_max">
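
To illustrate, in the same style as the manual's malloc_conf examples: under Valgrind the option now defaults to disabled, and explicitly enabling it is rejected at option-parsing time (the error text comes from the src/jemalloc.c hunk later in this commit).

/* Running inside Valgrind: */
malloc_conf = "tcache:true";
/* => conf error: "tcache cannot be enabled while running inside Valgrind",
 * and opt_tcache remains false. */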

include/jemalloc/internal/jemalloc_internal.h.in

@@ -60,11 +60,6 @@ typedef intptr_t ssize_t;
#include <sys/ktrace.h>
#endif
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
@@ -362,81 +357,7 @@ static const bool config_ivsalloc =
# define VARIABLE_ARRAY(type, name, count) type name[count]
#endif
#ifdef JEMALLOC_VALGRIND
/*
* The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
* so that when Valgrind reports errors, there are no extra stack frames
* in the backtraces.
*
* The size that is reported to valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (config_valgrind && in_valgrind && cond) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do { \
if (config_valgrind && in_valgrind) { \
size_t rzsize = p2rz(ptr); \
\
if (ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (old_ptr != NULL) { \
VALGRIND_FREELIKE_BLOCK(old_ptr, \
old_rzsize); \
} \
if (ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
VALGRIND_MAKE_MEM_DEFINED( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (config_valgrind && in_valgrind) \
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
do {} while (0)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
old_rzsize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -463,6 +384,7 @@ static const bool config_ivsalloc =
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -534,6 +456,7 @@ void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -560,6 +483,7 @@ void jemalloc_postfork_child(void);
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"

include/jemalloc/internal/private_symbols.txt

@@ -409,3 +409,7 @@ thread_allocated_tsd_set
tsd_init_check_recursion
tsd_init_finish
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
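
The four new entries keep the out-of-line helpers inside jemalloc's private symbol namespace. Roughly, each listed name becomes a #define in the generated private_namespace.h via JEMALLOC_N() (whose jet_ variant appears in the jemalloc_internal.h hunk above); a sketch of one generated line, not itself part of this patch:

#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)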

include/jemalloc/internal/tcache.h

@@ -314,13 +314,11 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
@@ -369,11 +367,8 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else
memset(ret, 0, size);
}
if (config_stats)
tbin->tstats.nrequests++;
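
These deletions are safe because of the first change in this commit: tcache is forcefully disabled when running inside Valgrind, so the removed annotations could never execute with in_valgrind true. The invariant, written as a hypothetical assertion (not present in the patch):

assert(!in_valgrind || !opt_tcache);	/* enforced during malloc_conf_init() */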

include/jemalloc/internal/valgrind.h (new file)

@@ -0,0 +1,112 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#ifdef JEMALLOC_VALGRIND
#include <valgrind/valgrind.h>
/*
* The size that is reported to Valgrind must be consistent through a chain of
* malloc..realloc..realloc calls. Request size isn't recorded anywhere in
* jemalloc, so it is critical that all callers of these macros provide usize
* rather than request size. As a result, buffer overflow detection is
* technically weakened for the standard API, though it is generally accepted
* practice to consider any extra bytes reported by malloc_usable_size() as
* usable space.
*/
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
if (in_valgrind) \
valgrind_make_mem_noaccess(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
if (in_valgrind) \
valgrind_make_mem_undefined(ptr, usize); \
} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
if (in_valgrind) \
valgrind_make_mem_defined(ptr, usize); \
} while (0)
/*
* The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
if (in_valgrind && cond) \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do { \
if (in_valgrind) { \
size_t rzsize = p2rz(ptr); \
\
if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
old_usize), usize - old_usize); \
} \
} else { \
if (!old_ptr_maybe_null || old_ptr != NULL) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
if (!ptr_maybe_null || ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
rzsize, false); \
if (copy_size > 0) { \
valgrind_make_mem_defined(ptr, \
copy_size); \
} \
if (zero && tail_size > 0) { \
valgrind_make_mem_defined( \
(void *)((uintptr_t)ptr + \
copy_size), tail_size); \
} \
} \
} \
} \
} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
if (in_valgrind) \
valgrind_freelike_block(ptr, rzsize); \
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_VALGRIND
void valgrind_make_mem_noaccess(void *ptr, size_t usize);
void valgrind_make_mem_undefined(void *ptr, size_t usize);
void valgrind_make_mem_defined(void *ptr, size_t usize);
void valgrind_freelike_block(void *ptr, size_t usize);
#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
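
To make the usize rule in the comment at the top of this header concrete, a sketch of a malloc..realloc chain; the sizes in the comments assume jemalloc's usual small size classes:

void *p = malloc(10);	/* reported via JEMALLOC_VALGRIND_MALLOC() with usize == 16 */
p = realloc(p, 100);	/* old_usize passed here must be 16, the previously
			 * reported usize; passing the original request size
			 * (10) would desynchronize Valgrind's block tracking. */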

src/arena.c

@@ -337,8 +337,8 @@ static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), (npages << LG_PAGE));
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
}
@@ -347,8 +347,8 @@ static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), PAGE);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
<< LG_PAGE)), PAGE);
}
static inline void
@@ -457,7 +457,7 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
arena_run_zero(chunk, run_ind, need_pages);
}
} else {
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
@@ -525,7 +525,7 @@ arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
run_ind+need_pages-1) == 0)
arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
@@ -592,14 +592,14 @@ arena_chunk_init_hard(arena_t *arena)
* the chunk is not zeroed.
*/
if (zero == false) {
VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_mapp_get(chunk, map_bias+1),
(size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) -
(uintptr_t)arena_mapp_get(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else {
VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
@@ -1645,13 +1645,13 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
@@ -2226,7 +2226,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
iqalloct(ptr, try_tcache_dalloc);
return (ret);

src/base.c

@@ -63,7 +63,7 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
}
@@ -89,7 +89,8 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -102,7 +103,7 @@ void
base_node_dealloc(extent_node_t *node)
{
VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;

src/chunk.c

@@ -127,7 +127,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
VALGRIND_MAKE_MEM_DEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
@@ -203,7 +203,7 @@ label_return:
prof_gdump();
}
if (config_valgrind)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -217,7 +217,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
extent_node_t *xnode, *node, *prev, *xprev, key;
unzeroed = pages_purge(chunk, size);
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not

src/chunk_dss.c

@@ -126,7 +126,8 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
if (cpad_size != 0)
chunk_unmap(cpad, cpad_size);
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
ret, size);
memset(ret, 0, size);
}
return (ret);

src/jemalloc.c

@@ -479,9 +479,10 @@ malloc_conf_init(void)
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
#define CONF_HANDLE_BOOL(o, n) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
#define CONF_MATCH(n) \
(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
if (CONF_MATCH(n)) { \
if (strncmp("true", v, vlen) == 0 && \
vlen == sizeof("true")-1) \
o = true; \
@@ -493,11 +494,11 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} \
continue; \
if (cont) \
continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
@@ -528,8 +529,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
@@ -550,8 +550,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
@@ -560,7 +559,7 @@ malloc_conf_init(void)
continue; \
}
CONF_HANDLE_BOOL(opt_abort, "abort")
CONF_HANDLE_BOOL(opt_abort, "abort", true)
/*
* Chunks always require at least one header page, plus
* one data page in the absence of redzones, or three
@@ -599,44 +598,62 @@ malloc_conf_init(void)
SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, "junk")
CONF_HANDLE_BOOL(opt_junk, "junk", true)
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone")
CONF_HANDLE_BOOL(opt_zero, "zero")
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, "utrace")
CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
}
if (config_tcache) {
CONF_HANDLE_BOOL(opt_tcache, "tcache")
CONF_HANDLE_BOOL(opt_tcache, "tcache",
!config_valgrind || !in_valgrind)
if (CONF_MATCH("tcache")) {
assert(config_valgrind && in_valgrind);
if (opt_tcache) {
opt_tcache = false;
malloc_conf_error(
"tcache cannot be enabled "
"while running inside Valgrind",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
}
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_BOOL(opt_prof, "prof", true)
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
true)
CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
true)
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
true)
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
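
The new cont parameter exists for one user: the tcache handler passes !config_valgrind || !in_valgrind, so under Valgrind the macro parses the value but does not continue, letting the follow-up CONF_MATCH("tcache") block veto the result. A hand expansion of CONF_HANDLE_BOOL(opt_tcache, "tcache", cont) with cont false (a sketch; the "false" and error arms are abbreviated from the macro above):

if (CONF_MATCH("tcache")) {
	if (strncmp("true", v, vlen) == 0 && vlen == sizeof("true")-1)
		opt_tcache = true;
	else if (strncmp("false", v, vlen) == 0 && vlen == sizeof("false")-1)
		opt_tcache = false;
	else
		malloc_conf_error("Invalid conf value", k, klen, v, vlen);
	/* cont is false: no continue here, so control falls through to the
	 * Valgrind-specific CONF_MATCH("tcache") validation block. */
}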
@@ -1293,8 +1310,8 @@ je_realloc(void *ptr, size_t size)
ta->deallocated += old_usize;
}
UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
false);
JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
old_rzsize, true, false);
return (ret);
}
@@ -1604,7 +1621,8 @@ je_rallocx(void *ptr, size_t size, int flags)
ta->deallocated += old_usize;
}
UTRACE(ptr, size, p);
JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
old_rzsize, false, zero);
return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
@@ -1731,7 +1749,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
ta->allocated += usize;
ta->deallocated += old_usize;
}
JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
return (usize);
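
The "static knowledge" from the commit message is visible in these three call sites: je_xallocx() never moves an allocation, so it passes a literal false for maybe_moved, and constant folding reduces JEMALLOC_VALGRIND_REALLOC() to its resize-in-place arm. A sketch of what the compiler sees after expansion there:

if (!false || ptr == old_ptr) {	/* folds to if (true) */
	VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, usize, rzsize);
	/* ... zero-tail handling ... */
}
/* The else arm (freelike + malloclike for a moved block) is dead code and
 * drops out. Similarly, je_rallocx() passes false for ptr_maybe_null and
 * old_ptr_maybe_null, eliding those NULL checks in its expansion. */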

src/valgrind.c (new file)

@@ -0,0 +1,34 @@
#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_VALGRIND
# error "This source file is for Valgrind integration."
#endif
#include <valgrind/memcheck.h>
void
valgrind_make_mem_noaccess(void *ptr, size_t usize)
{
VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
}
void
valgrind_make_mem_undefined(void *ptr, size_t usize)
{
VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
}
void
valgrind_make_mem_defined(void *ptr, size_t usize)
{
VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
}
void
valgrind_freelike_block(void *ptr, size_t usize)
{
VALGRIND_FREELIKE_BLOCK(ptr, usize);
}