diff --git a/INSTALL b/INSTALL
index 68787165..36306fec 100644
--- a/INSTALL
+++ b/INSTALL
@@ -169,9 +169,6 @@ any of the following arguments (not a definitive list) to 'configure':
     See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone"
     option documentation for usage details.
 
---disable-valgrind
-    Disable support for Valgrind.
-
 --disable-zone-allocator
     Disable zone allocator for Darwin.  This means jemalloc won't be hooked as
     the default allocator on OSX/iOS.
diff --git a/Makefile.in b/Makefile.in
index 652f01f2..34facf43 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -49,7 +49,6 @@ cfgoutputs_out := @cfgoutputs_out@
 enable_autogen := @enable_autogen@
 enable_code_coverage := @enable_code_coverage@
 enable_prof := @enable_prof@
-enable_valgrind := @enable_valgrind@
 enable_zone_allocator := @enable_zone_allocator@
 MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
 DSO_LDFLAGS = @DSO_LDFLAGS@
@@ -104,9 +103,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/tsd.c \
 	$(srcroot)src/util.c \
 	$(srcroot)src/witness.c
-ifeq ($(enable_valgrind), 1)
-C_SRCS += $(srcroot)src/valgrind.c
-endif
 ifeq ($(enable_zone_allocator), 1)
 C_SRCS += $(srcroot)src/zone.c
 endif
diff --git a/README b/README
index 9b268f42..67cbf6da 100644
--- a/README
+++ b/README
@@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support.  jemalloc first came
 into use as the FreeBSD libc allocator in 2005, and since then it has found its
 way into numerous applications that rely on its predictable behavior.  In 2010
 jemalloc development efforts broadened to include developer support features
-such as heap profiling, Valgrind integration, and extensive monitoring/tuning
-hooks.  Modern jemalloc releases continue to be integrated back into FreeBSD,
-and therefore versatility remains critical.  Ongoing development efforts trend
-toward making jemalloc among the best allocators for a broad range of demanding
-applications, and eliminating/mitigating weaknesses that have practical
-repercussions for real world applications.
+such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
+releases continue to be integrated back into FreeBSD, and therefore versatility
+remains critical.  Ongoing development efforts trend toward making jemalloc
+among the best allocators for a broad range of demanding applications, and
+eliminating/mitigating weaknesses that have practical repercussions for real
+world applications.
 
 The COPYING file contains copyright and licensing information.
 
diff --git a/configure.ac b/configure.ac
index 7f19715d..df5cf25a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -988,35 +988,6 @@ if test "x$enable_utrace" = "x1" ; then
 fi
 AC_SUBST([enable_utrace])
 
-dnl Support Valgrind by default.
-AC_ARG_ENABLE([valgrind],
-  [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])],
-[if test "x$enable_valgrind" = "xno" ; then
-  enable_valgrind="0"
-else
-  enable_valgrind="1"
-fi
-],
-[enable_valgrind="1"]
-)
-if test "x$enable_valgrind" = "x1" ; then
-  JE_COMPILABLE([valgrind], [
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-
-#if !defined(VALGRIND_RESIZEINPLACE_BLOCK)
-#  error "Incompatible Valgrind version"
-#endif
-], [], [je_cv_valgrind])
-  if test "x${je_cv_valgrind}" = "xno" ; then
-    enable_valgrind="0"
-  fi
-  if test "x$enable_valgrind" = "x1" ; then
-    AC_DEFINE([JEMALLOC_VALGRIND], [ ])
-  fi
-fi
-AC_SUBST([enable_valgrind])
-
 dnl Do not support the xmalloc option by default.
 AC_ARG_ENABLE([xmalloc],
   [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
@@ -1782,7 +1753,6 @@ AC_MSG_RESULT([prof-gcc           : ${enable_prof_gcc}])
 AC_MSG_RESULT([tcache             : ${enable_tcache}])
 AC_MSG_RESULT([fill               : ${enable_fill}])
 AC_MSG_RESULT([utrace             : ${enable_utrace}])
-AC_MSG_RESULT([valgrind           : ${enable_valgrind}])
 AC_MSG_RESULT([xmalloc            : ${enable_xmalloc}])
 AC_MSG_RESULT([munmap             : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock          : ${enable_lazy_lock}])
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index c4a44e3c..2f8f150a 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -869,16 +869,6 @@ for (i = 0; i < nbins; i++) {
         build configuration.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="config.valgrind">
-        <term>
-          <mallctl>config.valgrind</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-valgrind</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="config.xmalloc">
         <term>
           <mallctl>config.xmalloc</mallctl>
@@ -1046,9 +1036,8 @@ for (i = 0; i < nbins; i++) {
         "false", junk filling be disabled entirely.  This is intended for
         debugging and will impact performance negatively.  This option is
         "false" by default unless <option>--enable-debug</option> is specified
-        during configuration, in which case it is "true" by default unless
-        running inside <ulink
-        url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
+        during configuration, in which case it is "true" by
+        default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.quarantine">
@@ -1063,13 +1052,9 @@ for (i = 0; i < nbins; i++) {
         specified number of bytes of memory.  The quarantined memory is not
         freed until it is released from quarantine, though it is immediately
         junk-filled if the <link
-        linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
-        enabled.  This feature is of particular use in combination with <ulink
-        url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts
-        to access quarantined objects.  This is intended for debugging and will
-        impact performance negatively.  The default quarantine size is 0 unless
-        running inside Valgrind, in which case the default is 16
-        MiB.</para></listitem>
+        linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is enabled.
+        This is intended for debugging and will impact performance negatively.
+        The default quarantine size is 0.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.redzone">
@@ -1083,12 +1068,8 @@ for (i = 0; i < nbins; i++) {
         allocations have redzones before and after them.  Furthermore, if the
         <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
         enabled, the redzones are checked for corruption during deallocation.
-        However, the primary intended purpose of this feature is to be used in
-        combination with <ulink url="http://valgrind.org/">Valgrind</ulink>,
-        which needs redzones in order to do effective buffer overflow/underflow
-        detection.  This option is intended for debugging and will impact
-        performance negatively.  This option is disabled by
-        default unless running inside Valgrind.</para></listitem>
+        This option is intended for debugging and will impact performance
+        negatively.  This option is disabled by default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.zero">
@@ -1155,9 +1136,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         increased memory use.  See the <link
         linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
         option for related tuning information.  This option is enabled by
-        default unless running inside <ulink
-        url="http://valgrind.org/">Valgrind</ulink>, in which case it is
-        forcefully disabled.</para></listitem>
+        default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.lg_tcache_max">
@@ -2746,9 +2725,7 @@ MAPPED_LIBRARIES:
 
     <para>This implementation does not provide much detail about the problems
     it detects, because the performance impact for storing such information
-    would be prohibitive.  However, jemalloc does integrate with the most
-    excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the
-    <option>--enable-valgrind</option> configuration option is enabled.</para>
+    would be prohibitive.</para>
   </refsect1>
   <refsect1 id="diagnostic_messages">
     <title>DIAGNOSTIC MESSAGES</title>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 51bf8974..4c845e30 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -113,13 +113,6 @@ static const bool config_utrace =
     false
 #endif
     ;
-static const bool config_valgrind =
-#ifdef JEMALLOC_VALGRIND
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_xmalloc =
 #ifdef JEMALLOC_XMALLOC
     true
@@ -361,7 +354,6 @@ typedef unsigned szind_t;
 #endif
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
@@ -393,7 +385,6 @@ typedef unsigned szind_t;
 #define	JEMALLOC_H_STRUCTS
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
@@ -441,8 +432,6 @@ extern bool	opt_xmalloc;
 extern bool	opt_zero;
 extern unsigned	opt_narenas;
 
-extern bool	in_valgrind;
-
 /* Number of CPUs. */
 extern unsigned	ncpus;
 
@@ -489,7 +478,6 @@ void	jemalloc_postfork_parent(void);
 void	jemalloc_postfork_child(void);
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
@@ -521,7 +509,6 @@ void	jemalloc_postfork_child(void);
 #define	JEMALLOC_H_INLINES
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 7de0cf7c..c9aa5fd5 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -148,9 +148,6 @@
 /* Support utrace(2)-based tracing. */
 #undef JEMALLOC_UTRACE
 
-/* Support Valgrind. */
-#undef JEMALLOC_VALGRIND
-
 /* Support optional abort() on OOM. */
 #undef JEMALLOC_XMALLOC
 
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index f2b6a55d..15b8ceec 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -296,7 +296,6 @@ iallocztm
 iarena_cleanup
 idalloc
 idalloctm
-in_valgrind
 index2size
 index2size_compute
 index2size_lookup
@@ -591,10 +590,6 @@ tsdn_fetch
 tsdn_null
 tsdn_tsd
 u2rz
-valgrind_freelike_block
-valgrind_make_mem_defined
-valgrind_make_mem_noaccess
-valgrind_make_mem_undefined
 witness_assert_lockless
 witness_assert_not_owner
 witness_assert_owner
diff --git a/include/jemalloc/internal/quarantine.h b/include/jemalloc/internal/quarantine.h
index ae607399..1ab4345e 100644
--- a/include/jemalloc/internal/quarantine.h
+++ b/include/jemalloc/internal/quarantine.h
@@ -4,9 +4,6 @@
 typedef struct quarantine_obj_s quarantine_obj_t;
 typedef struct quarantine_s quarantine_t;
 
-/* Default per thread quarantine size if valgrind is enabled. */
-#define	JEMALLOC_VALGRIND_QUARANTINE_DEFAULT	(ZU(1) << 24)
-
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
diff --git a/include/jemalloc/internal/valgrind.h b/include/jemalloc/internal/valgrind.h
deleted file mode 100644
index 1a868082..00000000
--- a/include/jemalloc/internal/valgrind.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-
-/*
- * The size that is reported to Valgrind must be consistent through a chain of
- * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
- * jemalloc, so it is critical that all callers of these macros provide usize
- * rather than request size.  As a result, buffer overflow detection is
- * technically weakened for the standard API, though it is generally accepted
- * practice to consider any extra bytes reported by malloc_usable_size() as
- * usable space.
- */
-#define	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {		\
-	if (unlikely(in_valgrind))					\
-		valgrind_make_mem_noaccess(ptr, usize);			\
-} while (0)
-#define	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {		\
-	if (unlikely(in_valgrind))					\
-		valgrind_make_mem_undefined(ptr, usize);		\
-} while (0)
-#define	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {		\
-	if (unlikely(in_valgrind))					\
-		valgrind_make_mem_defined(ptr, usize);			\
-} while (0)
-/*
- * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
- * calls must be embedded in macros rather than in functions so that when
- * Valgrind reports errors, there are no extra stack frames in the backtraces.
- */
-#define	JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {	\
-	if (unlikely(in_valgrind && cond)) {				\
-		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr),	\
-		    zero);						\
-	}								\
-} while (0)
-#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
-    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
-    zero) do {								\
-	if (unlikely(in_valgrind)) {					\
-		size_t rzsize = p2rz(tsdn, ptr);			\
-									\
-		if (!maybe_moved || ptr == old_ptr) {			\
-			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
-			    usize, rzsize);				\
-			if (zero && old_usize < usize) {		\
-				valgrind_make_mem_defined(		\
-				    (void *)((uintptr_t)ptr +		\
-				    old_usize), usize - old_usize);	\
-			}						\
-		} else {						\
-			if (!old_ptr_maybe_null || old_ptr != NULL) {	\
-				valgrind_freelike_block(old_ptr,	\
-				    old_rzsize);			\
-			}						\
-			if (!ptr_maybe_null || ptr != NULL) {		\
-				size_t copy_size = (old_usize < usize)	\
-				    ? old_usize : usize;		\
-				size_t tail_size = usize - copy_size;	\
-				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
-				    rzsize, false);			\
-				if (copy_size > 0) {			\
-					valgrind_make_mem_defined(ptr,	\
-					    copy_size);			\
-				}					\
-				if (zero && tail_size > 0) {		\
-					valgrind_make_mem_defined(	\
-					    (void *)((uintptr_t)ptr +	\
-					    copy_size), tail_size);	\
-				}					\
-			}						\
-		}							\
-	}								\
-} while (0)
-#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {			\
-	if (unlikely(in_valgrind))					\
-		valgrind_freelike_block(ptr, rzsize);			\
-} while (0)
-#else
-#define	RUNNING_ON_VALGRIND	((unsigned)0)
-#define	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
-#define	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
-#define	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define	JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
-#define	JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
-    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
-    zero) do {} while (0)
-#define	JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_VALGRIND
-void	valgrind_make_mem_noaccess(void *ptr, size_t usize);
-void	valgrind_make_mem_undefined(void *ptr, size_t usize);
-void	valgrind_make_mem_defined(void *ptr, size_t usize);
-void	valgrind_freelike_block(void *ptr, size_t usize);
-#endif
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
index 9315022d..432d1f24 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
@@ -74,7 +74,6 @@
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h" />
@@ -395,4 +394,4 @@
-</Project>
\ No newline at end of file
+</Project>
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
index 88c15efa..c0e568ec 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
@@ -161,9 +161,6 @@
       <Filter>Header Files\internal</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
       <Filter>Header Files\msvc_compat</Filter>
     </ClInclude>
@@ -257,4 +254,4 @@
       <Filter>Source Files</Filter>
     </ClCompile>
   </ItemGroup>
-</Project>
\ No newline at end of file
+</Project>
diff --git a/src/arena.c b/src/arena.c
index c605bcd3..4e6d3d60 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -350,27 +350,16 @@ JEMALLOC_INLINE_C void
 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
 {
 
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
 	    (npages << LG_PAGE));
 }
 
-JEMALLOC_INLINE_C void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
-	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
-	    << LG_PAGE)), PAGE);
-}
-
 JEMALLOC_INLINE_C void
 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
 	size_t i;
 	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
 
-	arena_run_page_mark_zeroed(chunk, run_ind);
 	for (i = 0; i < PAGE / sizeof(size_t); i++)
 		assert(p[i] == 0);
 }
 
@@ -471,12 +460,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 	}
 
 	if (zero) {
-		if (flag_decommitted != 0) {
-			/* The run is untouched, and therefore zeroed. */
-			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
-			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
-			    (need_pages << LG_PAGE));
-		} else if (flag_dirty != 0) {
+		if (flag_decommitted != 0)
+			; /* The run is untouched, and therefore zeroed. */
+		else if (flag_dirty != 0) {
 			/* The run is dirty, so all pages must be zeroed. */
 			arena_run_zero(chunk, run_ind, need_pages);
 		} else {
@@ -492,15 +478,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 				else if (config_debug) {
 					arena_run_page_validate_zeroed(chunk,
 					    run_ind+i);
-				} else {
-					arena_run_page_mark_zeroed(chunk,
-					    run_ind+i);
 				}
 			}
 		}
-	} else {
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
 	}
 
 	/*
@@ -564,8 +544,6 @@ arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
 		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
 			arena_run_page_validate_zeroed(chunk, run_ind+i);
 	}
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
 	return (false);
 }
 
@@ -700,19 +678,9 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
 	 * the chunk is not zeroed.
 	 */
 	if (!zero) {
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
-		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
-		    chunk_npages-1) -
-		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
 		for (i = map_bias+1; i < chunk_npages-1; i++)
 			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
 	} else {
-		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
-		    *)arena_bitselm_get_const(chunk, map_bias+1),
-		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
-		    chunk_npages-1) -
-		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
 		if (config_debug) {
 			for (i = map_bias+1; i < chunk_npages-1; i++) {
 				assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -2571,13 +2539,11 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
 			} else if (unlikely(opt_zero))
 				memset(ret, 0, usize);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 	} else {
 		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 		memset(ret, 0, usize);
 	}
 
@@ -3311,7 +3277,6 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 		 */
 		copysize = (usize < oldsize) ? usize : oldsize;
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
 		memcpy(ret, ptr, copysize);
 		isqalloc(tsd, ptr, oldsize, tcache, true);
 	} else {
diff --git a/src/base.c b/src/base.c
index 81b0801f..1b0bf697 100644
--- a/src/base.c
+++ b/src/base.c
@@ -24,7 +24,6 @@ base_node_try_alloc(tsdn_t *tsdn)
 		return (NULL);
 	node = base_nodes;
 	base_nodes = *(extent_node_t **)node;
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	return (node);
 }
 
@@ -34,7 +33,6 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
 
 	malloc_mutex_assert_owner(tsdn, &base_mtx);
 
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	*(extent_node_t **)node = base_nodes;
 	base_nodes = node;
 }
@@ -123,7 +121,6 @@ base_alloc(tsdn_t *tsdn, size_t size)
 		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
 		    PAGE_CEILING((uintptr_t)ret);
 	}
-	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
 label_return:
 	malloc_mutex_unlock(tsdn, &base_mtx);
 	return (ret);
diff --git a/src/chunk.c b/src/chunk.c
index adc666ff..7af7bb91 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -316,7 +316,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		size_t i;
 		size_t *p = (size_t *)(uintptr_t)ret;
 
-		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
 		for (i = 0; i < size / sizeof(size_t); i++)
 			assert(p[i] == 0);
 	}
@@ -376,8 +375,6 @@ chunk_alloc_base(size_t size)
 	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
 	if (ret == NULL)
 		return (NULL);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
@@ -401,8 +398,6 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (ret == NULL)
 		return (NULL);
 	assert(commit);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
@@ -434,8 +429,6 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	    commit, arena->dss_prec);
 	if (ret == NULL)
 		return (NULL);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
@@ -478,8 +471,6 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			return (NULL);
 	}
 
-	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
 	return (ret);
 }
 
@@ -494,7 +485,6 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
 	assert(!cache || !zeroed);
 	unzeroed = cache || !zeroed;
-	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
 	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 0b1f82bd..d42aeb0b 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -138,11 +138,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 					    &chunk_hooks, cpad, cpad_size,
 					    false, true);
 				}
-				if (*zero) {
-					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-					    ret, size);
+				if (*zero)
 					memset(ret, 0, size);
-				}
 				if (!*commit)
 					*commit = pages_decommit(ret, size);
 				return (ret);
diff --git a/src/ctl.c b/src/ctl.c
index dad80086..d2e94269 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -86,7 +86,6 @@ CTL_PROTO(config_stats)
 CTL_PROTO(config_tcache)
 CTL_PROTO(config_tls)
 CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
@@ -260,7 +259,6 @@ static const ctl_named_node_t config_node[] = {
 	{NAME("tcache"),	CTL(config_tcache)},
 	{NAME("tls"),		CTL(config_tls)},
 	{NAME("utrace"),	CTL(config_utrace)},
-	{NAME("valgrind"),	CTL(config_valgrind)},
 	{NAME("xmalloc"),	CTL(config_xmalloc)}
 };
 
@@ -1270,7 +1268,6 @@ CTL_RO_CONFIG_GEN(config_stats, bool)
 CTL_RO_CONFIG_GEN(config_tcache, bool)
 CTL_RO_CONFIG_GEN(config_tls, bool)
 CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
 CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 
 /******************************************************************************/
@@ -1622,8 +1619,7 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	READONLY();
 	WRITEONLY();
 
-	if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
-	    unlikely(opt_quarantine))) {
+	if (config_fill && unlikely(opt_quarantine)) {
 		ret = EFAULT;
 		goto label_return;
 	}
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 941c1c85..cfe6ed32 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -42,9 +42,6 @@ bool	opt_xmalloc = false;
 bool	opt_zero = false;
 unsigned	opt_narenas = 0;
 
-/* Initialized to true if the process is running inside Valgrind. */
-bool	in_valgrind;
-
 unsigned	ncpus;
 
 /* Protects arenas initialization. */
@@ -80,8 +77,7 @@ enum {
 	flag_opt_quarantine	= (1U << 2),
 	flag_opt_zero		= (1U << 3),
 	flag_opt_utrace		= (1U << 4),
-	flag_in_valgrind	= (1U << 5),
-	flag_opt_xmalloc	= (1U << 6)
+	flag_opt_xmalloc	= (1U << 5)
 };
 static uint8_t	malloc_slow_flags;
 
@@ -894,9 +890,6 @@ malloc_slow_flag_init(void)
 	    | (opt_utrace ? flag_opt_utrace : 0)
 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
 
-	if (config_valgrind)
-		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
 	malloc_slow = (malloc_slow_flags != 0);
 }
 
@@ -908,24 +901,6 @@ malloc_conf_init(void)
 	const char *opts, *k, *v;
 	size_t klen, vlen;
 
-	/*
-	 * Automatically configure valgrind before processing options.  The
-	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
-	 */
-	if (config_valgrind) {
-		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
-		if (config_fill && unlikely(in_valgrind)) {
-			opt_junk = "false";
-			opt_junk_alloc = false;
-			opt_junk_free = false;
-			assert(!opt_zero);
-			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
-			opt_redzone = true;
-		}
-		if (config_tcache && unlikely(in_valgrind))
-			opt_tcache = false;
-	}
-
 	for (i = 0; i < 4; i++) {
 		/* Get runtime configuration. */
 		switch (i) {
@@ -1183,19 +1158,7 @@ malloc_conf_init(void)
 			CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
 		}
 		if (config_tcache) {
-			CONF_HANDLE_BOOL(opt_tcache, "tcache",
-			    !config_valgrind || !in_valgrind)
-			if (CONF_MATCH("tcache")) {
-				assert(config_valgrind && in_valgrind);
-				if (opt_tcache) {
-					opt_tcache = false;
-					malloc_conf_error(
-					    "tcache cannot be enabled "
-					    "while running inside Valgrind",
-					    k, klen, v, vlen);
-				}
-				continue;
-			}
+			CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
 			    "lg_tcache_max", -1,
 			    (sizeof(size_t) << 3) - 1)
@@ -1508,8 +1471,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
 	if (unlikely(ind >= NSIZES))
 		return (NULL);
 
-	if (config_stats || (config_prof && opt_prof) || (slow_path &&
-	    config_valgrind && unlikely(in_valgrind))) {
+	if (config_stats || (config_prof && opt_prof)) {
 		*usize = index2size(ind);
 		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
 	}
@@ -1562,7 +1524,6 @@ je_malloc(size_t size)
 		ret = ialloc_body(size, false, &tsdn, &usize, true);
 		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
 		UTRACE(0, size, ret);
-		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
 	}
 
 	return (ret);
@@ -1664,8 +1625,6 @@ label_return:
 		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, result);
-	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
-	    false);
 	witness_assert_lockless(tsd_tsdn(tsd));
 	return (ret);
 label_oom:
@@ -1684,11 +1643,8 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 JEMALLOC_ATTR(nonnull(1))
 je_posix_memalign(void **memptr, size_t alignment, size_t size)
 {
-	int ret;
-
-	ret = imemalign(memptr, alignment, size, sizeof(void *));
-
-	return (ret);
+
+	return (imemalign(memptr, alignment, size, sizeof(void *)));
 }
 
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1703,7 +1659,6 @@ je_aligned_alloc(size_t alignment, size_t size)
 		ret = NULL;
 		set_errno(err);
 	}
-
 	return (ret);
 }
 
@@ -1739,7 +1694,6 @@ je_calloc(size_t num, size_t size)
 		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
 		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
 		UTRACE(0, num_size, ret);
-		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
 	}
 
 	return (ret);
@@ -1792,7 +1746,6 @@ JEMALLOC_INLINE_C void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 {
 	size_t usize;
-	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	witness_assert_lockless(tsd_tsdn(tsd));
 
@@ -1802,25 +1755,20 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 	if (config_prof && opt_prof) {
 		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
 		prof_free(tsd, ptr, usize);
-	} else if (config_stats || config_valgrind)
+	} else if (config_stats)
 		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
 	if (config_stats)
 		*tsd_thread_deallocatedp_get(tsd) += usize;
 
 	if (likely(!slow_path))
 		iqalloc(tsd, ptr, tcache, false);
-	else {
-		if (config_valgrind && unlikely(in_valgrind))
-			rzsize = p2rz(tsd_tsdn(tsd), ptr);
+	else
 		iqalloc(tsd, ptr, tcache, true);
-		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-	}
 }
 
 JEMALLOC_INLINE_C void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
 {
-	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	witness_assert_lockless(tsd_tsdn(tsd));
 
@@ -1831,10 +1779,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
 		prof_free(tsd, ptr, usize);
 	if (config_stats)
 		*tsd_thread_deallocatedp_get(tsd) += usize;
-	if (config_valgrind && unlikely(in_valgrind))
-		rzsize = p2rz(tsd_tsdn(tsd), ptr);
 
 	isqalloc(tsd, ptr, usize, tcache, slow_path);
-	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }
 
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
 void JEMALLOC_NOTHROW *
@@ -1846,7 +1791,6 @@ je_realloc(void *ptr, size_t size)
 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t old_usize = 0;
-	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	if (unlikely(size == 0)) {
 		if (ptr != NULL) {
@@ -1871,18 +1815,13 @@ je_realloc(void *ptr, size_t size)
 		witness_assert_lockless(tsd_tsdn(tsd));
 
 		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-		if (config_valgrind && unlikely(in_valgrind)) {
-			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
-			    u2rz(old_usize);
-		}
 
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
 			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
 		} else {
-			if (config_stats || (config_valgrind &&
-			    unlikely(in_valgrind)))
+			if (config_stats)
 				usize = s2u(size);
 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
 		}
@@ -1913,8 +1852,6 @@ je_realloc(void *ptr, size_t size)
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, ret);
-	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
-	    old_rzsize, true, false);
 	witness_assert_lockless(tsdn);
 	return (ret);
 }
@@ -2143,8 +2080,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
 		szind_t ind = size2index(size);
 		if (unlikely(ind >= NSIZES))
 			return (NULL);
-		if (config_stats || (config_prof && opt_prof) || (slow_path &&
-		    config_valgrind && unlikely(in_valgrind))) {
+		if (config_stats || (config_prof && opt_prof)) {
 			*usize = index2size(ind);
 			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
 		}
@@ -2181,8 +2117,6 @@ je_mallocx(size_t size, int flags)
 		p = imallocx_body(size, flags, &tsdn, &usize, true);
 		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
 		UTRACE(0, size, p);
-		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
-		    MALLOCX_ZERO_GET(flags));
 	}
 
 	return (p);
@@ -2261,7 +2195,6 @@ je_rallocx(void *ptr, size_t size, int flags)
 	tsd_t *tsd;
 	size_t usize;
 	size_t old_usize;
-	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
 	bool zero = flags & MALLOCX_ZERO;
 	arena_t *arena;
@@ -2291,8 +2224,6 @@ je_rallocx(void *ptr, size_t size, int flags)
 		tcache = tcache_get(tsd, true);
 
 	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-	if (config_valgrind && unlikely(in_valgrind))
-		old_rzsize = u2rz(old_usize);
 
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2307,7 +2238,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 		    tcache, arena);
 		if (unlikely(p == NULL))
 			goto label_oom;
-		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+		if (config_stats)
 			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
 	}
 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2317,8 +2248,6 @@ je_rallocx(void *ptr, size_t size, int flags)
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, p);
-	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
 	witness_assert_lockless(tsd_tsdn(tsd));
 	return (p);
 label_oom:
@@ -2413,7 +2342,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 {
 	tsd_t *tsd;
 	size_t usize, old_usize;
-	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
 	bool zero = flags & MALLOCX_ZERO;
 
@@ -2443,9 +2371,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	if (unlikely(HUGE_MAXCLASS - size < extra))
 		extra = HUGE_MAXCLASS - size;
 
-	if (config_valgrind && unlikely(in_valgrind))
-		old_rzsize = u2rz(old_usize);
-
 	if (config_prof && opt_prof) {
 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
 		    alignment, zero);
@@ -2460,8 +2385,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		*tsd_thread_allocatedp_get(tsd) += usize;
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
-	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
 label_not_resized:
 	UTRACE(ptr, size, ptr);
 	witness_assert_lockless(tsd_tsdn(tsd));
diff --git a/src/quarantine.c b/src/quarantine.c
index 18903fb5..9658ffad 100644
--- a/src/quarantine.c
+++ b/src/quarantine.c
@@ -150,12 +150,7 @@ quarantine(tsd_t *tsd, void *ptr)
 		quarantine->curbytes += usize;
 		quarantine->curobjs++;
 		if (config_fill && unlikely(opt_junk_free)) {
-			/*
-			 * Only do redzone validation if Valgrind isn't in
-			 * operation.
-			 */
-			if ((!config_valgrind || likely(!in_valgrind))
-			    && usize <= SMALL_MAXCLASS)
+			if (usize <= SMALL_MAXCLASS)
 				arena_quarantine_junk_small(ptr, usize);
 			else
 				memset(ptr, JEMALLOC_FREE_JUNK, usize);
diff --git a/src/stats.c b/src/stats.c
index 073be4fe..97f901f6 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -517,7 +517,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			OPT_WRITE_BOOL(redzone)
 			OPT_WRITE_BOOL(zero)
 			OPT_WRITE_BOOL(utrace)
-			OPT_WRITE_BOOL(valgrind)
 			OPT_WRITE_BOOL(xmalloc)
 			OPT_WRITE_BOOL(tcache)
 			OPT_WRITE_SSIZE_T(lg_tcache_max)
diff --git a/src/valgrind.c b/src/valgrind.c
deleted file mode 100644
index 8e7ef3a2..00000000
--- a/src/valgrind.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_VALGRIND
-#  error "This source file is for Valgrind integration."
-#endif
-
-#include <valgrind/memcheck.h>
-
-void
-valgrind_make_mem_noaccess(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
-}
-
-void
-valgrind_make_mem_undefined(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
-}
-
-void
-valgrind_make_mem_defined(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
-}
-
-void
-valgrind_freelike_block(void *ptr, size_t usize)
-{
-
-	VALGRIND_FREELIKE_BLOCK(ptr, usize);
-}
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index 8ba36c21..c602f0ff 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -88,8 +88,7 @@ TEST_BEGIN(test_arena_reset)
 	size_t mib[3];
 	tsdn_t *tsdn;
 
-	test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
-	    && unlikely(opt_quarantine)));
+	test_skip_if(config_fill && unlikely(opt_quarantine));
 
 	sz = sizeof(unsigned);
 	assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 69f8c20c..641138ac 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -139,7 +139,6 @@ TEST_BEGIN(test_mallctl_config)
 	TEST_MALLCTL_CONFIG(tcache, bool);
 	TEST_MALLCTL_CONFIG(tls, bool);
 	TEST_MALLCTL_CONFIG(utrace, bool);
-	TEST_MALLCTL_CONFIG(valgrind, bool);
 	TEST_MALLCTL_CONFIG(xmalloc, bool);
 
 #undef TEST_MALLCTL_CONFIG
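
A minimal sketch of the user-visible effect, not part of the patch itself:
once the "config.valgrind" mallctl node is removed, mallctl() reports ENOENT
for that name, so a consumer that previously branched on Valgrind support can
detect the removal at runtime.  The probe below and its output strings are
illustrative only, and an unprefixed public API (plain mallctl()) is assumed.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool v;
	size_t sz = sizeof(v);
	/* Probe the mallctl name that this patch removes. */
	int err = mallctl("config.valgrind", &v, &sz, NULL, 0);

	if (err == ENOENT) {
		/* jemalloc built from this revision onward. */
		printf("config.valgrind: no such mallctl\n");
	} else if (err == 0) {
		/* An older jemalloc that still reports the setting. */
		printf("config.valgrind: %s\n", v ? "true" : "false");
	}
	return (0);
}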