Tighten valgrind integration.

Tighten valgrind integration such that immediately after memory is
validated or zeroed, valgrind is told to forget the memory's 'defined'
state.  The only place newly allocated memory should be left marked as
'defined' is in the public functions (e.g. calloc() and realloc()).
Jason Evans 2013-01-21 20:04:42 -08:00
parent 14a2c6a698
commit 38067483c5
3 changed files with 31 additions and 22 deletions
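
For context, here is a minimal, self-contained sketch of the pattern this commit applies throughout the allocator: internal zeroing is bracketed by VALGRIND_MAKE_MEM_UNDEFINED() so the memset() does not leave the range marked 'defined'. The helper name zero_and_forget(), the HAVE_VALGRIND guard, and the no-op fallback macro are illustrative assumptions for this sketch, not jemalloc code (jemalloc has its own configure-time valgrind machinery).

#include <stddef.h>
#include <string.h>

/*
 * VALGRIND_MAKE_MEM_UNDEFINED() comes from <valgrind/memcheck.h>.  The
 * HAVE_VALGRIND guard and the no-op fallback are assumptions made for
 * this sketch so that it also builds without valgrind headers.
 */
#ifdef HAVE_VALGRIND
#  include <valgrind/memcheck.h>
#else
#  define VALGRIND_MAKE_MEM_UNDEFINED(addr, len) ((void)(addr), (void)(len))
#endif

/*
 * Hypothetical helper (not part of jemalloc) showing the commit's pattern:
 * mark the range accessible-but-undefined so the write is permitted, zero
 * it for the allocator's internal purposes, then mark it undefined again so
 * the memset() does not leave it 'defined'.  Only public entry points such
 * as calloc() and realloc() should deliberately hand the application memory
 * that valgrind treats as defined.
 */
static void
zero_and_forget(void *ptr, size_t size)
{

	VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
	memset(ptr, 0, size);
	VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
}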


@@ -320,6 +320,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 		}
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}

 	if (config_stats)
@@ -370,6 +371,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 		} else {
 			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 			memset(ret, 0, size);
+			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		}

 		if (config_stats)


@@ -359,13 +359,29 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
 }

 static inline void
-arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
+{
+
+	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+	    LG_PAGE)), (npages << LG_PAGE));
+	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
+	    (npages << LG_PAGE));
+	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+	    LG_PAGE)), (npages << LG_PAGE));
+}
+
+static inline void
+arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
 	size_t i;
 	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

+	VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
+	    LG_PAGE)), PAGE);
 	for (i = 0; i < PAGE / sizeof(size_t); i++)
 		assert(p[i] == 0);
+	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+	    LG_PAGE)), PAGE);
 }

 static void
@@ -441,19 +457,10 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 				for (i = 0; i < need_pages; i++) {
 					if (arena_mapbits_unzeroed_get(chunk,
 					    run_ind+i) != 0) {
-						VALGRIND_MAKE_MEM_UNDEFINED(
-						    (void *)((uintptr_t)
-						    chunk + ((run_ind+i) <<
-						    LG_PAGE)), PAGE);
-						memset((void *)((uintptr_t)
-						    chunk + ((run_ind+i) <<
-						    LG_PAGE)), 0, PAGE);
+						arena_run_zero(chunk, run_ind+i,
+						    1);
 					} else if (config_debug) {
-						VALGRIND_MAKE_MEM_DEFINED(
-						    (void *)((uintptr_t)
-						    chunk + ((run_ind+i) <<
-						    LG_PAGE)), PAGE);
-						arena_chunk_validate_zeroed(
+						arena_run_page_validate_zeroed(
 						    chunk, run_ind+i);
 					}
 				}
@@ -462,11 +469,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 				 * The run is dirty, so all pages must be
 				 * zeroed.
 				 */
-				VALGRIND_MAKE_MEM_UNDEFINED((void
-				    *)((uintptr_t)chunk + (run_ind <<
-				    LG_PAGE)), (need_pages << LG_PAGE));
-				memset((void *)((uintptr_t)chunk + (run_ind <<
-				    LG_PAGE)), 0, (need_pages << LG_PAGE));
+				arena_run_zero(chunk, run_ind, need_pages);
 			}
 		}
@@ -492,19 +495,21 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 		if (config_debug && flag_dirty == 0 &&
 		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
-			arena_chunk_validate_zeroed(chunk, run_ind);
+			arena_run_page_validate_zeroed(chunk, run_ind);
 		for (i = 1; i < need_pages - 1; i++) {
 			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
 			if (config_debug && flag_dirty == 0 &&
-			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
-				arena_chunk_validate_zeroed(chunk, run_ind+i);
+			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
+				arena_run_page_validate_zeroed(chunk,
+				    run_ind+i);
+			}
 		}
 		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
 		    need_pages-1, binind, flag_dirty);
 		if (config_debug && flag_dirty == 0 &&
 		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
 		    0) {
-			arena_chunk_validate_zeroed(chunk,
+			arena_run_page_validate_zeroed(chunk,
 			    run_ind+need_pages-1);
 		}
 	}
@@ -1459,6 +1464,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		}
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}

 	return (ret);


@@ -127,6 +127,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 		if (*zero) {
 			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 			memset(ret, 0, size);
+			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		}
 		return (ret);
 	}