Fix Valgrind integration.

Fix Valgrind integration to annotate all internally allocated memory in
a way that keeps Valgrind happy about internal data structure access.
Jason Evans 2013-01-31 17:02:53 -08:00
parent a7a28c334e
commit 06912756cc
9 changed files with 44 additions and 32 deletions
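The scheme the diff converges on has three moving parts, all visible below: internally allocated memory is marked UNDEFINED at the single point where it is handed out, retired memory is marked NOACCESS, and metadata is flipped to DEFINED only for the duration of debug-time reads. The standalone sketch below is illustrative only, not jemalloc code; it just exercises the same valgrind/memcheck.h client requests the diff relies on (the macros are no-ops unless the program actually runs under Valgrind):

    #include <stdlib.h>
    #include <string.h>
    #include <valgrind/memcheck.h>

    int
    main(void)
    {
        size_t size = 4096;
        char *p = malloc(size);

        if (p == NULL)
            return (1);
        /* Writable, contents treated as garbage. */
        VALGRIND_MAKE_MEM_UNDEFINED(p, size);
        memset(p, 0, size);
        /* Contents now legal to read. */
        VALGRIND_MAKE_MEM_DEFINED(p, size);
        /* Retired: any further access is reported as an error. */
        VALGRIND_MAKE_MEM_NOACCESS(p, size);
        free(p);
        return (0);
    }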

ChangeLog

@@ -25,6 +25,8 @@ found in the git revision history:
       array.
     + Reallocation failure for internal reallocation of the quarantined object
       array (very unlikely) resulted in memory corruption.
+  - Fix Valgrind integration to annotate all internally allocated memory in a
+    way that keeps Valgrind happy about internal data structure access.
 
 * 3.3.0 (January 23, 2013)

include/jemalloc/internal/jemalloc_internal.h.in

@@ -443,6 +443,7 @@ static const bool config_ivsalloc =
 #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
 #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
 #define VALGRIND_FREELIKE_BLOCK(addr, rzB)
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
 #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
 #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
 #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
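When jemalloc is built without Valgrind support, this block supplies empty stand-ins so call sites never need #ifdef guards; the hunk adds the NOACCESS stub that chunk_record below starts using. The surrounding pattern, reproduced here in assumed and abbreviated form rather than verbatim:

    #ifdef JEMALLOC_VALGRIND
    #include <valgrind/memcheck.h>
    #else
    /* Annotation macros compile away entirely. */
    #define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
    #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
    #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
    #endif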

include/jemalloc/internal/tcache.h

@@ -320,8 +320,8 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 		}
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	if (config_stats)
 		tbin->tstats.nrequests++;
@@ -371,8 +371,8 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 		} else {
 			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 			memset(ret, 0, size);
-			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		}
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 		if (config_stats)
 			tbin->tstats.nrequests++;
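Both tcache hunks follow the same rule: the annotation before the memset stays, because a recycled region may still be marked NOACCESS from when it was freed and the zeroing write would otherwise be reported as an invalid write; the annotation after the memset moves out of the branch, so every region is marked UNDEFINED exactly once, at the point it is handed to the caller. A reduced sketch of the rule, with a hypothetical helper name that is not part of jemalloc's API:

    #include <string.h>
    #include <valgrind/memcheck.h>

    /* Hand a cached region back out, optionally zeroed. */
    static void *
    cache_hand_out(void *region, size_t size, int zero)
    {
        if (zero) {
            /* Must precede the write: region may be NOACCESS. */
            VALGRIND_MAKE_MEM_UNDEFINED(region, size);
            memset(region, 0, size);
        }
        /* Single hand-out point: contents formally undefined. */
        VALGRIND_MAKE_MEM_UNDEFINED(region, size);
        return (region);
    }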

src/arena.c

@@ -366,8 +366,6 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
 	    LG_PAGE)), (npages << LG_PAGE));
 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
 	    (npages << LG_PAGE));
-	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
-	    LG_PAGE)), (npages << LG_PAGE));
 }
 
 static inline void
@@ -380,8 +378,6 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 	    LG_PAGE)), PAGE);
 	for (i = 0; i < PAGE / sizeof(size_t); i++)
 		assert(p[i] == 0);
-	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
-	    LG_PAGE)), PAGE);
 }
 
 static void
@@ -513,6 +509,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 			    run_ind+need_pages-1);
 		}
 	}
+	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+	    LG_PAGE)), (need_pages << LG_PAGE));
 }
 
 static arena_chunk_t *
@@ -574,6 +572,11 @@ arena_chunk_alloc(arena_t *arena)
 		for (i = map_bias+1; i < chunk_npages-1; i++)
 			arena_mapbits_unzeroed_set(chunk, i, unzeroed);
 	} else if (config_debug) {
+		VALGRIND_MAKE_MEM_DEFINED(
+		    (void *)arena_mapp_get(chunk, map_bias+1),
+		    (void *)((uintptr_t)
+		    arena_mapp_get(chunk, chunk_npages-1)
+		    - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
 		for (i = map_bias+1; i < chunk_npages-1; i++) {
 			assert(arena_mapbits_unzeroed_get(chunk, i) ==
 			    unzeroed);
@@ -1246,8 +1249,6 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 		    (uintptr_t)bin_info->bitmap_offset);
 
 		/* Initialize run internals. */
-		VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
-		    bin_info->redzone_size);
 		run->bin = bin;
 		run->nextind = 0;
 		run->nfree = bin_info->nregs;
@@ -1464,8 +1465,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		}
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
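The arena_chunk_alloc hunk above shows the read-side counterpart: the debug assertions iterate over map entries whose annotation state is unknown at that point, so the range is first marked DEFINED to make the reads legal. The same idea in isolation, with names invented for illustration:

    #include <assert.h>
    #include <stddef.h>
    #include <valgrind/memcheck.h>

    /* Debug-only check that a metadata array is zero-filled. */
    static void
    metadata_validate_zeroed(const size_t *meta, size_t nelms)
    {
        size_t i;

        /* Make the reads below legal under memcheck. */
        VALGRIND_MAKE_MEM_DEFINED(meta, nelms * sizeof(size_t));
        for (i = 0; i < nelms; i++)
            assert(meta[i] == 0);
    }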

src/base.c

@@ -63,6 +63,7 @@ base_alloc(size_t size)
 	ret = base_next_addr;
 	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
 	malloc_mutex_unlock(&base_mtx);
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
 	return (ret);
 }
@@ -88,6 +89,7 @@ base_node_alloc(void)
 		ret = base_nodes;
 		base_nodes = *(extent_node_t **)ret;
 		malloc_mutex_unlock(&base_mtx);
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
 	} else {
 		malloc_mutex_unlock(&base_mtx);
 		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
 base_node_dealloc(extent_node_t *node)
 {
 
+	VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	malloc_mutex_lock(&base_mtx);
 	*(extent_node_t **)node = base_nodes;
 	base_nodes = node;
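base_alloc and base_node_alloc carve internal allocations out of chunks that now arrive marked UNDEFINED, and recycled extent nodes carry stale annotations, so each block is explicitly re-annotated as it is handed out. A simplified model of that bookkeeping follows (static buffer instead of mapped chunks, no locking; not the real base allocator):

    #include <stddef.h>
    #include <valgrind/memcheck.h>

    static char base_region[1 << 20];   /* stand-in for a mapped chunk */
    static size_t base_off;

    static void *
    base_bump_alloc(size_t size)
    {
        void *ret;

        if (base_off + size > sizeof(base_region))
            return (NULL);
        ret = &base_region[base_off];
        base_off += size;
        /* The caller may now legally write the whole block. */
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        return (ret);
    }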

src/chunk.c

@@ -120,7 +120,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 	if (node != NULL)
 		base_node_dealloc(node);
-	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	if (*zero) {
 		if (zeroed == false)
 			memset(ret, 0, size);
@@ -131,7 +130,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 			VALGRIND_MAKE_MEM_DEFINED(ret, size);
 			for (i = 0; i < size / sizeof(size_t); i++)
 				assert(p[i] == 0);
-			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		}
 	}
 	return (ret);
@@ -180,27 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
 	/* All strategies for allocation failed. */
 	ret = NULL;
 label_return:
-	if (config_ivsalloc && base == false && ret != NULL) {
-		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-			chunk_dealloc(ret, size, true);
-			return (NULL);
+	if (ret != NULL) {
+		if (config_ivsalloc && base == false) {
+			if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+				chunk_dealloc(ret, size, true);
+				return (NULL);
+			}
 		}
-	}
-	if ((config_stats || config_prof) && ret != NULL) {
-		bool gdump;
-		malloc_mutex_lock(&chunks_mtx);
-		if (config_stats)
-			stats_chunks.nchunks += (size / chunksize);
-		stats_chunks.curchunks += (size / chunksize);
-		if (stats_chunks.curchunks > stats_chunks.highchunks) {
-			stats_chunks.highchunks = stats_chunks.curchunks;
-			if (config_prof)
-				gdump = true;
-		} else if (config_prof)
-			gdump = false;
-		malloc_mutex_unlock(&chunks_mtx);
-		if (config_prof && opt_prof && opt_prof_gdump && gdump)
-			prof_gdump();
+		if (config_stats || config_prof) {
+			bool gdump;
+			malloc_mutex_lock(&chunks_mtx);
+			if (config_stats)
+				stats_chunks.nchunks += (size / chunksize);
+			stats_chunks.curchunks += (size / chunksize);
+			if (stats_chunks.curchunks > stats_chunks.highchunks) {
+				stats_chunks.highchunks =
+				    stats_chunks.curchunks;
+				if (config_prof)
+					gdump = true;
+			} else if (config_prof)
+				gdump = false;
+			malloc_mutex_unlock(&chunks_mtx);
+			if (config_prof && opt_prof && opt_prof_gdump && gdump)
+				prof_gdump();
+		}
+		if (config_valgrind)
+			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
@@ -214,6 +217,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 	extent_node_t *xnode, *node, *prev, key;
 
 	unzeroed = pages_purge(chunk, size);
+	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
 	/*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
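chunk_record is the retirement side of the scheme: once a chunk's pages are purged and the chunk goes back on the recycle trees, nothing should touch it, and the NOACCESS annotation turns any stray access into a memcheck error until chunk_alloc re-marks it UNDEFINED. The two transitions, isolated with invented helper names:

    #include <stddef.h>
    #include <valgrind/memcheck.h>

    /* Retire a chunk: every later access becomes a reported error. */
    static void
    chunk_retire_annotate(void *chunk, size_t size)
    {
        VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
    }

    /* Revive a recycled chunk at the moment it is handed back out. */
    static void
    chunk_revive_annotate(void *chunk, size_t size)
    {
        VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
    }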

src/chunk_dss.c

@@ -127,7 +127,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 				if (*zero) {
 					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 					memset(ret, 0, size);
-					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 				}
 				return (ret);
 			}
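The deletion here is the same cleanup as in chunk_recycle: per-backend UNDEFINED annotations are redundant now that chunk_alloc marks every successful allocation once at its label_return exit, while the annotation before the memset survives because the DSS can hand back address space that an earlier chunk_record left marked NOACCESS.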

test/ALLOCM_ARENA.c

@@ -41,6 +41,7 @@ je_thread_start(void *arg)
 		malloc_printf("Unexpected allocm() error\n");
 		abort();
 	}
+	dallocm(p, 0);
 
 	return (NULL);
 }
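The test tweaks are leak hygiene rather than functional changes: with allocations and frees now fully visible to memcheck, anything a test thread never releases shows up when the test binary is run under valgrind (for example with --leak-check=full), so each thread releases its allocation before exiting.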

test/thread_arena.c

@@ -17,6 +17,7 @@ je_thread_start(void *arg)
 		malloc_printf("%s(): Error in malloc()\n", __func__);
 		return (void *)1;
 	}
+	free(p);
 
 	size = sizeof(arena_ind);
 	if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,