diff --git a/ChangeLog b/ChangeLog
index ee63cb48..3c0af684 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -25,6 +25,8 @@ found in the git revision history:
       array.
     + Reallocation failure for internal reallocation of the quarantined object
       array (very unlikely) resulted in memory corruption.
+  - Fix Valgrind integration to annotate all internally allocated memory in a
+    way that keeps Valgrind happy about internal data structure access.
 
 * 3.3.0 (January 23, 2013)
 
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index c606c122..6270a08e 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -443,6 +443,7 @@ static const bool config_ivsalloc =
 #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
 #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
 #define VALGRIND_FREELIKE_BLOCK(addr, rzB)
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len)
 #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
 #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
 #define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 71900c2f..ba36204f 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -320,8 +320,8 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 		}
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	if (config_stats)
 		tbin->tstats.nrequests++;
@@ -371,8 +371,8 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 		} else {
 			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 			memset(ret, 0, size);
-			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		}
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 		if (config_stats)
 			tbin->tstats.nrequests++;
diff --git a/src/arena.c b/src/arena.c
index 8d50f4d4..d79e0358 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -366,8 +366,6 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
 	    LG_PAGE)), (npages << LG_PAGE));
 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
 	    (npages << LG_PAGE));
-	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
-	    LG_PAGE)), (npages << LG_PAGE));
 }
 
 static inline void
@@ -380,8 +378,6 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 	    LG_PAGE)), PAGE);
 	for (i = 0; i < PAGE / sizeof(size_t); i++)
 		assert(p[i] == 0);
-	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
-	    LG_PAGE)), PAGE);
 }
 
 static void
@@ -513,6 +509,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 			    run_ind+need_pages-1);
 		}
 	}
+	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+	    LG_PAGE)), (need_pages << LG_PAGE));
 }
 
 static arena_chunk_t *
@@ -574,6 +572,11 @@ arena_chunk_alloc(arena_t *arena)
 			for (i = map_bias+1; i < chunk_npages-1; i++)
 				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
 		} else if (config_debug) {
+			VALGRIND_MAKE_MEM_DEFINED(
+			    (void *)arena_mapp_get(chunk, map_bias+1),
+			    (void *)((uintptr_t)
+			    arena_mapp_get(chunk, chunk_npages-1)
+			    - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
 			for (i = map_bias+1; i < chunk_npages-1; i++) {
 				assert(arena_mapbits_unzeroed_get(chunk, i) ==
 				    unzeroed);
@@ -1246,8 +1249,6 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 		    (uintptr_t)bin_info->bitmap_offset);
 
 		/* Initialize run internals. */
-		VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
-		    bin_info->redzone_size);
 		run->bin = bin;
 		run->nextind = 0;
 		run->nfree = bin_info->nregs;
@@ -1464,8 +1465,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		}
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
-		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
diff --git a/src/base.c b/src/base.c
index b1a5945e..4e62e8fa 100644
--- a/src/base.c
+++ b/src/base.c
@@ -63,6 +63,7 @@ base_alloc(size_t size)
 	ret = base_next_addr;
 	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
 	malloc_mutex_unlock(&base_mtx);
+	VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
 
 	return (ret);
 }
@@ -88,6 +89,7 @@ base_node_alloc(void)
 		ret = base_nodes;
 		base_nodes = *(extent_node_t **)ret;
 		malloc_mutex_unlock(&base_mtx);
+		VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
 	} else {
 		malloc_mutex_unlock(&base_mtx);
 		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
 base_node_dealloc(extent_node_t *node)
 {
 
+	VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	malloc_mutex_lock(&base_mtx);
 	*(extent_node_t **)node = base_nodes;
 	base_nodes = node;
diff --git a/src/chunk.c b/src/chunk.c
index 8cff240a..044f76be 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -120,7 +120,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 
 	if (node != NULL)
 		base_node_dealloc(node);
-	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	if (*zero) {
 		if (zeroed == false)
 			memset(ret, 0, size);
@@ -131,7 +130,6 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 			VALGRIND_MAKE_MEM_DEFINED(ret, size);
 			for (i = 0; i < size / sizeof(size_t); i++)
 				assert(p[i] == 0);
-			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		}
 	}
 	return (ret);
 }
@@ -180,27 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
 	/* All strategies for allocation failed. */
 	ret = NULL;
 label_return:
-	if (config_ivsalloc && base == false && ret != NULL) {
-		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-			chunk_dealloc(ret, size, true);
-			return (NULL);
+	if (ret != NULL) {
+		if (config_ivsalloc && base == false) {
+			if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+				chunk_dealloc(ret, size, true);
+				return (NULL);
+			}
 		}
-	}
-	if ((config_stats || config_prof) && ret != NULL) {
-		bool gdump;
-		malloc_mutex_lock(&chunks_mtx);
-		if (config_stats)
-			stats_chunks.nchunks += (size / chunksize);
-		stats_chunks.curchunks += (size / chunksize);
-		if (stats_chunks.curchunks > stats_chunks.highchunks) {
-			stats_chunks.highchunks = stats_chunks.curchunks;
-			if (config_prof)
-				gdump = true;
-		} else if (config_prof)
-			gdump = false;
-		malloc_mutex_unlock(&chunks_mtx);
-		if (config_prof && opt_prof && opt_prof_gdump && gdump)
-			prof_gdump();
+		if (config_stats || config_prof) {
+			bool gdump;
+			malloc_mutex_lock(&chunks_mtx);
+			if (config_stats)
+				stats_chunks.nchunks += (size / chunksize);
+			stats_chunks.curchunks += (size / chunksize);
+			if (stats_chunks.curchunks > stats_chunks.highchunks) {
+				stats_chunks.highchunks =
+				    stats_chunks.curchunks;
+				if (config_prof)
+					gdump = true;
+			} else if (config_prof)
+				gdump = false;
+			malloc_mutex_unlock(&chunks_mtx);
+			if (config_prof && opt_prof && opt_prof_gdump && gdump)
+				prof_gdump();
+		}
+		if (config_valgrind)
+			VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	}
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
@@ -214,6 +217,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 	extent_node_t *xnode, *node, *prev, key;
 
 	unzeroed = pages_purge(chunk, size);
+	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
 	/*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index d1aea930..24781cc5 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -127,7 +127,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 				if (*zero) {
 					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 					memset(ret, 0, size);
-					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 				}
 				return (ret);
 			}
diff --git a/test/ALLOCM_ARENA.c b/test/ALLOCM_ARENA.c
index 15856908..2c52485e 100644
--- a/test/ALLOCM_ARENA.c
+++ b/test/ALLOCM_ARENA.c
@@ -41,6 +41,7 @@ je_thread_start(void *arg)
 		malloc_printf("Unexpected allocm() error\n");
 		abort();
 	}
+	dallocm(p, 0);
 
 	return (NULL);
 }
diff --git a/test/thread_arena.c b/test/thread_arena.c
index 2ffdb5e8..c5a21fa0 100644
--- a/test/thread_arena.c
+++ b/test/thread_arena.c
@@ -17,6 +17,7 @@ je_thread_start(void *arg)
 		malloc_printf("%s(): Error in malloc()\n", __func__);
 		return (void *)1;
 	}
+	free(p);
 
 	size = sizeof(arena_ind);
 	if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
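
The hunks above all apply one annotation discipline to jemalloc's internally allocated memory; the sketch below restates that discipline outside the diff. It is a minimal illustration, not jemalloc code: the pool_* allocator and POOL_SIZE are hypothetical, and only the Valgrind client requests from <valgrind/memcheck.h> (VALGRIND_MAKE_MEM_NOACCESS/UNDEFINED) are real. Memory parked under allocator control is marked NOACCESS so Valgrind flags any stray read or write, and it is flipped to UNDEFINED only while handed out, mirroring the base.c and chunk.c hunks; DEFINED is reserved for internal reads of data known to be initialized, as in the arena.c and chunk_recycle() debug paths.

/*
 * Minimal sketch (not part of the patch) of the annotation pattern, assuming
 * Valgrind's memcheck client requests; pool_* names are illustrative only.
 */
#include <stddef.h>
#include <valgrind/memcheck.h>

#define POOL_SIZE 4096

static unsigned char pool[POOL_SIZE];
static size_t pool_used;

void
pool_init(void)
{

	/* Nothing in the pool is legitimately addressable yet. */
	VALGRIND_MAKE_MEM_NOACCESS(pool, POOL_SIZE);
	pool_used = 0;
}

void *
pool_alloc(size_t size)
{
	void *ret;

	if (pool_used + size > POOL_SIZE)
		return (NULL);
	ret = pool + pool_used;
	pool_used += size;
	/* Handed out: addressable, but contents still undefined. */
	VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}

void
pool_dealloc(void *ptr, size_t size)
{

	/* Back under allocator control: any further access is an error. */
	VALGRIND_MAKE_MEM_NOACCESS(ptr, size);
}

In builds without Valgrind support these client requests reduce to empty macros, which is why the jemalloc_internal.h.in hunk adds an empty stub for VALGRIND_MAKE_MEM_NOACCESS alongside the existing ones; the annotations therefore cost nothing in normal builds.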