From 7ad54c1c30e0805e0758690115875f982de46cf2 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Sat, 21 Apr 2012 16:04:51 -0700
Subject: [PATCH] Fix chunk allocation/deallocation bugs.

Fix chunk_alloc_dss() to zero memory when requested.

Fix chunk_dealloc() to avoid chunk_dealloc_mmap() for dss-allocated
memory.

Fix huge_palloc() to always junk fill when requested.

Improve chunk_recycle() to report that memory is zeroed as a side
effect of pages_purge().
---
 include/jemalloc/internal/tsd.h |  2 +-
 src/chunk.c                     | 17 +++++++++++++----
 src/chunk_dss.c                 |  4 ++++
 src/huge.c                      | 10 ++++++++--
 4 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index 5e904cbb..20491c88 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -111,7 +111,7 @@ a_name##_tsd_cleanup_wrapper(void)					\
 									\
 	if (a_name##_initialized) {					\
 		a_name##_initialized = false;				\
-		a_cleanup(&a_name##_tls);				\
+		a_cleanup(&a_name##_tls);				\
 	}								\
 	return (a_name##_initialized);					\
 }									\
diff --git a/src/chunk.c b/src/chunk.c
index 31485058..0fccd0ce 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -98,7 +98,10 @@ chunk_recycle(size_t size, size_t alignment, bool *zero)
 	if (node != NULL)
 		base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_FREE
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+	/* Pages are zeroed as a side effect of pages_purge(). */
+	*zero = true;
+#else
 	if (*zero) {
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
 	}
@@ -161,7 +164,13 @@ label_return:
 			if (config_prof && opt_prof && opt_prof_gdump && gdump)
 				prof_gdump();
 		}
 	}
+	if (config_debug && *zero && ret != NULL) {
+		size_t i;
+		size_t *p = (size_t *)(uintptr_t)ret;
+		for (i = 0; i < size / sizeof(size_t); i++)
+			assert(p[i] == 0);
+	}
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
@@ -258,9 +267,9 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	}
 
 	if (unmap) {
-		if (chunk_dealloc_mmap(chunk, size) == false)
-			return;
-		chunk_record(chunk, size);
+		if ((config_dss && chunk_in_dss(chunk)) ||
+		    chunk_dealloc_mmap(chunk, size))
+			chunk_record(chunk, size);
 	}
 }
 
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index bd4a724b..2d68e480 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -89,6 +89,10 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
+			if (*zero) {
+				VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+				memset(ret, 0, size);
+			}
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);
diff --git a/src/huge.c b/src/huge.c
index daf0c622..23eb074a 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -28,6 +28,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	void *ret;
 	size_t csize;
 	extent_node_t *node;
+	bool is_zeroed;
 
 	/* Allocate one or more contiguous chunks for this request. */
 	csize = CHUNK_CEILING(size);
@@ -42,7 +43,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (node == NULL)
 		return (NULL);
 
-	ret = chunk_alloc(csize, alignment, false, &zero);
+	/*
+	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+	 * it is possible to make correct junk/zero fill decisions below.
+	 */
+	is_zeroed = zero;
+	ret = chunk_alloc(csize, alignment, false, &is_zeroed);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -64,7 +70,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
-		else if (opt_zero)
+		else if (opt_zero && is_zeroed == false)
 			memset(ret, 0, csize);
 	}
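
Notes on the fixes, with illustrative sketches (hypothetical stand-ins, not
jemalloc code):

The chunk_recycle() hunk relies on a kernel-side effect: when pages_purge()
is backed by madvise(MADV_DONTNEED) (the Linux case, selected by
JEMALLOC_PURGE_MADVISE_DONTNEED), purged private anonymous pages read back
as zeros on the next touch, so a recycled run can be reported as zeroed
without an explicit memset(). A standalone, Linux-only demonstration of
that behavior:

#define _DEFAULT_SOURCE	/* for madvise() on glibc */
#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t size = (size_t)sysconf(_SC_PAGESIZE);
	unsigned char *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);

	p[0] = 0xa5;				/* dirty the page */
	madvise(p, size, MADV_DONTNEED);	/* "purge" it, as pages_purge() does */
	assert(p[0] == 0);			/* it reads back zero-filled */

	munmap(p, size);
	return (0);
}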
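
The chunk_dealloc() hunk fixes the dispatch for dss memory: chunks carved
out of the sbrk()-managed dss region can never be returned with munmap(),
so they must always go back on the recycle list, while mmap()ed chunks are
recorded only when chunk_dealloc_mmap() declines to unmap them (it returns
true in that case, as the patched condition implies). A toy model of the
corrected logic, with hypothetical toy_* stand-ins for the real internals:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const bool config_dss = true;

/* Stand-in: pretend addresses below 0x10000 came from sbrk(). */
static bool
toy_chunk_in_dss(void *chunk)
{
	return ((uintptr_t)chunk < 0x10000);
}

/* Stand-in: returns true when the pages were NOT unmapped (e.g. when
 * munmap() is disabled) and therefore must be recycled instead. */
static bool
toy_chunk_dealloc_mmap(void *chunk, size_t size)
{
	(void)chunk;
	(void)size;
	return (false);
}

static void
toy_chunk_record(void *chunk, size_t size)
{
	printf("recycle %zu bytes at %p\n", size, chunk);
}

/* dss memory is always recorded; mmap()ed memory is recorded only when
 * unmapping is declined. */
static void
toy_chunk_dealloc(void *chunk, size_t size)
{
	if ((config_dss && toy_chunk_in_dss(chunk)) ||
	    toy_chunk_dealloc_mmap(chunk, size))
		toy_chunk_record(chunk, size);
}

int
main(void)
{
	toy_chunk_dealloc((void *)0x8000, 4096);	/* dss: recycled */
	toy_chunk_dealloc((void *)0x20000, 4096);	/* mmap: unmapped */
	return (0);
}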
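
The huge_palloc() hunks fix an in/out-parameter hazard: chunk_alloc()
overwrites *zero to report whether the memory actually is zeroed, so
passing &zero directly let that report clobber the caller's request and
suppress junk filling. Working on a copy (is_zeroed) preserves both the
request and the report, and additionally lets opt_zero skip a redundant
memset() when the chunk is already zeroed. A self-contained sketch of the
pattern, again with hypothetical toy_* names:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for chunk_alloc(): *zero is in/out.  On entry it carries the
 * caller's request; on return it reports the memory's actual state. */
static void *
toy_chunk_alloc(size_t size, bool *zero)
{
	void *ret = calloc(1, size);	/* pretend the kernel zeroed it */
	if (ret != NULL)
		*zero = true;		/* report actual state */
	return (ret);
}

/* Stand-in for huge_palloc(): copy the request so the allocator's report
 * cannot clobber it, then junk/zero fill based on both values. */
static void *
toy_huge_palloc(size_t size, bool zero, bool opt_junk, bool opt_zero)
{
	bool is_zeroed = zero;		/* the fix: work on a copy */
	void *ret = toy_chunk_alloc(size, &is_zeroed);
	if (ret == NULL)
		return (NULL);
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, size);	/* always junk fill */
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, size);	/* skip redundant zeroing */
	}
	return (ret);
}

int
main(void)
{
	unsigned char *p = toy_huge_palloc(4096, false, true, false);
	assert(p != NULL && p[0] == 0xa5);	/* junk fill was not skipped */
	free(p);
	return (0);
}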