From 8f0e0eb1c01d5d934586ea62e519ca8b8637aebc Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Sat, 21 Apr 2012 13:33:48 -0700
Subject: [PATCH] Fix a memory corruption bug in chunk_alloc_dss().

Fix a memory corruption bug in chunk_alloc_dss() that was due to claiming
newly allocated memory is zeroed.

Reverse order of preference between mmap() and sbrk() to prefer mmap().

Clean up management of 'zero' parameter in chunk_alloc*().
---
 ChangeLog                              |  2 ++
 doc/jemalloc.xml.in                    |  4 ++--
 include/jemalloc/internal/chunk_mmap.h |  2 +-
 src/chunk.c                            | 10 +++++-----
 src/chunk_dss.c                        |  1 -
 src/chunk_mmap.c                       | 16 ++++++++++------
 6 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 6d5670f5..b71fa165 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -70,6 +70,8 @@ found in the git revision history:
     invalid statistics and crashes.
   - Work around TLS deallocation via free() on Linux.  This bug could cause
     write-after-free memory corruption.
+  - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
+    cause memory corruption and crashes with --enable-dss specified.
   - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
   - Fix realloc(p, 0) to act like free(p).
   - Do not enforce minimum alignment in memalign().
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index f78f423c..e8a57225 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -444,9 +444,9 @@ for (i = 0; i < nbins; i++) {
     suboptimal for several reasons, including race conditions, increased
     fragmentation, and artificial limitations on maximum usable memory.  If
     <option>--enable-dss</option> is specified during configuration, this
-    allocator uses both <citerefentry><refentrytitle>sbrk</refentrytitle>
+    allocator uses both <citerefentry><refentrytitle>mmap</refentrytitle>
     <manvolnum>2</manvolnum></citerefentry> and
-    <citerefentry><refentrytitle>mmap</refentrytitle>
+    <citerefentry><refentrytitle>sbrk</refentrytitle>
     <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
     otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
     <manvolnum>2</manvolnum></citerefentry> is used.
diff --git a/include/jemalloc/internal/chunk_mmap.h b/include/jemalloc/internal/chunk_mmap.h
index 2d01ac22..8224430a 100644
--- a/include/jemalloc/internal/chunk_mmap.h
+++ b/include/jemalloc/internal/chunk_mmap.h
@@ -11,7 +11,7 @@
 
 void	pages_purge(void *addr, size_t length);
 
-void	*chunk_alloc_mmap(size_t size, size_t alignment);
+void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dealloc_mmap(void *chunk, size_t size);
 
 bool	chunk_mmap_boot(void);
diff --git a/src/chunk.c b/src/chunk.c
index bcaedea4..31485058 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -125,16 +125,16 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 	ret = chunk_recycle(size, alignment, zero);
 	if (ret != NULL)
 		goto label_return;
+
+	ret = chunk_alloc_mmap(size, alignment, zero);
+	if (ret != NULL)
+		goto label_return;
+
 	if (config_dss) {
 		ret = chunk_alloc_dss(size, alignment, zero);
 		if (ret != NULL)
 			goto label_return;
 	}
-	ret = chunk_alloc_mmap(size, alignment);
-	if (ret != NULL) {
-		*zero = true;
-		goto label_return;
-	}
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index b05509a5..bd4a724b 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -89,7 +89,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
-			*zero = true;
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 9dea8318..126406ae 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -18,7 +18,7 @@ malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
 static void	*pages_map(void *addr, size_t size);
 static void	pages_unmap(void *addr, size_t size);
 static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
-    bool unaligned);
+    bool unaligned, bool *zero);
 
 /******************************************************************************/
 
@@ -87,7 +87,7 @@ pages_purge(void *addr, size_t length)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
 {
 	void *ret, *pages;
 	size_t alloc_size, leadsize, trailsize;
@@ -122,11 +122,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
 		mmap_unaligned_tsd_set(&mu);
 	}
 
+	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
 
@@ -177,8 +179,8 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 			 * the reliable-but-expensive method.
 			 */
 			pages_unmap(ret, size);
-			ret = chunk_alloc_mmap_slow(size, alignment,
-			    true);
+			return (chunk_alloc_mmap_slow(size, alignment,
+			    true, zero));
 		} else {
 			/* Clean up unneeded leading space. */
 			pages_unmap(ret, chunksize - offset);
@@ -187,8 +189,10 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 			}
 		}
 	} else
-		ret = chunk_alloc_mmap_slow(size, alignment, false);
+		return (chunk_alloc_mmap_slow(size, alignment, false, zero));
 
+	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
 
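
Not part of the patch: a minimal caller-side sketch of what the zero flag means after
this change, for illustration only.  It assumes jemalloc's internal chunk_alloc(size,
alignment, base, zero) entry point and chunksize global (declared below rather than
pulled from the internal headers); alloc_zeroed_chunk() is a hypothetical helper, not
jemalloc code.  The point is that callers must consult *zero instead of assuming fresh
chunks are zero-filled: mmap()ed chunks report *zero == true, while recycled or
sbrk()-backed chunks may contain dirty data.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* jemalloc internals assumed by this sketch. */
void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
extern size_t	chunksize;

/* Hypothetical helper: zero the chunk only when the allocator could not
 * guarantee zeroed pages. */
static void *
alloc_zeroed_chunk(size_t size)
{
	bool zero = false;	/* do not request zeroing up front */
	void *chunk = chunk_alloc(size, chunksize, false, &zero);

	/* mmap()ed memory comes back with *zero == true; DSS- or
	 * recycle-backed memory may be dirty, so zero it here. */
	if (chunk != NULL && zero == false)
		memset(chunk, 0, size);
	return (chunk);
}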