Fix a memory corruption bug in chunk_alloc_dss().

Fix a memory corruption bug in chunk_alloc_dss() that was caused by
incorrectly claiming that newly allocated memory is zeroed.

Reverse order of preference between mmap() and sbrk() to prefer mmap().

Clean up management of 'zero' parameter in chunk_alloc*().
Jason Evans 2012-04-21 13:33:48 -07:00
parent 606f1fdc3c
commit 8f0e0eb1c0
6 changed files with 20 additions and 15 deletions
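
Why the false claim corrupts memory: a caller that needs zeroed chunks trusts
the 'zero' out-parameter and skips memset() when the flag comes back true, so
a backend that sets the flag without delivering zero-filled pages hands its
caller stale bytes. A minimal sketch of that contract (hypothetical code, not
jemalloc source; toy_chunk_alloc() and its exact in/out semantics are
illustrative assumptions):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/*
 * Hypothetical backend: returns a region and reports via *zero whether
 * the memory is known to be zero-filled.
 */
static void *
toy_chunk_alloc(size_t size, bool *zero)
{
	void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	*zero = true;	/* Fresh anonymous mappings read as zero. */
	return (ret);
}

int
main(void)
{
	bool zero = false;
	size_t size = 1 << 20;
	void *p = toy_chunk_alloc(size, &zero);

	/*
	 * Callers skip the memset() when zero is true; a backend that
	 * lies here (as chunk_alloc_dss() did) leaves stale bytes in
	 * what the caller believes is zeroed memory.
	 */
	if (p != NULL && zero == false)
		memset(p, 0, size);
	return (0);
}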

ChangeLog

@@ -70,6 +70,8 @@ found in the git revision history:
   invalid statistics and crashes.
 - Work around TLS deallocation via free() on Linux. This bug could cause
   write-after-free memory corruption.
+- Fix chunk_alloc_dss() to stop claiming memory is zeroed. This bug could
+  cause memory corruption and crashes with --enable-dss specified.
 - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
 - Fix realloc(p, 0) to act like free(p).
 - Do not enforce minimum alignment in memalign().

doc/jemalloc.xml.in

@@ -444,9 +444,9 @@ for (i = 0; i < nbins; i++) {
     suboptimal for several reasons, including race conditions, increased
     fragmentation, and artificial limitations on maximum usable memory. If
     <option>--enable-dss</option> is specified during configuration, this
-    allocator uses both <citerefentry><refentrytitle>sbrk</refentrytitle>
+    allocator uses both <citerefentry><refentrytitle>mmap</refentrytitle>
     <manvolnum>2</manvolnum></citerefentry> and
-    <citerefentry><refentrytitle>mmap</refentrytitle>
+    <citerefentry><refentrytitle>sbrk</refentrytitle>
     <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
     otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
     <manvolnum>2</manvolnum></citerefentry> is used.</para>

include/jemalloc/internal/chunk_mmap.h

@@ -11,7 +11,7 @@
 void	pages_purge(void *addr, size_t length);
 
-void	*chunk_alloc_mmap(size_t size, size_t alignment);
+void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dealloc_mmap(void *chunk, size_t size);
 
 bool	chunk_mmap_boot(void);

src/chunk.c

@@ -125,16 +125,16 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 	ret = chunk_recycle(size, alignment, zero);
 	if (ret != NULL)
 		goto label_return;
+
+	ret = chunk_alloc_mmap(size, alignment, zero);
+	if (ret != NULL)
+		goto label_return;
+
 	if (config_dss) {
 		ret = chunk_alloc_dss(size, alignment, zero);
 		if (ret != NULL)
 			goto label_return;
 	}
-	ret = chunk_alloc_mmap(size, alignment);
-	if (ret != NULL) {
-		*zero = true;
-		goto label_return;
-	}
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
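
Paraphrased, the cascade above now tries recycled chunks, then mmap(), then
the DSS. The sketch below is a simplified stand-in (chunk_alloc_sketch(), the
have_dss flag, and the bare declarations are illustrative, not jemalloc's
real internals) that spells out the rationale for the new order:

#include <stdbool.h>
#include <stddef.h>

/* Declarations standing in for jemalloc's internal backends. */
void	*chunk_recycle(size_t size, size_t alignment, bool *zero);
void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
void	*chunk_alloc_dss(size_t size, size_t alignment, bool *zero);

static void *
chunk_alloc_sketch(size_t size, size_t alignment, bool have_dss, bool *zero)
{
	void *ret;

	/* 1) Cheapest: reuse a previously freed chunk. */
	if ((ret = chunk_recycle(size, alignment, zero)) != NULL)
		return (ret);
	/* 2) Prefer mmap(): fresh anonymous pages arrive zero-filled
	 * and can be returned to the kernel independently later. */
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		return (ret);
	/* 3) Last resort: sbrk()-backed DSS memory, whose contents are
	 * unknown; every backend now sets *zero for itself. */
	if (have_dss && (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
		return (ret);
	return (NULL);	/* All strategies failed. */
}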

src/chunk_dss.c

@@ -89,7 +89,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
-			*zero = true;
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);
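
The deleted assignment was the corruption: sbrk() makes no zero-fill promise,
because the pages it exposes may have been used and released earlier in the
process lifetime (for instance if the break shrank and later grew back), so
the DSS path must not report zeroed memory. A sketch of the honest post-fix
contract, using a hypothetical dss_alloc_sketch() rather than the real
function:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

static void *
dss_alloc_sketch(size_t size, bool *zero)
{
	void *ret = sbrk((intptr_t)size);

	if (ret == (void *)-1)
		return (NULL);
	/*
	 * Honest answer: the contents of break-extended pages are
	 * unknown, so callers that need zeroed memory must memset()
	 * it themselves.
	 */
	*zero = false;
	return (ret);
}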

src/chunk_mmap.c

@@ -18,7 +18,7 @@ malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
 
 static void	*pages_map(void *addr, size_t size);
 static void	pages_unmap(void *addr, size_t size);
 static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
-    bool unaligned);
+    bool unaligned, bool *zero);
 
 /******************************************************************************/
@@ -87,7 +87,7 @@ pages_purge(void *addr, size_t length)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
 {
 	void *ret, *pages;
 	size_t alloc_size, leadsize, trailsize;
@@ -122,11 +122,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
 		mmap_unaligned_tsd_set(&mu);
 	}
 
 	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
@@ -177,8 +179,8 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 			 * the reliable-but-expensive method.
 			 */
 			pages_unmap(ret, size);
-			ret = chunk_alloc_mmap_slow(size, alignment,
-			    true);
+			return (chunk_alloc_mmap_slow(size, alignment,
+			    true, zero));
 		} else {
 			/* Clean up unneeded leading space. */
 			pages_unmap(ret, chunksize - offset);
@@ -187,8 +189,10 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 		}
 	} else
-		ret = chunk_alloc_mmap_slow(size, alignment, false);
+		return (chunk_alloc_mmap_slow(size, alignment, false,
+		    zero));
 
 	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
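
The unconditional *zero = true in the mmap() paths, by contrast, is sound: on
the platforms jemalloc targets, private anonymous mappings are zero-filled by
the kernel, so every byte chunk_alloc_mmap() returns is fresh. A standalone
self-check of that property (a sketch, not part of the patch; assumes a
POSIX-like system that provides MAP_ANON):

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

int
main(void)
{
	size_t i, size = 1 << 20;
	unsigned char *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	assert(p != MAP_FAILED);
	for (i = 0; i < size; i++)
		assert(p[i] == 0);	/* zero-filled by the kernel */
	munmap(p, size);
	return (0);
}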