Fix a memory corruption bug in chunk_alloc_dss().
Fix a memory corruption bug in chunk_alloc_dss() that was due to claiming newly allocated memory is zeroed.

Reverse order of preference between mmap() and sbrk() to prefer mmap().

Clean up management of 'zero' parameter in chunk_alloc*().
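The 'zero' argument managed here is an out-parameter: chunk_alloc*() reports through it whether the returned memory is known to be zero-filled, and callers only clear memory that is not. A minimal sketch of that contract, using a hypothetical alloc_chunk() rather than jemalloc's real internals; if an allocator sets the flag for memory that still holds old data, as chunk_alloc_dss() did, the caller skips its memset() and reads stale bytes.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical allocator: returns a block and reports whether it is zeroed. */
static void *
alloc_chunk(size_t size, bool *zero)
{
	void *ret = malloc(size);

	if (ret == NULL)
		return (NULL);
	/*
	 * malloc() makes no zeroing guarantee, so the honest answer is false.
	 * Claiming true here would reproduce the bug this commit fixes.
	 */
	*zero = false;
	return (ret);
}

int
main(void)
{
	bool zero = false;
	void *chunk = alloc_chunk(4096, &zero);

	if (chunk == NULL)
		return (1);
	/* The caller trusts the flag: only clear memory reported as dirty. */
	if (zero == false)
		memset(chunk, 0, 4096);
	free(chunk);
	return (0);
}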
src/chunk.c
@@ -125,16 +125,16 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 	ret = chunk_recycle(size, alignment, zero);
 	if (ret != NULL)
 		goto label_return;
 
+	ret = chunk_alloc_mmap(size, alignment, zero);
+	if (ret != NULL)
+		goto label_return;
+
 	if (config_dss) {
 		ret = chunk_alloc_dss(size, alignment, zero);
 		if (ret != NULL)
 			goto label_return;
 	}
-	ret = chunk_alloc_mmap(size, alignment);
-	if (ret != NULL) {
-		*zero = true;
-		goto label_return;
-	}
 
 	/* All strategies for allocation failed. */
 	ret = NULL;

src/chunk_dss.c
@@ -89,7 +89,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
-			*zero = true;
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);

src/chunk_mmap.c
@@ -18,7 +18,7 @@ malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
 static void	*pages_map(void *addr, size_t size);
 static void	pages_unmap(void *addr, size_t size);
 static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
-    bool unaligned);
+    bool unaligned, bool *zero);
 
 /******************************************************************************/
 
@@ -87,7 +87,7 @@ pages_purge(void *addr, size_t length)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
 {
 	void *ret, *pages;
 	size_t alloc_size, leadsize, trailsize;
@@ -122,11 +122,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
 		mmap_unaligned_tsd_set(&mu);
 	}
 
+	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
 
@@ -177,8 +179,8 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 				 * the reliable-but-expensive method.
 				 */
 				pages_unmap(ret, size);
-				ret = chunk_alloc_mmap_slow(size, alignment,
-				    true);
+				return (chunk_alloc_mmap_slow(size, alignment,
+				    true, zero));
 			} else {
 				/* Clean up unneeded leading space. */
 				pages_unmap(ret, chunksize - offset);
@@ -187,8 +189,10 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 			}
 		}
 	} else
-		ret = chunk_alloc_mmap_slow(size, alignment, false);
+		return (chunk_alloc_mmap_slow(size, alignment, false, zero));
 
 	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
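
For context on the reversed order of preference, a standalone POSIX sketch (not jemalloc code) that obtains one page from each of the two chunk sources: an anonymous mmap() mapping, now tried first, and the data segment (DSS) grown with sbrk(), now the fallback.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);

	/* Preferred source after this change: anonymous mmap(). */
	void *m = mmap(NULL, page, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	/* Fallback source: grow the data segment (DSS) via sbrk(). */
	void *s = sbrk((intptr_t)page);

	printf("mmap chunk: %p\n", m == MAP_FAILED ? NULL : m);
	printf("sbrk chunk: %p\n", s == (void *)-1 ? NULL : s);

	if (m != MAP_FAILED)
		munmap(m, page);
	/* The sbrk()ed page is not released here; this is only a probe. */
	return (0);
}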