Fix potential VM map fragmentation regression.

Revert 245ae6036c (Support --with-lg-page
values larger than actual page size.), because it could cause VM map
fragmentation if the kernel grows mmap()ed memory downward.

This resolves #391.
Author: Jason Evans
Date:   2016-06-07 14:19:50 -07:00
Parent: 48384dc2d8
Commit: 05a9e4ac65

3 changed files with 4 additions and 4 deletions
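
The restored arithmetic relies on mmap() returning page-aligned memory: the
worst-case distance from a page-aligned base to the next alignment-aligned
address is alignment - PAGE bytes, so over-allocating by alignment - PAGE
(rather than by a full alignment) always leaves room for an aligned region of
the requested size. A minimal standalone sketch of that invariant, assuming an
illustrative 4 KiB page (not jemalloc's actual PAGE macro):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE ((size_t)4096) /* assumed page size, illustration only */

    int
    main(void)
    {
    	size_t alignment = 4 * PAGE;
    	uintptr_t base;

    	/* Walk page-aligned bases, as mmap() would return. */
    	for (base = PAGE; base <= 64 * PAGE; base += PAGE) {
    		uintptr_t aligned = (base + alignment - 1) &
    		    ~((uintptr_t)alignment - 1);
    		/* The unused lead-in never exceeds alignment - PAGE. */
    		assert(aligned - base <= alignment - PAGE);
    	}
    	printf("worst-case lead-in is alignment - PAGE bytes\n");
    	return (0);
    }

In particular, when alignment == PAGE no slack is needed at all, which is the
common case the revert restores.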

include/jemalloc/internal/jemalloc_internal.h.in

@@ -754,7 +754,7 @@ sa2u(size_t size, size_t alignment)
 	 * Calculate the size of the over-size run that arena_palloc()
 	 * would need to allocate in order to guarantee the alignment.
 	 */
-	if (usize + large_pad + alignment <= arena_maxrun)
+	if (usize + large_pad + alignment - PAGE <= arena_maxrun)
 		return (usize);
 }
@@ -784,7 +784,7 @@ sa2u(size_t size, size_t alignment)
 	 * Calculate the multi-chunk mapping that huge_palloc() would need in
 	 * order to guarantee the alignment.
 	 */
-	if (usize + alignment < usize) {
+	if (usize + alignment - PAGE < usize) {
 		/* size_t overflow. */
 		return (0);
 	}
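
The guard above must tolerate the subtraction: with - PAGE restored, a
pathologically large usize makes usize + alignment - PAGE wrap below usize,
which is exactly what the check catches. A small hedged illustration, again
with an assumed 4 KiB PAGE:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE ((size_t)4096) /* assumed page size, illustration only */

    int
    main(void)
    {
    	size_t usize = SIZE_MAX - PAGE;	/* pathologically large request */
    	size_t alignment = 4 * PAGE;
    	size_t alloc_size = usize + alignment - PAGE; /* wraps to 2*PAGE - 1 */

    	if (alloc_size < usize)
    		printf("size_t overflow detected; request rejected\n");
    	return (0);
    }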

src/arena.c

@@ -2687,7 +2687,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment;
+	alloc_size = usize + large_pad + alignment - PAGE;
 	malloc_mutex_lock(tsdn, &arena->lock);
 	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);

src/chunk_mmap.c

@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 	void *ret;
 	size_t alloc_size;
-	alloc_size = size + alignment;
+	alloc_size = size + alignment - PAGE;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
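
chunk_alloc_mmap_slow() is the classic over-map-and-trim path: map alloc_size
bytes, advance to the first alignment-aligned address, and munmap() the unused
head and tail. A simplified sketch of the technique (not jemalloc's actual
implementation; mmap_aligned and the 4 KiB PAGE are hypothetical):

    #define _DEFAULT_SOURCE /* MAP_ANONYMOUS on glibc */
    #include <stdint.h>
    #include <sys/mman.h>

    #define PAGE ((size_t)4096) /* assumed page size, illustration only */

    /* alignment must be a power of two and a multiple of PAGE. */
    static void *
    mmap_aligned(size_t size, size_t alignment)
    {
    	size_t alloc_size, lead, trail;
    	uintptr_t addr, aligned;
    	char *base;

    	alloc_size = size + alignment - PAGE;
    	/* Beware size_t wrap-around. */
    	if (alloc_size < size)
    		return (NULL);
    	base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
    	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (base == MAP_FAILED)
    		return (NULL);
    	addr = (uintptr_t)base;
    	aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
    	lead = aligned - addr;
    	trail = alloc_size - lead - size;
    	/* Trim so that exactly [aligned, aligned + size) stays mapped. */
    	if (lead != 0)
    		munmap(base, lead);
    	if (trail != 0)
    		munmap((void *)(aligned + size), trail);
    	return ((void *)aligned);
    }

When alignment == PAGE, alloc_size == size and both trims are no-ops; without
the - PAGE, every such call would map one extra page and munmap() it, leaving
single-page holes behind, which is the fragmentation the revert avoids when
the kernel grows mmap()ed memory downward.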