From 05a9e4ac651eb0c728e83fd883425c4894a2ae2b Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Tue, 7 Jun 2016 14:19:50 -0700
Subject: [PATCH] Fix potential VM map fragmentation regression.

Revert 245ae6036c09cc11a72fab4335495d95cddd5beb (Support --with-lg-page
values larger than actual page size.), because it could cause VM map
fragmentation if the kernel grows mmap()ed memory downward.

This resolves #391.
---
 include/jemalloc/internal/jemalloc_internal.h.in | 4 ++--
 src/arena.c                                      | 2 +-
 src/chunk_mmap.c                                 | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 51bf8974..8f82edd4 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -754,7 +754,7 @@ sa2u(size_t size, size_t alignment)
 		 * Calculate the size of the over-size run that arena_palloc()
 		 * would need to allocate in order to guarantee the alignment.
 		 */
-		if (usize + large_pad + alignment <= arena_maxrun)
+		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
 			return (usize);
 	}
 
@@ -784,7 +784,7 @@ sa2u(size_t size, size_t alignment)
 	 * Calculate the multi-chunk mapping that huge_palloc() would need in
 	 * order to guarantee the alignment.
 	 */
-	if (usize + alignment < usize) {
+	if (usize + alignment - PAGE < usize) {
 		/* size_t overflow. */
 		return (0);
 	}
diff --git a/src/arena.c b/src/arena.c
index c605bcd3..ce62590b 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2687,7 +2687,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 
 	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment;
+	alloc_size = usize + large_pad + alignment - PAGE;
 
 	malloc_mutex_lock(tsdn, &arena->lock);
 	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index f95ae756..73fc497a 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 	void *ret;
 	size_t alloc_size;
 
-	alloc_size = size + alignment;
+	alloc_size = size + alignment - PAGE;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
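
Note on the restored "- PAGE" slack: mmap() returns page-aligned memory and the
requested alignment is a power-of-two multiple of PAGE, so an over-allocation
of size + alignment - PAGE bytes is already guaranteed to contain an
alignment-aligned range of size bytes; rounding the page-aligned base up to the
alignment skips at most alignment - PAGE bytes. The standalone program below is
a minimal sketch of that bound and of the trimming that chunk_alloc_mmap_slow()
performs; it is not jemalloc code, and the hard-coded PAGE value, the example
size/alignment, and the direct mmap()/munmap() calls are assumptions made for
illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define PAGE	((size_t)4096)	/* Illustrative; jemalloc derives PAGE from --with-lg-page. */

int
main(void)
{
	size_t size = 8 * PAGE;		/* Requested size (page multiple). */
	size_t alignment = 16 * PAGE;	/* Requested alignment (power-of-two page multiple). */
	size_t alloc_size = size + alignment - PAGE;	/* Same slack as the restored code. */
	void *base;
	uintptr_t addr, aligned;
	size_t lead, trail;

	base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(base != MAP_FAILED);

	addr = (uintptr_t)base;
	aligned = (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
	lead = (size_t)(aligned - addr);

	/*
	 * base is page-aligned and alignment is a page multiple, so the
	 * round-up gap is a page multiple strictly less than alignment,
	 * i.e. lead <= alignment - PAGE.  The aligned region of size bytes
	 * therefore always fits inside the alloc_size mapping.
	 */
	assert(lead <= alignment - PAGE);
	trail = alloc_size - lead - size;

	printf("lead = %zu pages, trail = %zu pages\n", lead / PAGE, trail / PAGE);

	/* Trim the unused slack, as chunk_alloc_mmap_slow() does. */
	if (lead != 0)
		munmap(base, lead);
	if (trail != 0)
		munmap((void *)(aligned + size), trail);

	return (0);
}

With the reverted arithmetic (size + alignment, no "- PAGE"), the mapping is one
page larger while the used region stays the same, so exactly one additional page
of slack has to be unmapped per over-sized mapping; that extra trimming is what
the commit message associates with VM map fragmentation when the kernel grows
mmap()ed memory downward.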