Support --with-lg-page values larger than actual page size.

During over-allocation in preparation for creating aligned mappings,
allocate one page more than would be necessary if PAGE were the actual
system page size, so that trimming still succeeds even if the system
returns a mapping with less than PAGE alignment.  This allows compiling
with e.g. 64 KiB "pages" on systems that actually use 4 KiB pages.
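
(Illustration, not code from this commit.)  The over-allocate-and-trim
pattern in question looks roughly like the sketch below; the helper name
map_aligned() and the direct mmap()/munmap() calls are assumptions made for
the example.  Over-sizing by the full alignment, rather than by
alignment - PAGE, guarantees that a PAGE-aligned region of the requested
size exists inside the mapping even when the kernel returns an address
aligned only to a smaller real page size.

/*
 * Sketch only: assumes alignment is a power of two and that size and
 * alignment are both multiples of the real system page size.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

void *
map_aligned(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment;	/* full slack, as after this commit */

	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);

	void *addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);

	uintptr_t base = (uintptr_t)addr;
	uintptr_t aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t leadsize = (size_t)(aligned - base);
	size_t trailsize = alloc_size - leadsize - size;

	/* Trim the excess so that only [aligned, aligned + size) remains mapped. */
	if (leadsize != 0)
		munmap(addr, leadsize);
	if (trailsize != 0)
		munmap((void *)(aligned + size), trailsize);

	return ((void *)aligned);
}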

Note that for e.g. --with-lg-page=21, it is also necessary to increase
the chunk size (e.g. --with-malloc-conf=lg_chunk:22) so that there are
at least two "pages" per chunk.  In practice this isn't a particularly
compelling configuration because so much (unusable) virtual memory is
dedicated to chunk headers.
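
(Illustration only.)  A quick sketch of the arithmetic, where the
assumption that the chunk header rounds up to a full "page" is mine and not
part of this commit: with lg_page=21 a "page" is 2 MiB, so a 4 MiB chunk
(lg_chunk:22) holds exactly the required two pages, roughly half of which
is then consumed by the header.

/* Illustrative arithmetic only; header_pages = 1 is an assumption. */
#include <stdio.h>

int
main(void)
{
	size_t page = (size_t)1 << 21;		/* --with-lg-page=21: 2 MiB "page" */
	size_t chunksize = (size_t)1 << 22;	/* lg_chunk:22: 4 MiB chunk */
	size_t pages_per_chunk = chunksize / page;	/* 2, the required minimum */
	size_t header_pages = 1;	/* chunk header rounded up to a whole "page" */

	printf("pages per chunk: %zu, usable: %zu\n",
	    pages_per_chunk, pages_per_chunk - header_pages);
	return (0);
}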
Author: Jason Evans
Date:   2016-04-06 11:54:44 -07:00
Parent: 96aa67aca8
Commit: 245ae6036c

4 changed files with 6 additions and 6 deletions

@@ -17,8 +17,8 @@ typedef unsigned long bitmap_t;
 /*
  * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffsl more than 2^3 times, use a
- * tree instead.
+ * force linear search, if we would have to call ffs_lu() more than 2^3 times,
+ * use a tree instead.
  */
 #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
 # define USE_TREE
 #endif

@@ -741,7 +741,7 @@ sa2u(size_t size, size_t alignment)
 		 * Calculate the size of the over-size run that arena_palloc()
 		 * would need to allocate in order to guarantee the alignment.
 		 */
-		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+		if (usize + large_pad + alignment <= arena_maxrun)
 			return (usize);
 	}
@@ -771,7 +771,7 @@ sa2u(size_t size, size_t alignment)
 	 * Calculate the multi-chunk mapping that huge_palloc() would need in
 	 * order to guarantee the alignment.
 	 */
-	if (usize + alignment - PAGE < usize) {
+	if (usize + alignment < usize) {
 		/* size_t overflow. */
 		return (0);
 	}

@@ -2500,7 +2500,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment - PAGE;
+	alloc_size = usize + large_pad + alignment;
 	malloc_mutex_lock(&arena->lock);
 	run = arena_run_alloc_large(arena, alloc_size, false);

@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 	void *ret;
 	size_t alloc_size;
-	alloc_size = size + alignment - PAGE;
+	alloc_size = size + alignment;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);