diff --git a/include/jemalloc/internal/bitmap.h b/include/jemalloc/internal/bitmap.h
index 894695f4..36f38b59 100644
--- a/include/jemalloc/internal/bitmap.h
+++ b/include/jemalloc/internal/bitmap.h
@@ -17,8 +17,8 @@ typedef unsigned long bitmap_t;
 
 /*
  * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffsl more than 2^3 times, use a
- * tree instead.
+ * force linear search, if we would have to call ffs_lu() more than 2^3 times,
+ * use a tree instead.
  */
 #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
 #  define USE_TREE
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 55ca7140..0b57b82a 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -741,7 +741,7 @@ sa2u(size_t size, size_t alignment)
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
-		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+		if (usize + large_pad + alignment <= arena_maxrun)
			return (usize);
	}
 
@@ -771,7 +771,7 @@ sa2u(size_t size, size_t alignment)
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
-	if (usize + alignment - PAGE < usize) {
+	if (usize + alignment < usize) {
		/* size_t overflow. */
		return (0);
	}
diff --git a/src/arena.c b/src/arena.c
index d884dc4c..3373e1d8 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2500,7 +2500,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
		return (NULL);
 
	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment - PAGE;
+	alloc_size = usize + large_pad + alignment;
 
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc_large(arena, alloc_size, false);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 56b2ee42..e2e66bc9 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
	void *ret;
	size_t alloc_size;
 
-	alloc_size = size + alignment - PAGE;
+	alloc_size = size + alignment;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
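A note on the arithmetic being changed here: to carve an `alignment`-aligned
block of `size` bytes out of a mapping, over-requesting by `alignment` bytes is
always sufficient, since the distance from the mapping's base to the next
aligned boundary is strictly less than `alignment`. The old `size + alignment -
PAGE` is the tight bound only when the base is already page-aligned (as with
mmap()) and `alignment` is a page multiple; the patched code requests the
plainly safe `size + alignment` and updates the wrap-around guard in sa2u() to
match the new expression, at the cost of at most one extra page of slack. The
sketch below is illustrative only, not jemalloc code: `PAGE`, `aligned_carve()`,
and the malloc()-based backing are assumptions for the demo, and `alignment` is
assumed to be a power of two so the round-up mask works.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE	((size_t)4096)	/* stand-in for jemalloc's page size */

/*
 * Over-request by `alignment` bytes (as the patched code does), then
 * round the base address up to the next aligned boundary.  Because
 * aligned - base < alignment, the aligned block of `size` bytes always
 * fits inside the `size + alignment` request.
 */
static void *
aligned_carve(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment;
	uintptr_t base, aligned;
	void *mem;

	/* Beware size_t wrap-around, as in sa2u()/chunk_alloc_mmap_slow(). */
	if (alloc_size < size)
		return (NULL);

	mem = malloc(alloc_size);
	if (mem == NULL)
		return (NULL);

	base = (uintptr_t)mem;
	/* Round up; requires alignment to be a power of two. */
	aligned = (base + (alignment - 1)) & ~((uintptr_t)alignment - 1);
	return ((void *)aligned);	/* leaks `mem`; fine for a demo */
}

int
main(void)
{
	void *p = aligned_carve(3 * PAGE, 2 * PAGE);

	printf("%p is 2*PAGE-aligned: %d\n", p,
	    (int)(((uintptr_t)p & (2 * PAGE - 1)) == 0));
	return (0);
}

With the old `- PAGE` variant, this demo would only be guaranteed to work if
`mem` came from a page-aligned source such as mmap(); malloc() makes no such
promise, which is why the sketch uses the full `size + alignment` bound that
the patch adopts.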