diff --git a/INSTALL b/INSTALL index 042f8291..705f0ff5 100644 --- a/INSTALL +++ b/INSTALL @@ -219,9 +219,12 @@ any of the following arguments (not a definitive list) to 'configure': documentation. --with-lg-page= - Specify the base 2 log of the system page size. This option is only useful - when cross compiling, since the configure script automatically determines - the host's page size by default. + Specify the base 2 log of the allocator page size, which must in turn be at + least as large as the system page size. By default the configure script + determines the host's page size and sets the allocator page size equal to + the system page size, so this option need not be specified unless the + system page size may change between configuration and execution, e.g. when + cross compiling. --with-lg-page-sizes= Specify the comma-separated base 2 logs of the page sizes to support. This diff --git a/include/jemalloc/internal/pages_externs.h b/include/jemalloc/internal/pages_externs.h index 7e34efb3..af9a01b8 100644 --- a/include/jemalloc/internal/pages_externs.h +++ b/include/jemalloc/internal/pages_externs.h @@ -16,16 +16,14 @@ static const bool pages_can_purge_forced = #endif ; -void *pages_map(void *addr, size_t size, bool *commit); +void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); void pages_unmap(void *addr, size_t size); -void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, - size_t size, bool *commit); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge_lazy(void *addr, size_t size); bool pages_purge_forced(void *addr, size_t size); bool pages_huge(void *addr, size_t size); bool pages_nohuge(void *addr, size_t size); -void pages_boot(void); +bool pages_boot(void); #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt index 34c27897..649a689f 100644 --- 
a/include/jemalloc/internal/private_symbols.txt +++ b/include/jemalloc/internal/private_symbols.txt @@ -360,7 +360,6 @@ pages_map pages_nohuge pages_purge_forced pages_purge_lazy -pages_trim pages_unmap percpu_arena_choose percpu_arena_ind_limit diff --git a/src/extent_mmap.c b/src/extent_mmap.c index 9381dc16..b1862753 100644 --- a/src/extent_mmap.c +++ b/src/extent_mmap.c @@ -6,66 +6,14 @@ /******************************************************************************/ -static void * -extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, - bool *commit) { - void *ret; - size_t alloc_size; - - alloc_size = size + alignment - PAGE; - /* Beware size_t wrap-around. */ - if (alloc_size < size) { - return NULL; - } - do { - void *pages; - size_t leadsize; - pages = pages_map(NULL, alloc_size, commit); - if (pages == NULL) { - return NULL; - } - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size, commit); - } while (ret == NULL); - - assert(ret != NULL); - *zero = true; - return ret; -} - void * extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { - void *ret; - size_t offset; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in one or two calls to - * pages_unmap(). - * - * Optimistically try mapping precisely the right amount before falling - * back to the slow method, with the expectation that the optimistic - * approach works most of the time. 
- */ - - assert(alignment != 0); - - ret = pages_map(new_addr, size, commit); - if (ret == NULL || ret == new_addr) { - return ret; + void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, + PAGE), commit); + if (ret == NULL) { + return NULL; } - assert(new_addr == NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); - if (offset != 0) { - pages_unmap(ret, size); - return extent_alloc_mmap_slow(size, alignment, zero, commit); - } - assert(ret != NULL); *zero = true; return ret; diff --git a/src/jemalloc.c b/src/jemalloc.c index 0297cf56..ea632c2e 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1220,7 +1220,9 @@ malloc_init_hard_a0_locked() { } } } - pages_boot(); + if (pages_boot()) { + return true; + } if (base_boot(TSDN_NULL)) { return true; } diff --git a/src/pages.c b/src/pages.c index 7fa254f7..46c307b8 100644 --- a/src/pages.c +++ b/src/pages.c @@ -12,6 +12,9 @@ /******************************************************************************/ /* Data. */ +/* Actual operating system page size, detected during bootstrap, <= PAGE. */ +static size_t os_page; + #ifndef _WIN32 # define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) # define PAGES_PROT_DECOMMIT (PROT_NONE) @@ -20,20 +23,26 @@ static int mmap_flags; static bool os_overcommits; /******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
+ */ -void * -pages_map(void *addr, size_t size, bool *commit) { - assert(PAGE_ADDR2BASE(addr) == addr); - assert(PAGE_CEILING(size) == size); +static void os_pages_unmap(void *addr, size_t size); - void *ret; +/******************************************************************************/ +static void * +os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); assert(size != 0); if (os_overcommits) { *commit = true; } + void *ret; #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is @@ -59,19 +68,48 @@ pages_map(void *addr, size_t size, bool *commit) { /* * We succeeded in mapping memory, but not in the right place. */ - pages_unmap(ret, size); + os_pages_unmap(ret, size); ret = NULL; } #endif - assert(ret == NULL || (addr == NULL && ret != addr) - || (addr != NULL && ret == addr)); + assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && + ret == addr)); return ret; } -void -pages_unmap(void *addr, size_t size) { - assert(PAGE_ADDR2BASE(addr) == addr); - assert(PAGE_CEILING(size) == size); +static void * +os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + os_pages_unmap(addr, alloc_size); + void *new_addr = os_pages_map(ret, size, PAGE, commit); + if (new_addr == ret) { + return ret; + } + if (new_addr != NULL) { + os_pages_unmap(new_addr, size); + } + return NULL; +#else + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) { + os_pages_unmap(addr, leadsize); + } + if (trailsize != 0) { + os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); + } + return ret; +#endif +} + +static void +os_pages_unmap(void *addr, size_t size) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, 
os_page) == size); #ifdef _WIN32 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) @@ -84,50 +122,80 @@ pages_unmap(void *addr, size_t size) { buferror(get_errno(), buf, sizeof(buf)); malloc_printf("<jemalloc>: Error in " #ifdef _WIN32 - "VirtualFree" + "VirtualFree" #else - "munmap" + "munmap" #endif - "(): %s\n", buf); + "(): %s\n", buf); if (opt_abort) { abort(); } } } -void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, - bool *commit) { - void *ret = (void *)((uintptr_t)addr + leadsize); - - assert(alloc_size >= leadsize + size); -#ifdef _WIN32 - { - void *new_addr; - - pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size, commit); - if (new_addr == ret) { - return ret; - } - if (new_addr) { - pages_unmap(new_addr, size); - } +static void * +pages_map_slow(size_t size, size_t alignment, bool *commit) { + size_t alloc_size = size + alignment - os_page; + /* Beware size_t wrap-around. */ + if (alloc_size < size) { return NULL; } -#else - { - size_t trailsize = alloc_size - leadsize - size; - if (leadsize != 0) { - pages_unmap(addr, leadsize); - } - if (trailsize != 0) { - pages_unmap((void *)((uintptr_t)ret + size), trailsize); + void *ret; + do { + void *pages = os_pages_map(NULL, alloc_size, alignment, commit); + if (pages == NULL) { + return NULL; } + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) + - (uintptr_t)pages; + ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); + } while (ret == NULL); + + assert(ret != NULL); + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void * +pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(alignment >= PAGE); + assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. 
The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * os_pages_unmap(), and it can leave holes in the process's virtual + * memory map if memory grows downward. + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. + */ + + void *ret = os_pages_map(addr, size, os_page, commit); + if (ret == NULL || ret == addr) { return ret; } -#endif + assert(addr == NULL); + if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { + os_pages_unmap(ret, size); + return pages_map_slow(size, alignment, commit); + } + + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void +pages_unmap(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + os_pages_unmap(addr, size); } static bool @@ -155,7 +223,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) { * We succeeded in mapping memory, but not in the right * place. 
*/ - pages_unmap(result, size); + os_pages_unmap(result, size); return true; } return false; @@ -239,6 +307,21 @@ pages_nohuge(void *addr, size_t size) { #endif } +static size_t +os_page_detect(void) { +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + long result = sysconf(_SC_PAGESIZE); + if (result == -1) { + return LG_PAGE; + } + return (size_t)result; +#endif +} + #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT static bool os_overcommits_sysctl(void) { @@ -300,8 +383,17 @@ os_overcommits_proc(void) { } #endif -void +bool pages_boot(void) { + os_page = os_page_detect(); + if (os_page > PAGE) { + malloc_write("<jemalloc>: Unsupported system page size\n"); + if (opt_abort) { + abort(); + } + return true; + } + #ifndef _WIN32 mmap_flags = MAP_PRIVATE | MAP_ANON; #endif @@ -318,4 +410,6 @@ pages_boot(void) { #else os_overcommits = false; #endif + + return false; } diff --git a/test/unit/pack.c b/test/unit/pack.c index 5da4ae12..edfc548f 100644 --- a/test/unit/pack.c +++ b/test/unit/pack.c @@ -6,7 +6,7 @@ #if LG_PAGE <= 14 #define SZ (ZU(1) << (LG_PAGE - 2)) #else -#define SZ 4096 +#define SZ ZU(4096) #endif /* diff --git a/test/unit/pages.c b/test/unit/pages.c index 30d69592..4457f369 100644 --- a/test/unit/pages.c +++ b/test/unit/pages.c @@ -7,7 +7,7 @@ TEST_BEGIN(test_pages_huge) { alloc_size = HUGEPAGE * 2 - PAGE; commit = true; - pages = pages_map(NULL, alloc_size, PAGE, &commit); + pages = pages_map(NULL, alloc_size, PAGE, &commit); assert_ptr_not_null(pages, "Unexpected pages_map() error"); hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));