Support --with-lg-page values larger than system page size.

All mappings continue to be PAGE-aligned, even if the system page size
is smaller.  This change is primarily intended to provide a mechanism
for supporting multiple page sizes with the same binary; smaller page
sizes work better in conjunction with jemalloc's design.

This resolves #467.
commit da4cff0279 (parent 45f087eb03)
Jason Evans, 2017-04-16 16:23:32 -07:00

8 changed files with 155 additions and 111 deletions
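For readers landing on this commit from outside the codebase: the allocator's logical page (PAGE, i.e. 1 << LG_PAGE) may now be larger than the page the kernel actually hands out, so the mapping layer over-allocates and trims the unaligned edges until every mapping starts on a PAGE boundary. Below is a minimal standalone sketch of that over-allocate-and-trim technique; the names, flags, and error handling are illustrative, not jemalloc's actual code.

/*
 * Standalone sketch of over-allocate-and-trim: emulate a 64 KiB
 * "allocator page" on a system whose real page size is smaller.
 * Illustrative only; jemalloc's version lives in src/pages.c.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define LG_PAGE 16                      /* as if configured --with-lg-page=16 */
#define PAGE ((size_t)1 << LG_PAGE)     /* 64 KiB allocator page */

static void *
map_page_aligned(size_t size) {
    size_t os_page = (size_t)sysconf(_SC_PAGESIZE);
    /* Over-allocate so a PAGE-aligned region of `size` bytes must exist. */
    size_t alloc_size = size + PAGE - os_page;
    char *p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        return NULL;
    }
    uintptr_t base = ((uintptr_t)p + PAGE - 1) & ~(uintptr_t)(PAGE - 1);
    size_t lead = base - (uintptr_t)p;
    size_t trail = alloc_size - lead - size;
    /* Trim the unaligned edges; the kernel only needs os_page alignment. */
    if (lead != 0) {
        munmap(p, lead);
    }
    if (trail != 0) {
        munmap(p + lead + size, trail);
    }
    return (void *)base;
}

int main(void) {
    void *p = map_page_aligned(PAGE);
    printf("%p aligned to PAGE: %d\n", p,
        p != NULL && ((uintptr_t)p & (PAGE - 1)) == 0);
    return 0;
}

The trimmed lead and trail are always multiples of the OS page (mmap() returns OS-page-aligned addresses and PAGE is a multiple of the OS page), so the partial munmap() calls are legal; the new pages_map_slow() and os_pages_trim() in src/pages.c below implement the same dance, with retries.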

INSTALL

@@ -219,9 +219,12 @@ any of the following arguments (not a definitive list) to 'configure':
     documentation.
 
 --with-lg-page=<lg-page>
-    Specify the base 2 log of the system page size.  This option is only useful
-    when cross compiling, since the configure script automatically determines
-    the host's page size by default.
+    Specify the base 2 log of the allocator page size, which must in turn be at
+    least as large as the system page size.  By default the configure script
+    determines the host's page size and sets the allocator page size equal to
+    the system page size, so this option need not be specified unless the
+    system page size may change between configuration and execution, e.g. when
+    cross compiling.
 
 --with-lg-page-sizes=<lg-page-sizes>
     Specify the comma-separated base 2 logs of the page sizes to support.  This
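As background for the wording above: <lg-page> becomes the compile-time LG_PAGE constant, from which the allocator page and its helpers derive. The macro shapes below follow jemalloc's conventions but are reproduced here as an illustrative sketch; the runtime check mirrors what the new pages_boot() enforces.

/* Illustrative sketch; macro shapes follow jemalloc's conventions. */
#include <stdbool.h>
#include <unistd.h>

#define LG_PAGE 14                      /* baked in by --with-lg-page=14 */
#define PAGE ((size_t)(1U << LG_PAGE))  /* 16 KiB allocator page */
#define PAGE_MASK ((size_t)(PAGE - 1))
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)

/* The binary remains usable as long as the real page size is <= PAGE. */
static bool
page_size_compatible(void) {
    return (size_t)sysconf(_SC_PAGESIZE) <= PAGE;
}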

include/jemalloc/internal/pages_externs.h

@@ -16,16 +16,14 @@ static const bool pages_can_purge_forced =
 #endif
     ;
 
-void *pages_map(void *addr, size_t size, bool *commit);
+void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
 void pages_unmap(void *addr, size_t size);
-void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
-    size_t size, bool *commit);
 bool pages_commit(void *addr, size_t size);
 bool pages_decommit(void *addr, size_t size);
 bool pages_purge_lazy(void *addr, size_t size);
 bool pages_purge_forced(void *addr, size_t size);
 bool pages_huge(void *addr, size_t size);
 bool pages_nohuge(void *addr, size_t size);
-void pages_boot(void);
+bool pages_boot(void);
 
 #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
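To make the two signature changes concrete, here is a hypothetical caller of the revised interface; `requested_len` and the error-handling policy are invented for the example.

/* Hypothetical caller; returns true on failure, mirroring jemalloc's
 * boot-function convention. */
static bool
example_boot_and_map(size_t requested_len) {
    if (pages_boot()) {
        /* New failure mode: e.g. the system page size exceeds PAGE. */
        return true;
    }
    size_t len = PAGE_CEILING(requested_len);
    bool commit = false;
    /* New third argument: requested alignment, at least PAGE. */
    void *p = pages_map(NULL, len, PAGE, &commit);
    if (p == NULL) {
        return true;
    }
    /* ... use the mapping ... */
    pages_unmap(p, len);
    return false;
}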

include/jemalloc/internal/private_symbols.txt

@@ -360,7 +360,6 @@ pages_map
 pages_nohuge
 pages_purge_forced
 pages_purge_lazy
-pages_trim
 pages_unmap
 percpu_arena_choose
 percpu_arena_ind_limit

src/extent_mmap.c

@@ -6,66 +6,14 @@
 
 /******************************************************************************/
 
-static void *
-extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero,
-    bool *commit) {
-	void *ret;
-	size_t alloc_size;
-
-	alloc_size = size + alignment - PAGE;
-	/* Beware size_t wrap-around. */
-	if (alloc_size < size) {
-		return NULL;
-	}
-	do {
-		void *pages;
-		size_t leadsize;
-
-		pages = pages_map(NULL, alloc_size, commit);
-		if (pages == NULL) {
-			return NULL;
-		}
-		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
-		    (uintptr_t)pages;
-		ret = pages_trim(pages, alloc_size, leadsize, size, commit);
-	} while (ret == NULL);
-
-	assert(ret != NULL);
-	*zero = true;
-	return ret;
-}
-
 void *
 extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
     bool *commit) {
-	void *ret;
-	size_t offset;
-
-	/*
-	 * Ideally, there would be a way to specify alignment to mmap() (like
-	 * NetBSD has), but in the absence of such a feature, we have to work
-	 * hard to efficiently create aligned mappings.  The reliable, but
-	 * slow method is to create a mapping that is over-sized, then trim the
-	 * excess.  However, that always results in one or two calls to
-	 * pages_unmap().
-	 *
-	 * Optimistically try mapping precisely the right amount before falling
-	 * back to the slow method, with the expectation that the optimistic
-	 * approach works most of the time.
-	 */
-
-	assert(alignment != 0);
-
-	ret = pages_map(new_addr, size, commit);
-	if (ret == NULL || ret == new_addr) {
-		return ret;
+	void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment,
+	    PAGE), commit);
+	if (ret == NULL) {
+		return NULL;
 	}
-	assert(new_addr == NULL);
-	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
-	if (offset != 0) {
-		pages_unmap(ret, size);
-		return extent_alloc_mmap_slow(size, alignment, zero, commit);
-	}
-
 	assert(ret != NULL);
 	*zero = true;
 	return ret;
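The ALIGNMENT_CEILING(alignment, PAGE) in the new call raises the extent's requested alignment to at least one allocator page, since pages_map() now asserts alignment >= PAGE. A tiny demonstration of the round-up, with the macro re-defined locally in the shape jemalloc uses:

#include <stdio.h>

/* Round s up to a multiple of alignment (a power of two); defined locally
 * here for the demo. */
#define ALIGNMENT_CEILING(s, alignment) \
    (((s) + ((alignment) - 1)) & ~((size_t)(alignment) - 1))

int main(void) {
    /* With a 16 KiB PAGE, a 4 KiB alignment request is raised to 16 KiB;
     * an already-larger power-of-two request passes through unchanged. */
    printf("%zu\n", ALIGNMENT_CEILING((size_t)4096, (size_t)16384));  /* 16384 */
    printf("%zu\n", ALIGNMENT_CEILING((size_t)65536, (size_t)16384)); /* 65536 */
    return 0;
}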

src/jemalloc.c

@@ -1220,7 +1220,9 @@ malloc_init_hard_a0_locked() {
 			}
 		}
 	}
-	pages_boot();
+	if (pages_boot()) {
+		return true;
+	}
 	if (base_boot(TSDN_NULL)) {
 		return true;
 	}

src/pages.c

@@ -12,6 +12,9 @@
 /******************************************************************************/
 /* Data. */
 
+/* Actual operating system page size, detected during bootstrap, <= PAGE. */
+static size_t os_page;
+
 #ifndef _WIN32
 #  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
 #  define PAGES_PROT_DECOMMIT (PROT_NONE)
@@ -20,20 +23,26 @@ static int mmap_flags;
 static bool os_overcommits;
 
 /******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void os_pages_unmap(void *addr, size_t size);
+
+/******************************************************************************/
 
-void *
-pages_map(void *addr, size_t size, bool *commit) {
-	assert(PAGE_ADDR2BASE(addr) == addr);
-	assert(PAGE_CEILING(size) == size);
-
-	void *ret;
-
+static void *
+os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+	assert(ALIGNMENT_CEILING(size, os_page) == size);
 	assert(size != 0);
 
 	if (os_overcommits) {
 		*commit = true;
 	}
 
+	void *ret;
 #ifdef _WIN32
 	/*
 	 * If VirtualAlloc can't allocate at the given address when one is
@@ -59,19 +68,48 @@ pages_map(void *addr, size_t size, bool *commit) {
 		/*
 		 * We succeeded in mapping memory, but not in the right place.
 		 */
-		pages_unmap(ret, size);
+		os_pages_unmap(ret, size);
 		ret = NULL;
 	}
 #endif
-	assert(ret == NULL || (addr == NULL && ret != addr)
-	    || (addr != NULL && ret == addr));
+	assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
+	    ret == addr));
 	return ret;
 }
 
-void
-pages_unmap(void *addr, size_t size) {
-	assert(PAGE_ADDR2BASE(addr) == addr);
-	assert(PAGE_CEILING(size) == size);
+static void *
+os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+    bool *commit) {
+	void *ret = (void *)((uintptr_t)addr + leadsize);
+
+	assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+	os_pages_unmap(addr, alloc_size);
+	void *new_addr = os_pages_map(ret, size, PAGE, commit);
+	if (new_addr == ret) {
+		return ret;
+	}
+	if (new_addr != NULL) {
+		os_pages_unmap(new_addr, size);
+	}
+	return NULL;
+#else
+	size_t trailsize = alloc_size - leadsize - size;
+
+	if (leadsize != 0) {
+		os_pages_unmap(addr, leadsize);
+	}
+	if (trailsize != 0) {
+		os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+	}
+	return ret;
+#endif
+}
+
+static void
+os_pages_unmap(void *addr, size_t size) {
+	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
+	assert(ALIGNMENT_CEILING(size, os_page) == size);
 
 #ifdef _WIN32
 	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
@@ -95,39 +133,69 @@ pages_unmap(void *addr, size_t size) {
 	}
 }
 
-void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
-    bool *commit) {
-	void *ret = (void *)((uintptr_t)addr + leadsize);
-
-	assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
-	{
-		void *new_addr;
-
-		pages_unmap(addr, alloc_size);
-		new_addr = pages_map(ret, size, commit);
-		if (new_addr == ret) {
-			return ret;
-		}
-		if (new_addr) {
-			pages_unmap(new_addr, size);
-		}
-		return NULL;
-	}
-#else
-	{
-		size_t trailsize = alloc_size - leadsize - size;
-
-		if (leadsize != 0) {
-			pages_unmap(addr, leadsize);
-		}
-		if (trailsize != 0) {
-			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
-		}
-		return ret;
-	}
-#endif
-}
+static void *
+pages_map_slow(size_t size, size_t alignment, bool *commit) {
+	size_t alloc_size = size + alignment - os_page;
+	/* Beware size_t wrap-around. */
+	if (alloc_size < size) {
+		return NULL;
+	}
+
+	void *ret;
+	do {
+		void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
+		if (pages == NULL) {
+			return NULL;
+		}
+		size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
+		    - (uintptr_t)pages;
+		ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
+	} while (ret == NULL);
+
+	assert(ret != NULL);
+	assert(PAGE_ADDR2BASE(ret) == ret);
+	return ret;
+}
+
+void *
+pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
+	assert(alignment >= PAGE);
+	assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
+
+	/*
+	 * Ideally, there would be a way to specify alignment to mmap() (like
+	 * NetBSD has), but in the absence of such a feature, we have to work
+	 * hard to efficiently create aligned mappings.  The reliable, but
+	 * slow method is to create a mapping that is over-sized, then trim the
+	 * excess.  However, that always results in one or two calls to
+	 * os_pages_unmap(), and it can leave holes in the process's virtual
+	 * memory map if memory grows downward.
+	 *
+	 * Optimistically try mapping precisely the right amount before falling
+	 * back to the slow method, with the expectation that the optimistic
+	 * approach works most of the time.
+	 */
+	void *ret = os_pages_map(addr, size, os_page, commit);
+	if (ret == NULL || ret == addr) {
+		return ret;
+	}
+	assert(addr == NULL);
+	if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
+		os_pages_unmap(ret, size);
+		return pages_map_slow(size, alignment, commit);
+	}
+
+	assert(PAGE_ADDR2BASE(ret) == ret);
+	return ret;
+}
+
+void
+pages_unmap(void *addr, size_t size) {
+	assert(PAGE_ADDR2BASE(addr) == addr);
+	assert(PAGE_CEILING(size) == size);
+
+	os_pages_unmap(addr, size);
+}
 
 static bool
@@ -155,7 +223,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
 			 * We succeeded in mapping memory, but not in the right
 			 * place.
 			 */
-			pages_unmap(result, size);
+			os_pages_unmap(result, size);
 			return true;
 		}
 		return false;
@@ -239,6 +307,21 @@ pages_nohuge(void *addr, size_t size) {
 #endif
 }
 
+static size_t
+os_page_detect(void) {
+#ifdef _WIN32
+	SYSTEM_INFO si;
+	GetSystemInfo(&si);
+	return si.dwPageSize;
+#else
+	long result = sysconf(_SC_PAGESIZE);
+	if (result == -1) {
+		return LG_PAGE;
+	}
+	return (size_t)result;
+#endif
+}
+
 #ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
 static bool
 os_overcommits_sysctl(void) {
@@ -300,8 +383,17 @@ os_overcommits_proc(void) {
 }
 #endif
 
-void
+bool
 pages_boot(void) {
+	os_page = os_page_detect();
+	if (os_page > PAGE) {
+		malloc_write("<jemalloc>: Unsupported system page size\n");
+		if (opt_abort) {
+			abort();
+		}
+		return true;
+	}
+
 #ifndef _WIN32
 	mmap_flags = MAP_PRIVATE | MAP_ANON;
 #endif
@@ -318,4 +410,6 @@ pages_boot(void) {
 #else
 	os_overcommits = false;
 #endif
+
+	return false;
 }

test/unit/pack.c

@@ -6,7 +6,7 @@
 #if LG_PAGE <= 14
 #define SZ (ZU(1) << (LG_PAGE - 2))
 #else
-#define SZ 4096
+#define SZ ZU(4096)
 #endif
 
 /*
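The ZU() wrapper matters because the other #if branch, (ZU(1) << (LG_PAGE - 2)), already has type size_t; without the cast, SZ would be a plain int in one configuration and a size_t in the other. A minimal illustration, with ZU() defined locally to match jemalloc's convention:

#include <stdio.h>

#define ZU(z) ((size_t)z)   /* matches jemalloc's size_t-literal macro */

#define SZ_OLD 4096         /* int */
#define SZ_NEW ZU(4096)     /* size_t, like (ZU(1) << (LG_PAGE - 2)) */

int main(void) {
    /* printf("%zu\n", SZ_OLD) would pass an int where %zu expects a
     * size_t -- undefined behavior where the two widths differ. */
    printf("%zu\n", SZ_NEW);
    printf("%d\n", SZ_OLD);
    return 0;
}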

test/unit/pages.c

@@ -7,7 +7,7 @@ TEST_BEGIN(test_pages_huge) {
 
 	alloc_size = HUGEPAGE * 2 - PAGE;
 	commit = true;
-	pages = pages_map(NULL, alloc_size, &commit);
+	pages = pages_map(NULL, alloc_size, PAGE, &commit);
 	assert_ptr_not_null(pages, "Unexpected pages_map() error");
 
 	hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
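A natural follow-on check, sketched here hypothetically (it is not part of this commit), would exercise the new alignment argument directly using the same test macros:

/* Hypothetical extra assertion for test/unit/pages.c; not in this commit. */
bool commit2 = true;
void *aligned = pages_map(NULL, HUGEPAGE, HUGEPAGE, &commit2);
assert_ptr_not_null(aligned, "Unexpected pages_map() error");
assert_zu_eq((size_t)((uintptr_t)aligned & (HUGEPAGE - 1)), 0,
    "pages_map() should honor the alignment argument");
pages_unmap(aligned, HUGEPAGE);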