#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero,
    bool *commit) {
	void *ret;
	size_t alloc_size;

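	/*
	 * pages_map() returns PAGE-aligned addresses, so over-allocating by
	 * (alignment - PAGE) bytes guarantees that an alignment-aligned span
	 * of size bytes fits within the mapping, wherever it lands.
	 */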
	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size) {
		return NULL;
	}
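	/*
	 * pages_trim() can fail on systems where trimming means unmapping and
	 * re-mapping at the aligned address (e.g. Windows), since another
	 * thread may claim the address range in between; retry until an
	 * aligned mapping is obtained.
	 */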
	do {
		void *pages;
		size_t leadsize;
		pages = pages_map(NULL, alloc_size, commit);
		if (pages == NULL) {
			return NULL;
		}
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size, commit);
	} while (ret == NULL);

	assert(ret != NULL);
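	/* Fresh anonymous mappings are zero-filled by the kernel. */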
	*zero = true;
	return ret;
}

void *
extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit) {
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings. The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess. However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);

	ret = pages_map(new_addr, size, commit);
	if (ret == NULL || ret == new_addr) {
		return ret;
	}
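	/*
	 * pages_map() returns either the requested address or NULL when
	 * new_addr is non-NULL, both of which are handled above, so reaching
	 * this point implies no placement hint was given.
	 */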
	assert(new_addr == NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return extent_alloc_mmap_slow(size, alignment, zero, commit);
	}

	assert(ret != NULL);
	*zero = true;
	return ret;
}

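/*
 * Returns false if the pages were unmapped, or true if munmap() support is
 * compiled out (config_munmap is false) and the caller must retain the
 * extent for later reuse instead of treating it as freed.
 */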
bool
extent_dalloc_mmap(void *addr, size_t size) {
	if (config_munmap) {
		pages_unmap(addr, size);
	}
	return !config_munmap;
}