#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
static void *
|
2015-08-05 01:49:46 +08:00
|
|
|
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
|
2010-01-24 18:53:40 +08:00
|
|
|
{
|
2012-04-12 09:13:45 +08:00
|
|
|
void *ret, *pages;
|
2012-04-22 12:27:46 +08:00
|
|
|
size_t alloc_size, leadsize;
|
2010-01-24 18:53:40 +08:00
|
|
|
|
2012-04-12 09:13:45 +08:00
|
|
|
alloc_size = size + alignment - PAGE;
|
2010-01-24 18:53:40 +08:00
|
|
|
/* Beware size_t wrap-around. */
|
2012-04-12 09:13:45 +08:00
|
|
|
if (alloc_size < size)
|
2010-01-24 18:53:40 +08:00
|
|
|
return (NULL);
|
2012-04-22 12:27:46 +08:00
|
|
|
do {
|
|
|
|
pages = pages_map(NULL, alloc_size);
|
|
|
|
if (pages == NULL)
|
|
|
|
return (NULL);
|
|
|
|
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
|
|
|
|
(uintptr_t)pages;
|
|
|
|
ret = pages_trim(pages, alloc_size, leadsize, size);
|
|
|
|
} while (ret == NULL);
|
2010-01-24 18:53:40 +08:00
|
|
|
|
2012-04-22 04:33:48 +08:00
|
|
|
assert(ret != NULL);
|
|
|
|
*zero = true;
|
2015-08-05 01:49:46 +08:00
|
|
|
*commit = true;
|
2010-01-24 18:53:40 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2012-04-12 09:13:45 +08:00
|
|
|
void *
|
2015-08-05 01:49:46 +08:00
|
|
|
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
|
2010-01-24 18:53:40 +08:00
|
|
|
{
|
|
|
|
void *ret;
|
2012-04-22 10:17:21 +08:00
|
|
|
size_t offset;
|
2010-01-24 18:53:40 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Ideally, there would be a way to specify alignment to mmap() (like
|
|
|
|
* NetBSD has), but in the absence of such a feature, we have to work
|
|
|
|
* hard to efficiently create aligned mappings. The reliable, but
|
|
|
|
* slow method is to create a mapping that is over-sized, then trim the
|
2012-05-10 04:05:04 +08:00
|
|
|
* excess. However, that always results in one or two calls to
|
2010-01-24 18:53:40 +08:00
|
|
|
* pages_unmap().
|
|
|
|
*
|
2012-05-10 04:05:04 +08:00
|
|
|
* Optimistically try mapping precisely the right amount before falling
|
|
|
|
* back to the slow method, with the expectation that the optimistic
|
|
|
|
* approach works most of the time.
|
2010-01-24 18:53:40 +08:00
|
|
|
*/
|
|
|
|
|
2012-05-10 04:05:04 +08:00
|
|
|
assert(alignment != 0);
|
|
|
|
assert((alignment & chunksize_mask) == 0);
|
|
|
|
|
2012-04-22 10:17:21 +08:00
|
|
|
ret = pages_map(NULL, size);
|
|
|
|
if (ret == NULL)
|
|
|
|
return (NULL);
|
|
|
|
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
|
|
|
|
if (offset != 0) {
|
2012-05-10 04:05:04 +08:00
|
|
|
pages_unmap(ret, size);
|
2015-08-05 01:49:46 +08:00
|
|
|
return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
|
2012-04-22 10:17:21 +08:00
|
|
|
}
|
2010-01-24 18:53:40 +08:00
|
|
|
|
2012-04-22 04:33:48 +08:00
|
|
|
assert(ret != NULL);
|
|
|
|
*zero = true;
|
2015-08-05 01:49:46 +08:00
|
|
|
*commit = true;
|
2010-01-24 18:53:40 +08:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2012-04-13 11:20:58 +08:00
|
|
|
bool
|
2014-05-16 13:22:27 +08:00
|
|
|
chunk_dalloc_mmap(void *chunk, size_t size)
|
2010-01-24 18:53:40 +08:00
|
|
|
{
|
|
|
|
|
2012-04-13 11:20:58 +08:00
|
|
|
if (config_munmap)
|
|
|
|
pages_unmap(chunk, size);
|
|
|
|
|
2014-10-04 01:16:09 +08:00
|
|
|
return (!config_munmap);
|
2010-01-24 18:53:40 +08:00
|
|
|
}
|