#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
 * potentially avoid some system calls.
 */
malloc_tsd_data(static, mmap_unaligned, bool, false)
malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
    malloc_tsd_no_cleanup)
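
/*
 * Illustrative note: the malloc_tsd_data()/malloc_tsd_funcs() macros above
 * expand to the thread-specific-data storage and accessors used later in this
 * file (mmap_unaligned_booted, mmap_unaligned_tsd_boot(),
 * mmap_unaligned_tsd_get(), and mmap_unaligned_tsd_set()); see the
 * malloc_tsd_* macro definitions in the internal tsd header for the exact
 * expansion.
 */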

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool unaligned, bool *zero);

/******************************************************************************/

static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

void
pages_purge(void *addr, size_t length)
{

#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
#else
# error "No method defined for purging unused dirty pages."
#endif
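	/*
	 * Illustrative note: MADV_DONTNEED (Linux) discards the pages
	 * immediately, so subsequently touched pages read back as zeros,
	 * whereas MADV_FREE (BSD/Darwin) only marks them reclaimable and
	 * their contents may persist until the kernel needs the memory.
	 * Callers therefore should not assume purged pages are zeroed.
	 */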
	madvise(addr, length, JEMALLOC_MADV_PURGE);
}

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize, trailsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	pages = pages_map(NULL, alloc_size);
	if (pages == NULL)
		return (NULL);
	leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
	    (uintptr_t)pages;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)pages + leadsize);
	if (leadsize != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;
		pages_unmap(pages, leadsize);
	}
	if (trailsize != 0)
		pages_unmap((void *)((uintptr_t)ret + size), trailsize);
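	/*
	 * Worked example of the trimming above (illustrative numbers only):
	 * with PAGE = 4 KiB and size = alignment = 4 MiB, alloc_size is
	 * 8 MiB - 4 KiB.  If mmap() returns pages at an address 0x167000
	 * bytes past a 4 MiB boundary, then leadsize = 0x400000 - 0x167000 =
	 * 0x299000, ret = pages + 0x299000 (now aligned), and trailsize =
	 * 0x166000; both excess runs are unmapped above.
	 */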

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false && mmap_unaligned_booted) {
		bool mu = false;
		mmap_unaligned_tsd_set(&mu);
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

	if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) {
		size_t offset;

		ret = pages_map(NULL, size);
		if (ret == NULL)
			return (NULL);

		offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
		if (offset != 0) {
			bool mu = true;
			mmap_unaligned_tsd_set(&mu);
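			/*
			 * Illustrative example (common case where alignment ==
			 * chunksize == 4 MiB): if ret is 0x167000 bytes past a
			 * chunk boundary, map an extra chunksize - offset =
			 * 0x299000 bytes directly after ret, then unmap the
			 * first 0x299000 bytes and advance ret by that amount,
			 * leaving a chunk-aligned, size-byte mapping.
			 */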
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				return (chunk_alloc_mmap_slow(size, alignment,
				    true, zero));
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		return (chunk_alloc_mmap_slow(size, alignment, false, zero));

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);
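
	/*
	 * Return true (i.e. "not unmapped") when munmap() support is
	 * disabled; the caller is expected to retain the virtual memory for
	 * later recycling rather than leak it.
	 */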
	return (config_munmap == false);
}
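
/*
 * Usage sketch (hypothetical caller, assuming the common chunksize-aligned
 * request; chunksize and chunksize_mask are the usual chunk globals):
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc_mmap(chunksize, chunksize, &zero);
 *	if (chunk != NULL) {
 *		assert(((uintptr_t)chunk & chunksize_mask) == 0);
 *		...
 *		if (chunk_dealloc_mmap(chunk, chunksize))
 *			... retain the chunk for recycling ...
 *	}
 */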

bool
chunk_mmap_boot(void)
{

	/*
	 * XXX For the non-TLS implementation of tsd, the first access from
	 * each thread causes memory allocation.  The result is a bootstrapping
	 * problem for this particular use case, so for now just disable it by
	 * leaving it in an unbooted state.
	 */
#ifdef JEMALLOC_TLS
	if (mmap_unaligned_tsd_boot())
		return (true);
#endif

	return (false);
}