#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

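/*
 * Map size bytes of anonymous, read/write memory. A non-NULL addr is a hard
 * placement request: the result is either exactly addr or NULL (a mapping
 * that lands elsewhere is unmapped and reported as failure).
 */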
void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		pages_unmap(ret, size);
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

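/*
 * On Windows, VirtualFree() with MEM_RELEASE requires a size of zero and
 * releases the entire region originally reserved by VirtualAlloc(), so the
 * size argument is consulted only by the munmap() path.
 */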
void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

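/*
 * Trim a mapping of alloc_size bytes down to the size-byte interior range
 * that starts leadsize bytes in. Windows cannot release part of a
 * VirtualAlloc() region, so the whole region is unmapped and the target
 * range re-mapped; that re-map can fail (returning NULL) if another thread
 * claims the address range in the interim.
 */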
void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

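/*
 * pages_commit()/pages_decommit() return true on failure, following
 * jemalloc's bool-as-error convention. The (currently disabled) POSIX
 * implementation emulates decommit by remapping the range PROT_NONE with
 * MAP_FIXED, which atomically discards the old pages; commit remaps it
 * readable/writable.
 */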
static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{

#ifndef _WIN32
	/*
	 * The following decommit/commit implementation is functional, but
	 * always disabled because it doesn't add value beyond improved
	 * debugging (at the cost of extra system calls) on systems that
	 * overcommit.
	 */
	if (false) {
		int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
		void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
		    MAP_FIXED, -1, 0);
		if (result == MAP_FAILED)
			return (true);
		if (result != addr) {
			/*
			 * We succeeded in mapping memory, but not in the right
			 * place.
			 */
			pages_unmap(result, size);
			return (true);
		}
		return (false);
	}
#endif
	return (true);
}

bool
pages_commit(void *addr, size_t size)
{

	return (pages_commit_impl(addr, size, true));
}

bool
pages_decommit(void *addr, size_t size)
{

	return (pages_commit_impl(addr, size, false));
}

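/*
 * Tell the kernel that the contents of [addr, addr+size) are no longer
 * needed. The return value indicates whether the pages may still hold
 * non-zero data: MADV_DONTNEED (where configured) zero-fills the pages on
 * the next access, whereas MADV_FREE and Windows' MEM_RESET may leave the
 * old contents in place until the memory is actually reclaimed.
 */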
bool
pages_purge(void *addr, size_t size)
{
	bool unzeroed;

#ifdef _WIN32
	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
	unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#  define JEMALLOC_MADV_PURGE MADV_DONTNEED
#  define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#  define JEMALLOC_MADV_PURGE MADV_FREE
#  define JEMALLOC_MADV_ZEROS false
# else
#  error "No madvise(2) flag defined for purging unused dirty pages."
# endif
	int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#else
	/* Last resort no-op. */
	unzeroed = true;
#endif
	return (unzeroed);
}