#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/pages.h"

#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/malloc_io.h"

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
#include <sys/sysctl.h>
#endif

/******************************************************************************/
/* Data. */

/* Actual operating system page size, detected during bootstrap, <= PAGE. */
static size_t os_page;

#ifndef _WIN32
#  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
#  define PAGES_PROT_DECOMMIT (PROT_NONE)
static int mmap_flags;
#endif
static bool os_overcommits;

bool thp_state_madvise;

/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void os_pages_unmap(void *addr, size_t size);

/******************************************************************************/

static void *
os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
    assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
    assert(ALIGNMENT_CEILING(size, os_page) == size);
    assert(size != 0);

    if (os_overcommits) {
        *commit = true;
    }

    void *ret;
#ifdef _WIN32
    /*
     * If VirtualAlloc can't allocate at the given address when one is
     * given, it fails and returns NULL.
     */
    ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
        PAGE_READWRITE);
#else
    /*
     * We don't use MAP_FIXED here, because it can cause the *replacement*
     * of existing mappings, and we only want to create new mappings.
     */
    {
        int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;

        ret = mmap(addr, size, prot, mmap_flags, -1, 0);
    }
    assert(ret != NULL);

    if (ret == MAP_FAILED) {
        ret = NULL;
    } else if (addr != NULL && ret != addr) {
        /*
         * We succeeded in mapping memory, but not in the right place.
         */
        os_pages_unmap(ret, size);
        ret = NULL;
    }
#endif
    assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
        ret == addr));
    return ret;
}

static void *
os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
    bool *commit) {
    void *ret = (void *)((uintptr_t)addr + leadsize);

    assert(alloc_size >= leadsize + size);
#ifdef _WIN32
    os_pages_unmap(addr, alloc_size);
    void *new_addr = os_pages_map(ret, size, PAGE, commit);
    if (new_addr == ret) {
        return ret;
    }
    if (new_addr != NULL) {
        os_pages_unmap(new_addr, size);
    }
    return NULL;
#else
    size_t trailsize = alloc_size - leadsize - size;

    if (leadsize != 0) {
        os_pages_unmap(addr, leadsize);
    }
    if (trailsize != 0) {
        os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
    }
    return ret;
#endif
}

static void
os_pages_unmap(void *addr, size_t size) {
    assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
    assert(ALIGNMENT_CEILING(size, os_page) == size);

#ifdef _WIN32
    if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
    if (munmap(addr, size) == -1)
#endif
    {
        char buf[BUFERROR_BUF];

        buferror(get_errno(), buf, sizeof(buf));
        malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
            "VirtualFree"
#else
            "munmap"
#endif
            "(): %s\n", buf);
        if (opt_abort) {
            abort();
        }
    }
}

static void *
pages_map_slow(size_t size, size_t alignment, bool *commit) {
    size_t alloc_size = size + alignment - os_page;
    /* Beware size_t wrap-around. */
    if (alloc_size < size) {
        return NULL;
    }

    void *ret;
    do {
        void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
        if (pages == NULL) {
            return NULL;
        }
        size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
            - (uintptr_t)pages;
        ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
    } while (ret == NULL);

    assert(ret != NULL);
    assert(PAGE_ADDR2BASE(ret) == ret);
    return ret;
}

void *
pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
    assert(alignment >= PAGE);
    assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * os_pages_unmap(), and it can leave holes in the process's virtual
     * memory map if memory grows downward.
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    void *ret = os_pages_map(addr, size, os_page, commit);
    if (ret == NULL || ret == addr) {
        return ret;
    }
    assert(addr == NULL);
    if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
        os_pages_unmap(ret, size);
        return pages_map_slow(size, alignment, commit);
    }

    assert(PAGE_ADDR2BASE(ret) == ret);
    return ret;
}
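
/*
 * Worked example, with hypothetical numbers, of how the slow path above
 * carves an aligned region out of an over-sized mapping.  Assuming
 * os_page = 4 KiB, size = 8 KiB, and alignment = 2 MiB:
 *
 *   alloc_size = size + alignment - os_page = 8 KiB + 2 MiB - 4 KiB
 *   pages      = os_pages_map(NULL, alloc_size, ...)        (arbitrary address)
 *   leadsize   = ALIGNMENT_CEILING(pages, 2 MiB) - pages    (0 .. 2 MiB - 4 KiB)
 *   ret        = os_pages_trim(pages, alloc_size, leadsize, 8 KiB, ...)
 *
 * os_pages_trim() then unmaps the leading leadsize bytes and the trailing
 * alloc_size - leadsize - size bytes, leaving exactly size bytes starting at
 * a 2 MiB-aligned address.  Any page-multiple size and power-of-two
 * alignment >= PAGE behaves the same way.
 */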

void
pages_unmap(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    os_pages_unmap(addr, size);
}

static bool
pages_commit_impl(void *addr, size_t size, bool commit) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    if (os_overcommits) {
        return true;
    }

#ifdef _WIN32
    return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
        PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
#else
    {
        int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
        void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
            -1, 0);
        if (result == MAP_FAILED) {
            return true;
        }
        if (result != addr) {
            /*
             * We succeeded in mapping memory, but not in the right
             * place.
             */
            os_pages_unmap(result, size);
            return true;
        }
        return false;
    }
#endif
}

bool
pages_commit(void *addr, size_t size) {
    return pages_commit_impl(addr, size, true);
}

bool
pages_decommit(void *addr, size_t size) {
    return pages_commit_impl(addr, size, false);
}

bool
pages_purge_lazy(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    if (!pages_can_purge_lazy) {
        return true;
    }
    if (!pages_can_purge_lazy_runtime) {
        /*
         * Built with lazy purge enabled, but detected it was not
         * supported on the current system.
         */
        return true;
    }

#ifdef _WIN32
    VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
    return false;
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
    return (madvise(addr, size,
#  ifdef MADV_FREE
        MADV_FREE
#  else
        JEMALLOC_MADV_FREE
#  endif
        ) != 0);
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
    return (madvise(addr, size, MADV_DONTNEED) != 0);
#else
    not_reached();
#endif
}

bool
pages_purge_forced(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);

    if (!pages_can_purge_forced) {
        return true;
    }

#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
    return (madvise(addr, size, MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
    /* Try to overlay a new demand-zeroed mapping. */
    return pages_commit(addr, size);
#else
    not_reached();
#endif
}
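
/*
 * Background note on the two purge flavors above: lazy purging (MADV_FREE on
 * Linux/BSD) only marks the pages as reclaimable, so their contents may
 * survive until the kernel actually needs the memory, whereas forced purging
 * (MADV_DONTNEED with the "zeros" semantics) drops the pages immediately,
 * and subsequent access to an anonymous private mapping reads back
 * zero-filled pages.  Both functions follow this file's convention of
 * returning true on failure, i.e. when the pages were not purged.
 */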

bool
pages_huge(void *addr, size_t size) {
    assert(HUGEPAGE_ADDR2BASE(addr) == addr);
    assert(HUGEPAGE_CEILING(size) == size);

#ifdef JEMALLOC_HAVE_MADVISE_HUGE
    return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
    return true;
#endif
}

bool
pages_nohuge(void *addr, size_t size) {
    assert(HUGEPAGE_ADDR2BASE(addr) == addr);
    assert(HUGEPAGE_CEILING(size) == size);

#ifdef JEMALLOC_HAVE_MADVISE_HUGE
    return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
    return false;
#endif
}

bool
pages_dontdump(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
    return madvise(addr, size, MADV_DONTDUMP) != 0;
#else
    return false;
#endif
}

bool
pages_dodump(void *addr, size_t size) {
    assert(PAGE_ADDR2BASE(addr) == addr);
    assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_MADVISE_DONTDUMP
    return madvise(addr, size, MADV_DODUMP) != 0;
#else
    return false;
#endif
}

static size_t
os_page_detect(void) {
#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    return si.dwPageSize;
#else
    long result = sysconf(_SC_PAGESIZE);
    if (result == -1) {
        return LG_PAGE;
    }
    return (size_t)result;
#endif
}
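
/*
 * Note: PAGE is the page size assumed at build time, while os_page is what
 * the running system reports here.  pages_boot() below refuses to continue
 * when os_page > PAGE, because a build that assumes pages smaller than the
 * real ones cannot produce correctly sized and aligned mappings.
 */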

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
static bool
os_overcommits_sysctl(void) {
    int vm_overcommit;
    size_t sz;

    sz = sizeof(vm_overcommit);
    if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
        return false; /* Error. */
    }

    return ((vm_overcommit & 0x3) == 0);
}
#endif

#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
/*
 * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
 * reentry during bootstrapping if another library has interposed system call
 * wrappers.
 */
static bool
os_overcommits_proc(void) {
    int fd;
    char buf[1];
    ssize_t nread;

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
#if defined(O_CLOEXEC)
    fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
        O_CLOEXEC);
#else
    fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
    if (fd != -1) {
        fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
    }
#endif
#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
#if defined(O_CLOEXEC)
    fd = (int)syscall(SYS_openat,
        AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
    fd = (int)syscall(SYS_openat,
        AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
    if (fd != -1) {
        fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
    }
#endif
#else
#if defined(O_CLOEXEC)
    fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
#else
    fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
    if (fd != -1) {
        fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
    }
#endif
#endif

    if (fd == -1) {
        return false; /* Error. */
    }

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
    nread = read(fd, &buf, sizeof(buf));
#endif

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
    syscall(SYS_close, fd);
#else
    close(fd);
#endif

    if (nread < 1) {
        return false; /* Error. */
    }
    /*
     * /proc/sys/vm/overcommit_memory meanings:
     * 0: Heuristic overcommit.
     * 1: Always overcommit.
     * 2: Never overcommit.
     */
    return (buf[0] == '0' || buf[0] == '1');
}
#endif
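
/*
 * Summary of how the detected overcommit mode is consumed elsewhere in this
 * file: when os_overcommits is true, os_pages_map() reports every mapping as
 * committed (*commit = true), pages_commit_impl() becomes a no-op that
 * returns true, and pages_boot() adds MAP_NORESERVE to mmap_flags where that
 * flag exists, since the kernel hands out address space without reserving
 * backing store anyway.
 */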

static void
init_thp_state(void) {
    if (!have_madvise_huge) {
        if (metadata_thp_enabled() && opt_abort) {
            malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
            abort();
        }
        goto label_error;
    }

    static const char madvise_state[] = "always [madvise] never\n";
    char buf[sizeof(madvise_state)];

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
    int fd = (int)syscall(SYS_open,
        "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
    int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
    if (fd == -1) {
        goto label_error;
    }

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
    ssize_t nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
    ssize_t nread = read(fd, &buf, sizeof(buf));
#endif

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
    syscall(SYS_close, fd);
#else
    close(fd);
#endif

    if (nread < 1) {
        goto label_error;
    }
    if (strncmp(buf, madvise_state, (size_t)nread) == 0) {
        thp_state_madvise = true;
        return;
    }
label_error:
    thp_state_madvise = false;
}
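
/*
 * For reference (kernel behavior, not verified at build time):
 * /sys/kernel/mm/transparent_hugepage/enabled reads back as one of
 * "[always] madvise never", "always [madvise] never", or
 * "always madvise [never]", with brackets marking the active mode.  The
 * strncmp() above therefore sets thp_state_madvise only when the system is
 * in exactly the "madvise" mode.
 */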

bool
pages_boot(void) {
    os_page = os_page_detect();
    if (os_page > PAGE) {
        malloc_write("<jemalloc>: Unsupported system page size\n");
        if (opt_abort) {
            abort();
        }
        return true;
    }

#ifndef _WIN32
    mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
    os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
    os_overcommits = os_overcommits_proc();
#  ifdef MAP_NORESERVE
    if (os_overcommits) {
        mmap_flags |= MAP_NORESERVE;
    }
#  endif
#else
    os_overcommits = false;
#endif

    init_thp_state();

    /* Detect lazy purge runtime support. */
    if (pages_can_purge_lazy) {
        bool committed = false;
        void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
        if (madv_free_page == NULL) {
            return true;
        }
        assert(pages_can_purge_lazy_runtime);
        if (pages_purge_lazy(madv_free_page, PAGE)) {
            pages_can_purge_lazy_runtime = false;
        }
        os_pages_unmap(madv_free_page, PAGE);
    }

    return false;
}