#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
/* Some platforms (e.g. via system headers) predefine PAGE_MASK; ours wins. */
#  undef PAGE_MASK
#endif
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Return the page base address for the page containing address a. */
#define PAGE_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~PAGE_MASK))

/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))

/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))

/* Return the smallest huge page size multiple that is >= s. */
#define HUGEPAGE_CEILING(s)						\
	(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
#  define PAGES_CAN_PURGE_LAZY
#endif
/*
 * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
 *
 * The only supported way to hard-purge on Windows is to decommit and then
 * re-commit, but doing so is racy, and if re-commit fails it's a pain to
 * propagate the "poisoned" memory state.  Since we typically decommit as the
 * next step after purging on Windows anyway, there's no point in adding such
 * complexity.
 */
#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
    defined(JEMALLOC_MAPS_COALESCE))
#  define PAGES_CAN_PURGE_FORCED
#endif

/*
 * Constant mirrors of the capability macros, so ordinary (non-preprocessor)
 * code can branch on them and let dead branches be eliminated.
 */
static const bool pages_can_purge_lazy =
#ifdef PAGES_CAN_PURGE_LAZY
    true
#else
    false
#endif
    ;
static const bool pages_can_purge_forced =
#ifdef PAGES_CAN_PURGE_FORCED
    true
#else
    false
#endif
    ;
/*
 * PAGES_CAN_HUGIFY is defined if the platform exposes a mechanism to request
 * huge pages for an existing mapping (madvise(MADV_HUGEPAGE) or memcntl,
 * per the configure-detected macros below).
 */
#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
#  define PAGES_CAN_HUGIFY
#endif

/* Constant mirror of PAGES_CAN_HUGIFY for use in ordinary code. */
static const bool pages_can_hugify =
#ifdef PAGES_CAN_HUGIFY
    true
#else
    false
#endif
    ;
/* Transparent huge page (THP) operating modes. */
typedef enum {
	thp_mode_default = 0, /* Do not change hugepage settings. */
	thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
	thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */

	thp_mode_names_limit = 3, /* Used for option processing. */
	thp_mode_not_supported = 3 /* No THP support detected. */
} thp_mode_t;

#define THP_MODE_DEFAULT thp_mode_default
extern thp_mode_t opt_thp;
extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
extern const char *thp_mode_names[];
/*
 * Page primitives implemented elsewhere (pages.c).
 * NOTE(review): the bool returns presumably follow the jemalloc convention of
 * true-on-failure -- confirm against the definitions before relying on it.
 */
void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
void pages_unmap(void *addr, size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge_lazy(void *addr, size_t size);
bool pages_purge_forced(void *addr, size_t size);
bool pages_huge(void *addr, size_t size);
bool pages_nohuge(void *addr, size_t size);
bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state(void *ptr, size_t size);

#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */