Implement guard pages.
Add guarded extents: regular extents surrounded by guard pages (set to PROT_NONE via mprotect(2)). To reduce syscalls, small guarded extents are cached as a separate eset in the ecache, and decay through the dirty / muzzy / retained pipeline as usual.
This commit is contained in:
parent 7bb05e04be
commit deb8e62a83
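For orientation, here is a minimal standalone sketch (not part of the commit) of the layout this change produces: one inaccessible page on each side of the usable region. PAGE_SZ, alloc_guarded, and the fixed 4 KiB page size are assumptions of the sketch; jemalloc itself uses PAGE and PAGE_GUARDS_SIZE = 2 * PAGE, defined in the new guard.h below.

/*
 * base                  base + PAGE_SZ          base + PAGE_SZ + usize
 * | guard (PROT_NONE) | usable pages ...      | guard (PROT_NONE) |
 */
#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

#define PAGE_SZ 4096 /* assumption for the sketch; jemalloc uses PAGE */

static void *
alloc_guarded(size_t usize) {
	assert(usize % PAGE_SZ == 0);
	size_t total = usize + 2 * PAGE_SZ;
	char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		return NULL;
	}
	/* Turn the first and last page into inaccessible guards. */
	mprotect(base, PAGE_SZ, PROT_NONE);
	mprotect(base + PAGE_SZ + usize, PAGE_SZ, PROT_NONE);
	return base + PAGE_SZ; /* usable region between the guards */
}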
Makefile.in:

@@ -119,6 +119,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/extent_dss.c \
 	$(srcroot)src/extent_mmap.c \
 	$(srcroot)src/fxp.c \
+	$(srcroot)src/guard.c \
 	$(srcroot)src/hook.c \
 	$(srcroot)src/hpa.c \
 	$(srcroot)src/hpa_hooks.c \
@@ -218,6 +219,7 @@ TESTS_UNIT := \
 	${srcroot}test/unit/fb.c \
 	$(srcroot)test/unit/fork.c \
 	${srcroot}test/unit/fxp.c \
+	${srcroot}test/unit/guard.c \
 	$(srcroot)test/unit/hash.c \
 	$(srcroot)test/unit/hook.c \
 	$(srcroot)test/unit/hpa.c \
configure.ac | 12

@@ -2256,6 +2256,18 @@ else
   fi
 fi
+
+dnl ============================================================================
+dnl Check for mprotect(2).
+
+JE_COMPILABLE([mprotect(2)], [
+#include <sys/mman.h>
+], [
+	mprotect((void *)0, 0, PROT_NONE);
+], [je_cv_mprotect])
+if test "x${je_cv_mprotect}" = "xyes" ; then
+  AC_DEFINE([JEMALLOC_HAVE_MPROTECT], [ ])
+fi
 
 dnl ============================================================================
 dnl Check for __builtin_clz(), __builtin_clzl(), and __builtin_clzll().
@@ -221,7 +221,8 @@ large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
 	 * The cost is low enough (as edata will be accessed anyway) to be
 	 * enabled all the time.
 	 */
-	if (unlikely(edata_state_get(edata) != extent_state_active)) {
+	if (unlikely(edata == NULL ||
+	    edata_state_get(edata) != extent_state_active)) {
 		safety_check_fail("Invalid deallocation detected: "
 		    "pages being freed (%p) not currently active, "
 		    "possibly caused by double free bugs.",
include/jemalloc/internal/ecache.h:

@@ -2,12 +2,14 @@
 #define JEMALLOC_INTERNAL_ECACHE_H
 
 #include "jemalloc/internal/eset.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/mutex.h"
 
 typedef struct ecache_s ecache_t;
 struct ecache_s {
 	malloc_mutex_t mtx;
 	eset_t eset;
+	eset_t guarded_eset;
 	/* All stored extents must be in the same state. */
 	extent_state_t state;
 	/* The index of the ehooks the ecache is associated with. */
@@ -21,17 +23,22 @@ struct ecache_s {
 
 static inline size_t
 ecache_npages_get(ecache_t *ecache) {
-	return eset_npages_get(&ecache->eset);
+	return eset_npages_get(&ecache->eset) +
+	    eset_npages_get(&ecache->guarded_eset);
 }
 
 /* Get the number of extents in the given page size index. */
 static inline size_t
 ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
-	return eset_nextents_get(&ecache->eset, ind);
+	return eset_nextents_get(&ecache->eset, ind) +
+	    eset_nextents_get(&ecache->guarded_eset, ind);
 }
 
 /* Get the sum total bytes of the extents in the given page size index. */
 static inline size_t
 ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
-	return eset_nbytes_get(&ecache->eset, ind);
+	return eset_nbytes_get(&ecache->eset, ind) +
+	    eset_nbytes_get(&ecache->guarded_eset, ind);
 }
 
 static inline unsigned
include/jemalloc/internal/edata.h:

@@ -98,12 +98,13 @@ struct edata_s {
 	 * c: committed
 	 * p: pai
 	 * z: zeroed
+	 * g: guarded
 	 * t: state
 	 * i: szind
 	 * f: nfree
 	 * s: bin_shard
 	 *
-	 * 00000000 ... 00000sss sssfffff fffffiii iiiiittt zpcbaaaa aaaaaaaa
+	 * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
 	 *
 	 * arena_ind: Arena from which this extent came, or all 1 bits if
 	 *            unassociated.
@@ -123,6 +124,9 @@ struct edata_s {
 	 * zeroed: The zeroed flag is used by extent recycling code to track
 	 *         whether memory is zero-filled.
 	 *
+	 * guarded: The guarded flag is used by the sanitizer to track whether
+	 *          the extent has page guards around it.
+	 *
 	 * state: The state flag is an extent_state_t.
 	 *
 	 * szind: The szind flag indicates usable size class index for
@@ -158,8 +162,12 @@ struct edata_s {
 #define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
 #define EDATA_BITS_ZEROED_MASK  MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
 
+#define EDATA_BITS_GUARDED_WIDTH  1
+#define EDATA_BITS_GUARDED_SHIFT  (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_GUARDED_MASK  MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
+
 #define EDATA_BITS_STATE_WIDTH  3
-#define EDATA_BITS_STATE_SHIFT  (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_STATE_SHIFT  (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
 #define EDATA_BITS_STATE_MASK  MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
 
 #define EDATA_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
@@ -293,6 +301,12 @@ edata_state_get(const edata_t *edata) {
 	    EDATA_BITS_STATE_SHIFT);
 }
 
+static inline bool
+edata_guarded_get(const edata_t *edata) {
+	return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
+	    EDATA_BITS_GUARDED_SHIFT);
+}
+
 static inline bool
 edata_zeroed_get(const edata_t *edata) {
 	return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
@@ -505,6 +519,12 @@ edata_state_set(edata_t *edata, extent_state_t state) {
 	    ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
 }
 
+static inline void
+edata_guarded_set(edata_t *edata, bool guarded) {
+	edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
+	    ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
+}
+
 static inline void
 edata_zeroed_set(edata_t *edata, bool zeroed) {
 	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
@@ -588,6 +608,7 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
 	edata_szind_set(edata, szind);
 	edata_sn_set(edata, sn);
 	edata_state_set(edata, state);
+	edata_guarded_set(edata, false);
 	edata_zeroed_set(edata, zeroed);
 	edata_committed_set(edata, committed);
 	edata_pai_set(edata, pai);
@@ -606,6 +627,7 @@ edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
 	edata_szind_set(edata, SC_NSIZES);
 	edata_sn_set(edata, sn);
 	edata_state_set(edata, extent_state_active);
+	edata_guarded_set(edata, false);
 	edata_zeroed_set(edata, true);
 	edata_committed_set(edata, true);
 	/*
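A worked reading of the new bit layout (a sketch; the bit positions are derived from the layout comment above, where arena_ind occupies the low 12 bits and each of slab/committed/pai/zeroed takes one bit, so the new guarded bit lands at position 16, just below the 3-bit state field):

#include <stdbool.h>
#include <stdint.h>

/* Positions read off the "zpcbaaaa aaaaaaaa" comment; illustrative only. */
#define SKETCH_GUARDED_SHIFT 16
#define SKETCH_GUARDED_MASK  (UINT64_C(1) << SKETCH_GUARDED_SHIFT)

static inline bool
sketch_bits_guarded_get(uint64_t e_bits) {
	/* Same mask-and-shift pattern as edata_guarded_get() above. */
	return (bool)((e_bits & SKETCH_GUARDED_MASK) >> SKETCH_GUARDED_SHIFT);
}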
include/jemalloc/internal/ehooks.h:

@@ -63,6 +63,8 @@ bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
     unsigned arena_ind);
 bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
 void ehooks_default_zero_impl(void *addr, size_t size);
+void ehooks_default_guard_impl(void *guard1, void *guard2);
+void ehooks_default_unguard_impl(void *guard1, void *guard2);
 
 /*
  * We don't officially support reentrancy from within the extent hooks. But
@@ -139,6 +141,15 @@ ehooks_merge_will_fail(ehooks_t *ehooks) {
 	return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
 }
 
+static inline bool
+ehooks_guard_will_fail(ehooks_t *ehooks) {
+	/*
+	 * Before the guard hooks are officially introduced, limit the use to
+	 * the default hooks only.
+	 */
+	return !ehooks_are_default(ehooks);
+}
+
 /*
  * Some hooks are required to return zeroed memory in certain situations. In
  * debug mode, we do some heuristic checks that they did what they were supposed
@@ -368,4 +379,34 @@ ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
 	}
 }
 
+static inline bool
+ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
+	bool err;
+	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+
+	if (extent_hooks == &ehooks_default_extent_hooks) {
+		ehooks_default_guard_impl(guard1, guard2);
+		err = false;
+	} else {
+		err = true;
+	}
+
+	return err;
+}
+
+static inline bool
+ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
+	bool err;
+	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+
+	if (extent_hooks == &ehooks_default_extent_hooks) {
+		ehooks_default_unguard_impl(guard1, guard2);
+		err = false;
+	} else {
+		err = true;
+	}
+
+	return err;
+}
+
 #endif /* JEMALLOC_INTERNAL_EHOOKS_H */
include/jemalloc/internal/extent.h:

@@ -21,10 +21,10 @@ extern size_t opt_lg_extent_max_active_fit;
 
 edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
-    bool zero);
+    bool zero, bool guarded);
 edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
-    bool zero);
+    bool zero, bool guarded);
 void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata);
 edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
include/jemalloc/internal/guard.h | 76 (new file)

@@ -0,0 +1,76 @@
#ifndef JEMALLOC_INTERNAL_GUARD_H
#define JEMALLOC_INTERNAL_GUARD_H

#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/emap.h"

#define PAGE_GUARDS_SIZE (2 * PAGE)

#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0

/* 0 means disabled, i.e. never guarded. */
extern size_t opt_san_guard_large;
extern size_t opt_san_guard_small;

void guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap);
void unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap);
void tsd_san_init(tsd_t *tsd);

static inline bool
san_enabled(void) {
	return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
}

static inline bool
large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
    size_t alignment) {
	if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
	    tsdn_null(tsdn)) {
		return false;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
	assert(n >= 1);
	if (n > 1) {
		/*
		 * Subtract conditionally because the guard may not happen due
		 * to alignment or size restriction below.
		 */
		*tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
	}

	if (n == 1 && (alignment <= PAGE) &&
	    (size + PAGE_GUARDS_SIZE <= SC_LARGE_MAXCLASS)) {
		*tsd_san_extents_until_guard_largep_get(tsd) =
		    opt_san_guard_large;
		return true;
	} else {
		assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
		return false;
	}
}

static inline bool
slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
	if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
	    tsdn_null(tsdn)) {
		return false;
	}

	tsd_t *tsd = tsdn_tsd(tsdn);
	uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
	assert(n >= 1);
	if (n == 1) {
		*tsd_san_extents_until_guard_smallp_get(tsd) =
		    opt_san_guard_small;
		return true;
	} else {
		*tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
		assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
		return false;
	}
}

#endif /* JEMALLOC_INTERNAL_GUARD_H */
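A standalone sketch (not from the commit) of how the every-N counter in slab_extent_decide_guard() behaves; the static variable stands in for the per-thread TSD counter, and with opt_san_guard_small = 3 the 3rd, 6th, 9th, ... slab allocations get guarded:

#include <stdbool.h>
#include <stdint.h>

static uint64_t extents_until_guard = 3; /* stand-in for the TSD counter */
static const uint64_t opt_guard_every_n = 3;

static bool
decide_guard_sketch(void) {
	if (extents_until_guard > 1) {
		extents_until_guard--;
		return false;
	}
	/* Counter reached 1: guard this extent and rearm the counter. */
	extents_until_guard = opt_guard_every_n;
	return true;
}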
include/jemalloc/internal/jemalloc_internal_defs.h.in:

@@ -312,6 +312,9 @@
  */
 #undef JEMALLOC_MADVISE_NOCORE
 
+/* Defined if mprotect(2) is available. */
+#undef JEMALLOC_HAVE_MPROTECT
+
 /*
  * Defined if transparent huge pages (THPs) are supported via the
  * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
include/jemalloc/internal/pa.h:

@@ -167,7 +167,7 @@ void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
 
 /* Gets an edata for the given allocation. */
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
-    size_t alignment, bool slab, szind_t szind, bool zero,
+    size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
     bool *deferred_work_generated);
 /* Returns true on error, in which case nothing changed. */
 bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
include/jemalloc/internal/pages.h:

@@ -110,5 +110,7 @@ bool pages_dontdump(void *addr, size_t size);
 bool pages_dodump(void *addr, size_t size);
 bool pages_boot(void);
 void pages_set_thp_state (void *ptr, size_t size);
+void pages_mark_guards(void *head, void *tail);
+void pages_unmark_guards(void *head, void *tail);
 
 #endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
include/jemalloc/internal/pai.h:

@@ -7,7 +7,8 @@ typedef struct pai_s pai_t;
 struct pai_s {
 	/* Returns NULL on failure. */
 	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
-	    size_t alignment, bool zero, bool *deferred_work_generated);
+	    size_t alignment, bool zero, bool guarded,
+	    bool *deferred_work_generated);
 	/*
 	 * Returns the number of extents added to the list (which may be fewer
 	 * than requested, in case of OOM). The list should already be
@@ -37,8 +38,8 @@ struct pai_s {
 
 static inline edata_t *
 pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool *deferred_work_generated) {
-	return self->alloc(tsdn, self, size, alignment, zero,
+    bool guarded, bool *deferred_work_generated) {
+	return self->alloc(tsdn, self, size, alignment, zero, guarded,
 	    deferred_work_generated);
 }
include/jemalloc/internal/tsd.h:

@@ -73,6 +73,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
 	O(peak_dalloc_event_wait,	uint64_t,	uint64_t)	\
 	O(prof_tdata,	prof_tdata_t *,	prof_tdata_t *)	\
 	O(prng_state,	uint64_t,	uint64_t)	\
+	O(san_extents_until_guard_small,	uint64_t,	uint64_t)	\
+	O(san_extents_until_guard_large,	uint64_t,	uint64_t)	\
 	O(iarena,	arena_t *,	arena_t *)	\
 	O(arena,	arena_t *,	arena_t *)	\
 	O(arena_decay_ticker,	ticker_geom_t,	ticker_geom_t)	\
@@ -103,6 +105,8 @@ typedef ql_elm(tsd_t) tsd_link_t;
 	/* peak_dalloc_event_wait */	0,	\
 	/* prof_tdata */	NULL,	\
 	/* prng_state */	0,	\
+	/* san_extents_until_guard_small */	0,	\
+	/* san_extents_until_guard_large */	0,	\
 	/* iarena */	NULL,	\
 	/* arena */	NULL,	\
 	/* arena_decay_ticker */	\
MSVC project files (vcxproj and .filters, for both supported Visual Studio versions):

@@ -59,6 +59,7 @@
     <ClCompile Include="..\..\..\..\src\extent_dss.c" />
     <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
     <ClCompile Include="..\..\..\..\src\fxp.c" />
+    <ClCompile Include="..\..\..\..\src\guard.c" />
     <ClCompile Include="..\..\..\..\src\hook.c" />
     <ClCompile Include="..\..\..\..\src\hpa.c" />
     <ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
@@ -61,6 +61,9 @@
     <ClCompile Include="..\..\..\..\src\fxp.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\guard.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\hook.c">
       <Filter>Source Files</Filter>
     </ClCompile>
@@ -59,6 +59,7 @@
     <ClCompile Include="..\..\..\..\src\extent_dss.c" />
     <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
     <ClCompile Include="..\..\..\..\src\fxp.c" />
+    <ClCompile Include="..\..\..\..\src\guard.c" />
     <ClCompile Include="..\..\..\..\src\hook.c" />
     <ClCompile Include="..\..\..\..\src\hpa.c" />
     <ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
@@ -61,6 +61,9 @@
     <ClCompile Include="..\..\..\..\src\fxp.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\guard.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\hook.c">
       <Filter>Source Files</Filter>
     </ClCompile>
src/arena.c | 11

@@ -6,6 +6,7 @@
 #include "jemalloc/internal/ehooks.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/safety_check.h"
@@ -327,9 +328,10 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	szind_t szind = sz_size2index(usize);
 	size_t esize = usize + sz_large_pad;
 
+	bool guarded = large_extent_decide_guard(tsdn, arena_get_ehooks(arena),
+	    esize, alignment);
 	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
-	    /* slab */ false, szind, zero, &deferred_work_generated);
-
+	    /* slab */ false, szind, zero, guarded, &deferred_work_generated);
 	assert(deferred_work_generated == false);
 
 	if (edata != NULL) {
@@ -827,9 +829,10 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
+	bool guarded = slab_extent_decide_guard(tsdn, arena_get_ehooks(arena));
 	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
-	    PAGE, /* slab */ true, /* szind */ binind, /* zero */ false,
-	    &deferred_work_generated);
+	    /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
+	    /* zero */ false, guarded, &deferred_work_generated);
 
 	if (deferred_work_generated) {
 		arena_handle_deferred_work(tsdn, arena);
src/ecache.c:

@@ -1,6 +1,8 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
+#include "jemalloc/internal/guard.h"
+
 bool
 ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
     bool delay_coalesce) {
@@ -12,6 +14,8 @@ ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
 	ecache->ind = ind;
 	ecache->delay_coalesce = delay_coalesce;
 	eset_init(&ecache->eset, state);
+	eset_init(&ecache->guarded_eset, state);
 
 	return false;
 }
src/ehooks.c | 10

@@ -244,6 +244,16 @@ ehooks_default_zero_impl(void *addr, size_t size) {
 	}
 }
 
+void
+ehooks_default_guard_impl(void *guard1, void *guard2) {
+	pages_mark_guards(guard1, guard2);
+}
+
+void
+ehooks_default_unguard_impl(void *guard1, void *guard2) {
+	pages_unmark_guards(guard1, guard2);
+}
+
 const extent_hooks_t ehooks_default_extent_hooks = {
 	ehooks_default_alloc,
 	ehooks_default_dalloc,
src/extent.c | 76

@@ -37,14 +37,14 @@ static atomic_zu_t highpages;
 static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
 static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
-    bool zero, bool *commit, bool growing_retained);
+    bool zero, bool *commit, bool growing_retained, bool guarded);
 static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool *coalesced);
 static void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata);
 static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
     ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
-    bool zero, bool *commit);
+    bool zero, bool *commit, bool guarded);
 static edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t alignment, bool zero, bool *commit);
 
@@ -80,7 +80,8 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 edata_t *
 ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *expand_edata, size_t size, size_t alignment, bool zero) {
+    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+    bool guarded) {
 	assert(size != 0);
 	assert(alignment != 0);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -88,14 +89,15 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 
 	bool commit = true;
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
-	    size, alignment, zero, &commit, false);
+	    size, alignment, zero, &commit, false, guarded);
 	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
 	return edata;
 }
 
 edata_t *
 ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *expand_edata, size_t size, size_t alignment, bool zero) {
+    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+    bool guarded) {
 	assert(size != 0);
 	assert(alignment != 0);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -103,7 +105,7 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 
 	bool commit = true;
 	edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
-	    size, alignment, zero, &commit);
+	    size, alignment, zero, &commit, guarded);
 	if (edata == NULL) {
 		if (opt_retain && expand_edata != NULL) {
 			/*
@@ -114,6 +116,14 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 			 */
 			return NULL;
 		}
+		if (guarded) {
+			/*
+			 * Means no cached guarded extents available (and no
+			 * grow_retained was attempted).  The pac_alloc flow
+			 * will alloc regular extents to make new guarded ones.
+			 */
+			return NULL;
+		}
 		void *new_addr = (expand_edata == NULL) ? NULL :
 		    edata_past_get(expand_edata);
 		edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
@@ -151,17 +161,27 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	edata_t *edata;
 	while (true) {
 		/* Get the LRU extent, if any. */
-		edata = edata_list_inactive_first(&ecache->eset.lru);
+		eset_t *eset = &ecache->eset;
+		edata = edata_list_inactive_first(&eset->lru);
 		if (edata == NULL) {
+			/*
+			 * Next check if there are guarded extents.  They are
+			 * more expensive to purge (since they are not
+			 * mergeable), thus in favor of caching them longer.
+			 */
+			eset = &ecache->guarded_eset;
+			edata = edata_list_inactive_first(&eset->lru);
+			if (edata == NULL) {
+				goto label_return;
+			}
 		}
 		/* Check the eviction limit. */
 		size_t extents_npages = ecache_npages_get(ecache);
 		if (extents_npages <= npages_min) {
 			edata = NULL;
 			goto label_return;
 		}
-		eset_remove(&ecache->eset, edata);
+		eset_remove(eset, edata);
 		if (!ecache->delay_coalesce) {
 			break;
 		}
@@ -234,17 +254,19 @@ extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
 	assert(edata_state_get(edata) == extent_state_active);
 
 	emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
-	eset_insert(&ecache->eset, edata);
+	eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
+	    &ecache->eset;
+	eset_insert(eset, edata);
 }
 
 static void
-extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
     edata_t *edata) {
 	assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
 	assert(edata_state_get(edata) == ecache->state ||
 	    edata_state_get(edata) == extent_state_merging);
 
-	eset_remove(&ecache->eset, edata);
+	eset_remove(eset, edata);
 	emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
 }
 
@@ -350,7 +372,8 @@ extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
 */
 static edata_t *
 extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
-    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment) {
+    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+    bool guarded) {
 	malloc_mutex_assert_owner(tsdn, &ecache->mtx);
 	assert(alignment > 0);
 	if (config_debug && expand_edata != NULL) {
@@ -366,6 +389,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	}
 
 	edata_t *edata;
+	eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
 	if (expand_edata != NULL) {
 		edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
 		    expand_edata, EXTENT_PAI_PAC, ecache->state);
@@ -382,7 +406,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		 * If split and merge are not allowed (Windows w/o retain), try
 		 * exact fit only.
 		 */
-		bool exact_only = (!maps_coalesce && !opt_retain);
+		bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
 		/*
 		 * A large extent might be broken up from its original size to
 		 * some small size to satisfy a small request.  When that small
@@ -394,13 +418,13 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		 */
 		unsigned lg_max_fit = ecache->delay_coalesce
 		    ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
-		edata = eset_fit(&ecache->eset, size, alignment, exact_only,
-		    lg_max_fit);
+		edata = eset_fit(eset, size, alignment, exact_only, lg_max_fit);
 	}
 	if (edata == NULL) {
 		return NULL;
 	}
-	extent_activate_locked(tsdn, pac, ecache, edata);
+	assert(!guarded || edata_guarded_get(edata));
+	extent_activate_locked(tsdn, pac, ecache, eset, edata);
 
 	return edata;
 }
@@ -551,13 +575,14 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 static edata_t *
 extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *expand_edata, size_t size, size_t alignment, bool zero,
-    bool *commit, bool growing_retained) {
+    bool *commit, bool growing_retained, bool guarded) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+	assert(!guarded || expand_edata == NULL);
 
 	malloc_mutex_lock(tsdn, &ecache->mtx);
 	edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
-	    expand_edata, size, alignment);
+	    expand_edata, size, alignment, guarded);
 	if (edata == NULL) {
 		malloc_mutex_unlock(tsdn, &ecache->mtx);
 		return NULL;
@@ -734,7 +759,7 @@ label_err:
 static edata_t *
 extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     edata_t *expand_edata, size_t size, size_t alignment, bool zero,
-    bool *commit) {
+    bool *commit, bool guarded) {
 	assert(size != 0);
 	assert(alignment != 0);
 
@@ -742,13 +767,13 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks,
 	    &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
-	    /* growing_retained */ true);
+	    /* growing_retained */ true, guarded);
 	if (edata != NULL) {
 		malloc_mutex_unlock(tsdn, &pac->grow_mtx);
 		if (config_prof) {
 			extent_gdump_add(tsdn, edata);
 		}
-	} else if (opt_retain && expand_edata == NULL) {
+	} else if (opt_retain && expand_edata == NULL && !guarded) {
 		edata = extent_grow_retained(tsdn, pac, ehooks, size,
 		    alignment, zero, commit);
 		/* extent_grow_retained() always releases pac->grow_mtx. */
@@ -910,6 +935,9 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	emap_assert_mapped(tsdn, pac->emap, edata);
 
+	if (edata_guarded_get(edata)) {
+		goto label_skip_coalesce;
+	}
 	if (!ecache->delay_coalesce) {
 		edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
 		    NULL);
@@ -931,6 +959,7 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 			return;
 		}
 	}
+label_skip_coalesce:
 	extent_deactivate_locked(tsdn, pac, ecache, edata);
 
 	malloc_mutex_unlock(tsdn, &ecache->mtx);
@@ -981,6 +1010,11 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	/* Avoid calling the default extent_dalloc unless have to. */
 	if (!ehooks_dalloc_will_fail(ehooks)) {
+		/* Restore guard pages for dalloc / unmap. */
+		if (edata_guarded_get(edata)) {
+			assert(ehooks_are_default(ehooks));
+			unguard_pages(tsdn, ehooks, edata, pac->emap);
+		}
 		/*
 		 * Deregister first to avoid a race with other allocating
 		 * threads, and reregister if deallocation fails.
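One consequence worth spelling out: cached guarded extents are exact-fit only and skip coalescing, because the PROT_NONE pages sit at fixed offsets inside the mapping and would end up in the interior of any split or merged region. A simplified sketch of the lookup rule (not from the commit; the real call site also handles the expand path and lg_max_fit tuning):

static edata_t *
recycle_pick_sketch(ecache_t *ecache, size_t size, size_t alignment,
    bool guarded) {
	/* Guarded extents live in their own eset within the ecache. */
	eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
	/* Exact fit only: a guarded extent cannot be split or merged. */
	bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
	return eset_fit(eset, size, alignment, exact_only, SC_PTR_BITS);
}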
src/guard.c | 63 (new file)

@@ -0,0 +1,63 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/guard.h"
#include "jemalloc/internal/tsd.h"

/* The sanitizer options. */
size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;

void
guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
	emap_deregister_boundary(tsdn, emap, edata);

	size_t size_with_guards = edata_size_get(edata);
	size_t usize = size_with_guards - PAGE_GUARDS_SIZE;

	uintptr_t guard1 = (uintptr_t)edata_base_get(edata);
	uintptr_t addr = guard1 + PAGE;
	uintptr_t guard2 = addr + usize;

	assert(edata_state_get(edata) == extent_state_active);
	ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);

	/* Update the guarded addr and usable size of the edata. */
	edata_size_set(edata, usize);
	edata_addr_set(edata, (void *)addr);
	edata_guarded_set(edata, true);

	/* The new boundary will be registered on the pa_alloc path. */
}

void
unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
	/* Remove the inner boundary which no longer exists. */
	emap_deregister_boundary(tsdn, emap, edata);

	size_t size = edata_size_get(edata);
	size_t size_with_guards = size + PAGE_GUARDS_SIZE;

	uintptr_t addr = (uintptr_t)edata_base_get(edata);
	uintptr_t guard1 = addr - PAGE;
	uintptr_t guard2 = addr + size;

	assert(edata_state_get(edata) == extent_state_active);
	ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);

	/* Update the true addr and usable size of the edata. */
	edata_size_set(edata, size_with_guards);
	edata_addr_set(edata, (void *)guard1);
	edata_guarded_set(edata, false);

	/* Then re-register the outer boundary including the guards. */
	emap_register_boundary(tsdn, emap, edata, SC_NSIZES, /* slab */ false);
}

void
tsd_san_init(tsd_t *tsd) {
	*tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
	*tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
}
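A worked example of the pointer arithmetic above, assuming a 4 KiB page and an illustrative 6-page extent at base 0x7f0000000000 (both values are assumptions of the example):

    guard_pages():   size_with_guards = 0x6000
                     usize  = 0x6000 - 0x2000 = 0x4000
                     guard1 = 0x7f0000000000            (base)
                     addr   = guard1 + PAGE = 0x7f0000001000
                     guard2 = addr + usize  = 0x7f0000005000
    unguard_pages(): guard1 = addr - PAGE, guard2 = addr + size,
                     restoring base = guard1 and size = size_with_guards.

The two transformations are exact inverses, which is what lets dirty guarded extents be cached, then later recycled as-is or unguarded for unmap.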
src/hpa.c:

@@ -9,7 +9,7 @@
 #define HPA_EDEN_SIZE (128 * HUGEPAGE)
 
 static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
 static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
 static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -750,8 +750,9 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 
 static edata_t *
 hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool *deferred_work_generated) {
+    bool guarded, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
+	assert(!guarded);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
@@ -796,7 +797,6 @@ hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
 	assert(edata_state_get(edata) == extent_state_active);
 	assert(edata_arena_ind_get(edata) == shard->ind);
 	assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
-	assert(!edata_slab_get(edata));
 	assert(edata_committed_get(edata));
 	assert(edata_base_get(edata) != NULL);
 
@@ -865,6 +865,7 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
 static void
 hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     bool *deferred_work_generated) {
+	assert(!edata_guarded_get(edata));
 	/* Just a dalloc_batch of size 1; this lets us share logic. */
 	edata_list_active_t dalloc_list;
 	edata_list_active_init(&dalloc_list);
src/jemalloc.c:

@@ -10,6 +10,7 @@
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/fxp.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/hook.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/log.h"
@@ -1616,6 +1617,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			}
 			CONF_CONTINUE;
 		}
+
+		CONF_HANDLE_SIZE_T(opt_san_guard_small,
+		    "san_guard_small", 0, SIZE_T_MAX,
+		    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+		CONF_HANDLE_SIZE_T(opt_san_guard_large,
+		    "san_guard_large", 0, SIZE_T_MAX,
+		    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+
 		CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
 #undef CONF_ERROR
 #undef CONF_CONTINUE
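Example usage (not part of the commit; the option names come from the handlers above, MALLOC_CONF is jemalloc's standard runtime configuration mechanism, and the values are "guard every N extents", with 0, the default, leaving guarding disabled):

    MALLOC_CONF="san_guard_large:32,san_guard_small:100" ./app

i.e. every 32nd large extent and every 100th small slab extent gets guard pages, amortizing the mprotect() cost across many allocations.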
src/pa.c | 27

@@ -1,6 +1,7 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/hpa.h"
 
 static void
@@ -118,15 +119,17 @@ pa_get_pai(pa_shard_t *shard, edata_t *edata) {
 
 edata_t *
 pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool zero, bool *deferred_work_generated) {
+    bool slab, szind_t szind, bool zero, bool guarded,
+    bool *deferred_work_generated) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
+	assert(!guarded || alignment <= PAGE);
 
 	edata_t *edata = NULL;
 	*deferred_work_generated = false;
-	if (pa_shard_uses_hpa(shard)) {
+	if (!guarded && pa_shard_uses_hpa(shard)) {
 		edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
-		    zero, deferred_work_generated);
+		    zero, /* guarded */ false, deferred_work_generated);
 	}
 	/*
 	 * Fall back to the PAC if the HPA is off or couldn't serve the given
@@ -134,10 +137,10 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	 */
 	if (edata == NULL) {
 		edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
-		    deferred_work_generated);
+		    guarded, deferred_work_generated);
 	}
 
 	if (edata != NULL) {
 		assert(edata_size_get(edata) == size);
 		pa_nactive_add(shard, size >> LG_PAGE);
 		emap_remap(tsdn, shard->emap, edata, szind, slab);
 		edata_szind_set(edata, szind);
@@ -145,8 +148,6 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 		if (slab && (size > 2 * PAGE)) {
 			emap_register_interior(tsdn, shard->emap, edata, szind);
 		}
-	}
-	if (edata != NULL) {
 		assert(edata_arena_ind_get(edata) == shard->ind);
 	}
 	return edata;
@@ -158,7 +159,9 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	assert(new_size > old_size);
 	assert(edata_size_get(edata) == old_size);
 	assert((new_size & PAGE_MASK) == 0);
-
+	if (edata_guarded_get(edata)) {
+		return true;
+	}
 	size_t expand_amount = new_size - old_size;
 
 	pai_t *pai = pa_get_pai(shard, edata);
@@ -181,6 +184,9 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	assert(new_size < old_size);
 	assert(edata_size_get(edata) == old_size);
 	assert((new_size & PAGE_MASK) == 0);
+	if (edata_guarded_get(edata)) {
+		return true;
+	}
 	size_t shrink_amount = old_size - new_size;
 
 	pai_t *pai = pa_get_pai(shard, edata);
@@ -202,7 +208,10 @@ pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
 	emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
 	if (edata_slab_get(edata)) {
 		emap_deregister_interior(tsdn, shard->emap, edata);
-		edata_slab_set(edata, false);
+		/*
+		 * The slab state of the extent isn't cleared.  It may be used
+		 * by the pai implementation, e.g. to make caching decisions.
+		 */
 	}
 	edata_addr_set(edata, edata_base_get(edata));
 	edata_szind_set(edata, SC_NSIZES);
src/pac.c | 86

@@ -2,9 +2,10 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/guard.h"
 
 static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
 static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -109,28 +110,66 @@ pac_may_have_muzzy(pac_t *pac) {
 }
 
 static edata_t *
-pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
-    bool zero, bool *deferred_work_generated) {
-	pac_t *pac = (pac_t *)self;
-
-	*deferred_work_generated = false;
-
-	ehooks_t *ehooks = pac_ehooks_get(pac);
+pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+    size_t alignment, bool zero, bool guarded) {
+	assert(!guarded || alignment <= PAGE);
+
 	edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
-	    NULL, size, alignment, zero);
+	    NULL, size, alignment, zero, guarded);
 
 	if (edata == NULL && pac_may_have_muzzy(pac)) {
 		edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
-		    NULL, size, alignment, zero);
+		    NULL, size, alignment, zero, guarded);
 	}
 	if (edata == NULL) {
 		edata = ecache_alloc_grow(tsdn, pac, ehooks,
-		    &pac->ecache_retained, NULL, size, alignment, zero);
+		    &pac->ecache_retained, NULL, size, alignment, zero,
+		    guarded);
 		if (config_stats && edata != NULL) {
 			atomic_fetch_add_zu(&pac->stats->pac_mapped, size,
 			    ATOMIC_RELAXED);
 		}
 	}
 
 	return edata;
 }
 
+static edata_t *
+pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+    size_t alignment, bool zero) {
+	assert(alignment <= PAGE);
+
+	size_t size_with_guards = size + PAGE_GUARDS_SIZE;
+	/* Alloc a non-guarded extent first. */
+	edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
+	    /* alignment */ PAGE, zero, /* guarded */ false);
+	if (edata != NULL) {
+		/* Add guards around it. */
+		assert(edata_size_get(edata) == size_with_guards);
+		guard_pages(tsdn, ehooks, edata, pac->emap);
+	}
+	assert(edata == NULL || (edata_guarded_get(edata) &&
+	    edata_size_get(edata) == size));
+
+	return edata;
+}
+
+static edata_t *
+pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+    bool zero, bool guarded, bool *deferred_work_generated) {
+	*deferred_work_generated = false;
+
+	pac_t *pac = (pac_t *)self;
+	ehooks_t *ehooks = pac_ehooks_get(pac);
+
+	edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
+	    zero, guarded);
+	if (edata == NULL && guarded) {
+		/* No cached guarded extents; creating a new one. */
+		edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
+		    alignment, zero);
+	}
+
+	return edata;
+}
+
@@ -149,15 +188,15 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
 		return true;
 	}
 	edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
-	    edata, expand_amount, PAGE, zero);
+	    edata, expand_amount, PAGE, zero, /* guarded */ false);
 	if (trail == NULL) {
 		trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
-		    edata, expand_amount, PAGE, zero);
+		    edata, expand_amount, PAGE, zero, /* guarded */ false);
 	}
 	if (trail == NULL) {
 		trail = ecache_alloc_grow(tsdn, pac, ehooks,
 		    &pac->ecache_retained, edata, expand_amount, PAGE,
-		    zero);
+		    zero, /* guarded */ false);
 		mapped_add = expand_amount;
 	}
 	if (trail == NULL) {
@@ -203,6 +242,27 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     bool *deferred_work_generated) {
 	pac_t *pac = (pac_t *)self;
 	ehooks_t *ehooks = pac_ehooks_get(pac);
+
+	if (edata_guarded_get(edata)) {
+		/*
+		 * Because cached guarded extents do exact fit only, large
+		 * guarded extents are restored on dalloc eagerly (otherwise
+		 * they will not be reused efficiently).  Slab sizes have a
+		 * limited number of size classes, and tend to cycle faster.
+		 *
+		 * In the case where coalesce is restrained (VirtualFree on
+		 * Windows), guarded extents are also not cached -- otherwise
+		 * during arena destroy / reset, the retained extents would not
+		 * be whole regions (i.e. they are split between regular and
+		 * guarded).
+		 */
+		if (!edata_slab_get(edata) || !maps_coalesce) {
+			assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
+			    !maps_coalesce);
+			unguard_pages(tsdn, ehooks, edata, pac->emap);
+		}
+	}
+
 	ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata);
 	/* Purging of deallocated pages is deferred */
 	*deferred_work_generated = true;
src/pages.c | 47

@@ -316,14 +316,10 @@ pages_unmap(void *addr, size_t size) {
 }
 
 static bool
-pages_commit_impl(void *addr, size_t size, bool commit) {
+os_pages_commit(void *addr, size_t size, bool commit) {
 	assert(PAGE_ADDR2BASE(addr) == addr);
 	assert(PAGE_CEILING(size) == size);
 
-	if (os_overcommits) {
-		return true;
-	}
-
 #ifdef _WIN32
 	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
 	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
@@ -348,6 +344,15 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
 #endif
 }
 
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit) {
+	if (os_overcommits) {
+		return true;
+	}
+
+	return os_pages_commit(addr, size, commit);
+}
+
 bool
 pages_commit(void *addr, size_t size) {
 	return pages_commit_impl(addr, size, true);
@@ -358,6 +363,38 @@ pages_decommit(void *addr, size_t size) {
 	return pages_commit_impl(addr, size, false);
 }
 
+void
+pages_mark_guards(void *head, void *tail) {
+	assert(head != NULL && tail != NULL);
+	assert((uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+	mprotect(head, PAGE, PROT_NONE);
+	mprotect(tail, PAGE, PROT_NONE);
+#else
+	/* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
+	os_pages_commit(head, PAGE, false);
+	os_pages_commit(tail, PAGE, false);
+#endif
+}
+
+void
+pages_unmark_guards(void *head, void *tail) {
+	assert(head != NULL && tail != NULL);
+	assert((uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+	size_t range = (uintptr_t)tail - (uintptr_t)head + PAGE;
+	if (range <= SC_LARGE_MINCLASS) {
+		mprotect(head, range, PROT_READ | PROT_WRITE);
+	} else {
+		mprotect(head, PAGE, PROT_READ | PROT_WRITE);
+		mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+	}
+#else
+	os_pages_commit(head, PAGE, true);
+	os_pages_commit(tail, PAGE, true);
+#endif
+}
+
 bool
 pages_purge_lazy(void *addr, size_t size) {
 	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
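A self-contained demonstration (not from the commit) that an mprotect(PROT_NONE) page faults on access, which is the property the guards rely on to turn out-of-bounds reads and writes into immediate, diagnosable crashes:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;

static void
on_segv(int sig) {
	(void)sig;
	siglongjmp(env, 1);
}

int
main(void) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		return 1;
	}
	mprotect(p, page, PROT_NONE); /* mark the page as a guard */

	signal(SIGSEGV, on_segv);
	if (sigsetjmp(env, 1) == 0) {
		p[0] = 1; /* faults: the guard page is inaccessible */
		puts("no fault (unexpected)");
	} else {
		puts("SIGSEGV on guard page access, as intended");
	}
	return 0;
}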
src/pai.c:

@@ -7,7 +7,7 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 	for (size_t i = 0; i < nallocs; i++) {
 		bool deferred_by_alloc = false;
 		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
-		    /* zero */ false, &deferred_by_alloc);
+		    /* zero */ false, /* guarded */ false, &deferred_by_alloc);
 		*deferred_work_generated |= deferred_by_alloc;
 		if (edata == NULL) {
 			return i;
src/sec.c:

@@ -4,7 +4,7 @@
 #include "jemalloc/internal/sec.h"
 
 static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
 static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -218,8 +218,9 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
 
 static edata_t *
 sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool *deferred_work_generated) {
+    bool guarded, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
+	assert(!guarded);
 
 	sec_t *sec = (sec_t *)self;
 	*deferred_work_generated = false;
@@ -227,7 +228,7 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 	if (zero || alignment > PAGE || sec->opts.nshards == 0
 	    || size > sec->opts.max_alloc) {
 		return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
-		    deferred_work_generated);
+		    /* guarded */ false, deferred_work_generated);
 	}
 	pszind_t pszind = sz_psz2ind(size);
 	sec_shard_t *shard = sec_shard_pick(tsdn, sec);
@@ -250,7 +251,7 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 		    size);
 		} else {
 			edata = pai_alloc(tsdn, sec->fallback, size, alignment,
-			    zero, deferred_work_generated);
+			    zero, /* guarded */ false, deferred_work_generated);
 		}
 	}
 	return edata;
src/tsd.c:

@@ -2,6 +2,7 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
 
@@ -242,6 +243,7 @@ tsd_data_init(tsd_t *tsd) {
 	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
 	tsd_prng_state_init(tsd);
 	tsd_te_init(tsd); /* event_init may use the prng state above. */
+	tsd_san_init(tsd);
 	return tsd_tcache_enabled_data_init(tsd);
 }
 
@@ -269,6 +271,7 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
 	*tsd_reentrancy_levelp_get(tsd) = 1;
 	tsd_prng_state_init(tsd);
 	tsd_te_init(tsd); /* event_init may use the prng state above. */
+	tsd_san_init(tsd);
 	assert_tsd_data_cleanup_done(tsd);
 
 	return false;
test/include/test/arena_decay.h | 149 (new file)

@@ -0,0 +1,149 @@
static unsigned
do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
	    0, "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
	    "Unexpected mallctlbymib() failure");

	expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
	    0, "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
	    "Unexpected mallctlbymib() failure");

	return arena_ind;
}

static void
do_arena_destroy(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

static void
do_epoch(void) {
	uint64_t epoch = 1;
	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
	    0, "Unexpected mallctl() failure");
}

static void
do_purge(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

static void
do_decay(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

static uint64_t
get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	uint64_t npurge = 0;
	size_t sz = sizeof(npurge);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
	    config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
	return npurge;
}

static uint64_t
get_arena_dirty_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}

static uint64_t
get_arena_dirty_purged(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
}

static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static uint64_t
get_arena_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
	    get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static size_t
get_arena_pdirty(unsigned arena_ind) {
	do_epoch();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	size_t pdirty;
	size_t sz = sizeof(pdirty);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
	return pdirty;
}

static size_t
get_arena_pmuzzy(unsigned arena_ind) {
	do_epoch();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	size_t pmuzzy;
	size_t sz = sizeof(pmuzzy);
	expect_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
	return pmuzzy;
}

static void *
do_mallocx(size_t size, int flags) {
	void *p = mallocx(size, flags);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	return p;
}

static void
generate_dirty(unsigned arena_ind, size_t size) {
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	void *p = do_mallocx(size, flags);
	dallocx(p, flags);
}
test/include/test/guard.h | 6 (new file)

@@ -0,0 +1,6 @@
static inline bool
extent_is_guarded(tsdn_t *tsdn, void *ptr) {
	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
	return edata_guarded_get(edata);
}
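A hypothetical test built on this helper (TEST_BEGIN / expect_* are jemalloc's own test-harness macros; the size and the assumed san_guard_large:1 configuration are illustrative, not taken from the commit):

TEST_BEGIN(test_guarded_large) {
	/* Assumes the test runs with san_guard_large:1 in MALLOC_CONF. */
	void *p = mallocx(SC_LARGE_MINCLASS, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_true(extent_is_guarded(tsdn_fetch(), p),
	    "Expected a guarded extent");
	dallocx(p, 0);
}
TEST_END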
test/unit/decay.c:

@@ -1,4 +1,5 @@
 #include "test/jemalloc_test.h"
+#include "test/arena_decay.h"
 
 #include "jemalloc/internal/ticker.h"
 
@@ -22,155 +23,6 @@ nstime_update_mock(nstime_t *time) {
 	}
 }
 
-static unsigned
-do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
-	unsigned arena_ind;
-	size_t sz = sizeof(unsigned);
-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
-	    0, "Unexpected mallctl() failure");
-	size_t mib[3];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-
-	expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
-	    0, "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
-	    (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
-	    "Unexpected mallctlbymib() failure");
-
-	expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
-	    0, "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
-	    (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
-	    "Unexpected mallctlbymib() failure");
-
-	return arena_ind;
-}
-
-static void
-do_arena_destroy(unsigned arena_ind) {
-	size_t mib[3];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-
-void
-do_epoch(void) {
-	uint64_t epoch = 1;
-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
-	    0, "Unexpected mallctl() failure");
-}
-
-void
-do_purge(unsigned arena_ind) {
-	size_t mib[3];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-
-void
-do_decay(unsigned arena_ind) {
-	size_t mib[3];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-	expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[1] = (size_t)arena_ind;
-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
-	    "Unexpected mallctlbymib() failure");
-}
-
-static uint64_t
-get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
-	size_t mib[4];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-	expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[2] = (size_t)arena_ind;
-	uint64_t npurge = 0;
-	size_t sz = sizeof(npurge);
-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
-	    config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
-	return npurge;
-}
-
-static uint64_t
-get_arena_dirty_npurge(unsigned arena_ind) {
-	do_epoch();
-	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
-}
-
-static uint64_t
-get_arena_dirty_purged(unsigned arena_ind) {
-	do_epoch();
-	return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
-}
-
-static uint64_t
-get_arena_muzzy_npurge(unsigned arena_ind) {
-	do_epoch();
-	return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
-}
-
-static uint64_t
-get_arena_npurge(unsigned arena_ind) {
-	do_epoch();
-	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
-	    get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
-}
-
-static size_t
-get_arena_pdirty(unsigned arena_ind) {
-	do_epoch();
-	size_t mib[4];
-	size_t miblen = sizeof(mib)/sizeof(size_t);
-	expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
-	    "Unexpected mallctlnametomib() failure");
-	mib[2] = (size_t)arena_ind;
-	size_t pdirty;
-	size_t sz = sizeof(pdirty);
|
||||
expect_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
|
||||
"Unexpected mallctlbymib() failure");
|
||||
return pdirty;
|
||||
}
|
||||
|
||||
static size_t
|
||||
get_arena_pmuzzy(unsigned arena_ind) {
|
||||
do_epoch();
|
||||
size_t mib[4];
|
||||
size_t miblen = sizeof(mib)/sizeof(size_t);
|
||||
expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
|
||||
"Unexpected mallctlnametomib() failure");
|
||||
mib[2] = (size_t)arena_ind;
|
||||
size_t pmuzzy;
|
||||
size_t sz = sizeof(pmuzzy);
|
||||
expect_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
|
||||
"Unexpected mallctlbymib() failure");
|
||||
return pmuzzy;
|
||||
}
|
||||
|
||||
static void *
|
||||
do_mallocx(size_t size, int flags) {
|
||||
void *p = mallocx(size, flags);
|
||||
expect_ptr_not_null(p, "Unexpected mallocx() failure");
|
||||
return p;
|
||||
}
|
||||
|
||||
static void
|
||||
generate_dirty(unsigned arena_ind, size_t size) {
|
||||
int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
|
||||
void *p = do_mallocx(size, flags);
|
||||
dallocx(p, flags);
|
||||
}
|
||||
|
||||
TEST_BEGIN(test_decay_ticks) {
|
||||
test_skip_if(is_background_thread_enabled());
|
||||
test_skip_if(opt_hpa);
|
||||
|
@ -1,4 +1,5 @@
#include "test/jemalloc_test.h"
#include "test/guard.h"

#include "jemalloc/internal/safety_check.h"

@ -30,8 +31,18 @@ TEST_BEGIN(test_large_double_free_tcache) {

	test_large_double_free_pre();
	char *ptr = malloc(SC_LARGE_MINCLASS);
	bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
	free(ptr);
	if (!guarded) {
		free(ptr);
	} else {
		/*
		 * Skip because guarded extents may unguard immediately on
		 * deallocation, in which case the second free will crash
		 * before reaching the intended safety check.
		 */
		fake_abort_called = true;
	}
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
	test_large_double_free_post();
}
@ -43,8 +54,18 @@ TEST_BEGIN(test_large_double_free_no_tcache) {

	test_large_double_free_pre();
	char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
	bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
	if (!guarded) {
		dallocx(ptr, MALLOCX_TCACHE_NONE);
	} else {
		/*
		 * Skip because guarded extents may unguard immediately on
		 * deallocation, in which case the second free will crash
		 * before reaching the intended safety check.
		 */
		fake_abort_called = true;
	}
	test_large_double_free_post();
}
TEST_END

201
test/unit/guard.c
Normal file
@ -0,0 +1,201 @@
#include "test/jemalloc_test.h"
#include "test/arena_decay.h"
#include "test/guard.h"

#include "jemalloc/internal/guard.h"

static void
verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
	expect_true(extent_is_guarded(tsdn, ptr),
	    "All extents should be guarded.");
}

#define MAX_SMALL_ALLOCATIONS 4096
void *small_alloc[MAX_SMALL_ALLOCATIONS];

TEST_BEGIN(test_guarded_small) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned npages = 16, pages_found = 0, ends_found = 0;
	VARIABLE_ARRAY(uintptr_t, pages, npages);

	/* Allocate to get sanitized pointers. */
	size_t sz = PAGE / 8;
	unsigned n_alloc = 0;
	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
		void *ptr = malloc(sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		small_alloc[n_alloc] = ptr;
		verify_extent_guarded(tsdn, ptr);
		if ((uintptr_t)ptr % PAGE == 0) {
			pages[pages_found++] = (uintptr_t)ptr;
		}
		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
			ends_found++;
		}
		n_alloc++;
		if (pages_found == npages && ends_found == npages) {
			break;
		}
	}
	/* Should have found the ptrs being checked for overflow and underflow. */
	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
	expect_u_eq(ends_found, npages, "Could not find the expected page ends.");

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	for (unsigned i = 0; i < npages - 1; i++) {
		for (unsigned j = i + 1; j < npages; j++) {
			uintptr_t ptr_diff = pages[i] > pages[j] ?
			    pages[i] - pages[j] : pages[j] - pages[i];
			expect_zu_gt((size_t)ptr_diff, 2 * PAGE,
			    "Pages should not be next to each other.");
		}
	}

	for (unsigned i = 0; i < n_alloc; i++) {
		free(small_alloc[i]);
	}
}
TEST_END
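
The strict 2 * PAGE lower bound in the loop above follows from the assumed layout: even when two guarded extents are mapped back to back, two page-aligned user addresses from different extents are separated by at least one user page plus the two guard pages between them. An illustrative sketch (not part of the commit):

/*
 * Back-to-back guarded extents (assumed layout):
 *
 *   [guard][ A ... ][guard][guard][ B ... ][guard]
 *
 * B - A >= PAGE (A's user page) + 2 * PAGE (the two guards in
 * between) = 3 * PAGE, so ptr_diff > 2 * PAGE holds strictly.
 */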

TEST_BEGIN(test_guarded_large) {
	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	unsigned nlarge = 32;
	VARIABLE_ARRAY(uintptr_t, large, nlarge);

	/* Allocate to get sanitized pointers. */
	size_t large_sz = SC_LARGE_MINCLASS;
	for (unsigned i = 0; i < nlarge; i++) {
		void *ptr = malloc(large_sz);
		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
		verify_extent_guarded(tsdn, ptr);
		large[i] = (uintptr_t)ptr;
	}

	/* Verify the pages are not contiguous, i.e. separated by guards. */
	uintptr_t min_diff = (uintptr_t)-1;
	for (unsigned i = 0; i < nlarge; i++) {
		for (unsigned j = i + 1; j < nlarge; j++) {
			uintptr_t ptr_diff = large[i] > large[j] ?
			    large[i] - large[j] : large[j] - large[i];
			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
			    "Pages should not be next to each other.");
			if (ptr_diff < min_diff) {
				min_diff = ptr_diff;
			}
		}
	}
	expect_zu_ge((size_t)min_diff, large_sz + 2 * PAGE,
	    "Pages should not be next to each other.");

	for (unsigned i = 0; i < nlarge; i++) {
		free((void *)large[i]);
	}
}
TEST_END

static void
verify_pdirty(unsigned arena_ind, uint64_t expected) {
	uint64_t pdirty = get_arena_pdirty(arena_ind);
	expect_u64_eq(pdirty, expected / PAGE,
	    "Unexpected dirty page amount.");
}

static void
verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
	uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
	expect_u64_eq(pmuzzy, expected / PAGE,
	    "Unexpected muzzy page amount.");
}

TEST_BEGIN(test_guarded_decay) {
	unsigned arena_ind = do_arena_create(-1, -1);
	do_decay(arena_ind);
	do_purge(arena_ind);

	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	/* Verify that guarded extents decay as dirty. */
	size_t sz1 = PAGE, sz2 = PAGE * 2;
	/* W/o maps_coalesce, guarded extents are unguarded eagerly. */
	size_t add_guard_size = maps_coalesce ? 0 : PAGE_GUARDS_SIZE;
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should reuse the first extent. */
	generate_dirty(arena_ind, sz1);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	/* Should not reuse; expect new dirty pages. */
	generate_dirty(arena_ind, sz2);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	/* Should reuse dirty extents for the two mallocx. */
	void *p1 = do_mallocx(sz1, flags);
	verify_extent_guarded(tsdn, p1);
	verify_pdirty(arena_ind, sz2 + add_guard_size);

	void *p2 = do_mallocx(sz2, flags);
	verify_extent_guarded(tsdn, p2);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	dallocx(p1, flags);
	verify_pdirty(arena_ind, sz1 + add_guard_size);
	dallocx(p2, flags);
	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
	verify_pmuzzy(arena_ind, 0);

	do_purge(arena_ind);
	verify_pdirty(arena_ind, 0);
	verify_pmuzzy(arena_ind, 0);

	if (config_stats) {
		expect_u64_eq(get_arena_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_dirty_purged(arena_ind),
		    (sz1 + sz2 + 2 * add_guard_size) / PAGE,
		    "Expected purging to occur");
		expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
		    "Expected no muzzy purging");
	}

	if (opt_retain) {
		/*
		 * With retain, guarded extents are not mergeable and will be
		 * cached in ecache_retained. They should be reused.
		 */
		void *new_p1 = do_mallocx(sz1, flags);
		verify_extent_guarded(tsdn, new_p1);
		expect_ptr_eq(p1, new_p1, "Expect to reuse p1");

		void *new_p2 = do_mallocx(sz2, flags);
		verify_extent_guarded(tsdn, new_p2);
		expect_ptr_eq(p2, new_p2, "Expect to reuse p2");

		dallocx(new_p1, flags);
		verify_pdirty(arena_ind, sz1 + add_guard_size);
		dallocx(new_p2, flags);
		verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
		verify_pmuzzy(arena_ind, 0);
	}

	do_arena_destroy(arena_ind);
}
TEST_END
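
A note on the arithmetic the decay checks above rely on, under the assumption that PAGE_GUARDS_SIZE covers one guard page on each side (2 * PAGE):

/*
 * Assumed accounting, with maps_coalesce == false (eager unguard):
 *   generate_dirty(arena_ind, PAGE) dirties the user page plus both
 *   guard pages, so pdirty == (PAGE + PAGE_GUARDS_SIZE) / PAGE == 3.
 * With maps_coalesce, the guards stay in place across reuse and
 * add_guard_size == 0, so only the user pages are counted as dirty.
 */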

int
main(void) {
	return test(
	    test_guarded_small,
	    test_guarded_large,
	    test_guarded_decay);
}
3
test/unit/guard.sh
Normal file
@ -0,0 +1,3 @@
#!/bin/sh

export MALLOC_CONF="san_guard_large:1,san_guard_small:1"
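
The two options enable guarding for large and small allocations respectively, so this script runs the unit test with both on. If they are surfaced through the usual opt.* mallctl namespace (an assumption; the names below simply mirror the MALLOC_CONF keys), a test could read them back like this:

bool guard_large, guard_small;
size_t sz = sizeof(bool);
/* Hypothetical reads; both should come back true under this script. */
mallctl("opt.san_guard_large", (void *)&guard_large, &sz, NULL, 0);
mallctl("opt.san_guard_small", (void *)&guard_small, &sz, NULL, 0);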

@ -80,11 +80,11 @@ TEST_BEGIN(test_alloc_max) {

	/* Small max */
	bool deferred_work_generated;
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false,
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Allocation of small max failed");
	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
	    &deferred_work_generated);
	    false, &deferred_work_generated);
	expect_ptr_null(edata, "Allocation of larger than small max succeeded");

	destroy_test_data(shard);
@ -188,7 +188,7 @@ TEST_BEGIN(test_stress) {
			size_t npages = npages_min + prng_range_zu(&prng_state,
			    npages_max - npages_min);
			edata_t *edata = pai_alloc(tsdn, &shard->pai,
			    npages * PAGE, PAGE, false,
			    npages * PAGE, PAGE, false, false,
			    &deferred_work_generated);
			assert_ptr_not_null(edata,
			    "Unexpected allocation failure");
@ -263,7 +263,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
	 */
	for (size_t i = 0; i < NALLOCS / 2; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	edata_list_active_t allocs_list;
@ -299,7 +300,8 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
	/* Reallocate (individually), and ensure reuse and contiguity. */
	for (size_t i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
	}
	void *new_base = edata_base_get(allocs[0]);
@ -374,7 +376,7 @@ TEST_BEGIN(test_defer_time) {
	edata_t *edatas[HUGEPAGE_PAGES];
	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    &deferred_work_generated);
		    false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	hpa_shard_do_deferred_work(tsdn, shard);
@ -408,7 +410,7 @@ TEST_BEGIN(test_defer_time) {
	 */
	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
		    &deferred_work_generated);
		    false, &deferred_work_generated);
		expect_ptr_not_null(edatas[i], "Unexpected null edata");
	}
	/*

@ -128,6 +128,8 @@ TEST_BEGIN(test_hpa_background_thread_purges) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_enabled());

	unsigned arena_ind = create_arena();
	/*
@ -142,6 +144,8 @@ TEST_BEGIN(test_hpa_background_thread_enable_disable) {
	test_skip_if(!config_stats);
	test_skip_if(!hpa_supported());
	test_skip_if(!have_background_thread);
	/* Skip since guarded pages cannot be allocated from hpa. */
	test_skip_if(san_enabled());

	unsigned arena_ind = create_arena();

@ -91,7 +91,7 @@ do_alloc_free_purge(void *arg) {
		bool deferred_work_generated;
		edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
		    PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
		    &deferred_work_generated);
		    /* guarded */ false, &deferred_work_generated);
		assert_ptr_not_null(edata, "");
		pa_dalloc(TSDN_NULL, &test_data->shard, edata,
		    &deferred_work_generated);
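
The call-site churn above reflects the widened allocator entry points: pa_alloc() and pai_alloc() now take a guarded flag just before the deferred-work out-parameter. The shape of an updated pa_alloc() call, with argument roles taken from the diff:

edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
    PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
    /* guarded */ false, &deferred_work_generated);

Plain callers pass false; only the guarded-extent paths request true, and backends that cannot supply guards, like the sec test allocator further down, simply assert(!guarded).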

@ -1,5 +1,6 @@
#include "test/jemalloc_test.h"

#include "jemalloc/internal/guard.h"
#include "jemalloc/internal/spin.h"

static unsigned arena_ind;
@ -103,7 +104,8 @@ TEST_BEGIN(test_retained) {

	arena_ind = do_arena_create(NULL);
	sz = nallocx(HUGEPAGE, 0);
	esz = sz + sz_large_pad;
	size_t guard_sz = san_enabled() ? PAGE_GUARDS_SIZE : 0;
	esz = sz + sz_large_pad + guard_sz;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

@ -133,7 +135,8 @@ TEST_BEGIN(test_retained) {
	 */
	do_refresh();

	size_t allocated = esz * nthreads * PER_THD_NALLOCS;
	size_t allocated = (esz - guard_sz) * nthreads *
	    PER_THD_NALLOCS;
	size_t active = do_get_active(arena_ind);
	expect_zu_le(allocated, active, "Unexpected active memory");
	size_t mapped = do_get_mapped(arena_ind);
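
Under san, each retained extent carries its guard pages, so the expected extent size esz grows by guard_sz while the application-visible total is shrunk back by the same amount per allocation. Concretely (assuming PAGE_GUARDS_SIZE is the combined size of the two guard pages):

/*
 * esz       = sz + sz_large_pad + guard_sz   (per-extent footprint)
 * allocated = (esz - guard_sz) * nthreads * PER_THD_NALLOCS
 *
 * i.e. guard pages count toward active/mapped but are excluded from
 * the allocated-bytes expectation.
 */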

@ -50,7 +50,9 @@ test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,

static inline edata_t *
pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero, bool *deferred_work_generated) {
    size_t alignment, bool zero, bool guarded,
    bool *deferred_work_generated) {
	assert(!guarded);
	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
	*deferred_work_generated = false;
	if (ta->alloc_fail) {
@ -182,10 +184,12 @@ TEST_BEGIN(test_reuse) {
	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
	for (int i = 0; i < NALLOCS; i++) {
		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
	}
	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
@ -216,9 +220,11 @@ TEST_BEGIN(test_reuse) {
	 */
	for (int i = 0; i < NALLOCS; i++) {
		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_eq(one_page[i], alloc1,
		    "Got unexpected allocation");
		expect_ptr_eq(two_page[i], alloc2,
@ -255,11 +261,12 @@ TEST_BEGIN(test_auto_flush) {
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    &deferred_work_generated);
	    /* guarded */ false, &deferred_work_generated);
	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
	expect_zu_le(NALLOCS + 1, max_allocs,
@ -310,7 +317,8 @@ do_disable_flush_test(bool is_disable) {
	    /* max_bytes */ NALLOCS * PAGE);
	for (int i = 0; i < NALLOCS; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
	}
	/* Free all but the last alloc. */
@ -383,7 +391,8 @@ TEST_BEGIN(test_max_alloc_respected) {
		expect_zu_eq(i, ta.dalloc_count,
		    "Incorrect number of deallocations");
		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
		    PAGE, /* zero */ false, &deferred_work_generated);
		    PAGE, /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_ptr_not_null(edata, "Unexpected alloc failure");
		expect_zu_eq(i + 1, ta.alloc_count,
		    "Incorrect number of allocations");
@ -410,7 +419,8 @@ TEST_BEGIN(test_expand_shrink_delegate) {
	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
	    /* max_bytes */ 1000 * PAGE);
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, &deferred_work_generated);
	    /* zero */ false, /* guarded */ false,
	    &deferred_work_generated);
	expect_ptr_not_null(edata, "Unexpected alloc failure");

	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
@ -450,7 +460,8 @@ TEST_BEGIN(test_nshards_0) {

	bool deferred_work_generated;
	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
	    /* zero */ false, &deferred_work_generated);
	    /* zero */ false, /* guarded */ false,
	    &deferred_work_generated);
	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);

	/* Both operations should have gone directly to the fallback. */
@ -492,7 +503,8 @@ TEST_BEGIN(test_stats_simple) {
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}

@ -505,7 +517,8 @@ TEST_BEGIN(test_stats_simple) {
	}
	for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
		allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
	}
}
@ -534,13 +547,14 @@ TEST_BEGIN(test_stats_auto_flush) {
	bool deferred_work_generated;

	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    &deferred_work_generated);
	    /* guarded */ false, &deferred_work_generated);
	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
	    &deferred_work_generated);
	    /* guarded */ false, &deferred_work_generated);

	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
	}

	for (size_t i = 0; i < FLUSH_PAGES; i++) {
@ -580,7 +594,8 @@ TEST_BEGIN(test_stats_manual_flush) {
	edata_t *allocs[FLUSH_PAGES];
	for (size_t i = 0; i < FLUSH_PAGES; i++) {
		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
		    /* zero */ false, &deferred_work_generated);
		    /* zero */ false, /* guarded */ false,
		    &deferred_work_generated);
		expect_stats_pages(tsdn, &sec, 0);
	}
