Implement guard pages.
Add guarded extents: regular extents surrounded by guard pages (mprotect-ed). To reduce syscalls, small guarded extents are cached as a separate eset in ecache, and they decay through the dirty / muzzy / retained pipeline as usual.
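The mechanism itself is classic guard-page hardening: each guarded extent gets one PROT_NONE page on each side, so a linear overflow (or underflow) faults immediately instead of silently corrupting a neighboring extent. A minimal standalone sketch of the idea, using plain POSIX mmap/mprotect rather than jemalloc's internal API (the sizes and names below are illustrative):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void) {
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t usable = 4 * page;
    size_t total = usable + 2 * page;   /* head guard + tail guard */

    char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return 1;
    }
    /* Guard pages bracket the usable region. */
    mprotect(base, page, PROT_NONE);                 /* head guard */
    mprotect(base + page + usable, page, PROT_NONE); /* tail guard */

    char *usable_start = base + page;
    usable_start[0] = 'x';          /* fine */
    usable_start[usable - 1] = 'y'; /* fine */
    /* usable_start[usable] = 'z'; would SIGSEGV on the tail guard. */
    printf("guarded region at %p (%zu usable bytes)\n",
        (void *)usable_start, usable);
    munmap(base, total);
    return 0;
}

What the commit adds on top of this basic trick is the plumbing that makes it cheap: the mprotect calls are routed through ehooks, and guarded extents are recycled through a dedicated eset so the guards usually survive across reuse.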
src/arena.c (11 changed lines)
@@ -6,6 +6,7 @@
 #include "jemalloc/internal/ehooks.h"
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/safety_check.h"
@@ -327,9 +328,10 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     szind_t szind = sz_size2index(usize);
     size_t esize = usize + sz_large_pad;
 
+    bool guarded = large_extent_decide_guard(tsdn, arena_get_ehooks(arena),
+        esize, alignment);
     edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
-        /* slab */ false, szind, zero, &deferred_work_generated);
+        /* slab */ false, szind, zero, guarded, &deferred_work_generated);
     assert(deferred_work_generated == false);
 
     if (edata != NULL) {
@@ -827,9 +829,10 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
 
+    bool guarded = slab_extent_decide_guard(tsdn, arena_get_ehooks(arena));
     edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
-        PAGE, /* slab */ true, /* szind */ binind, /* zero */ false,
-        &deferred_work_generated);
+        /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
+        /* zero */ false, guarded, &deferred_work_generated);
 
     if (deferred_work_generated) {
         arena_handle_deferred_work(tsdn, arena);
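large_extent_decide_guard() and slab_extent_decide_guard() are declared in guard.h, which this diff does not show. Judging by the SAN_GUARD_*_EVERY_N_EXTENTS_DEFAULT defaults and the tsd_san_extents_until_guard_* counters initialized in src/guard.c below, they plausibly implement per-thread every-N sampling, roughly along these lines (a hedged sketch; the helper name and exact reset behavior are assumptions):

#include <stdbool.h>
#include <stddef.h>

/*
 * Hypothetical stand-in for the decide_guard predicates: guard every N-th
 * eligible extent, tracked by a per-thread countdown.
 */
static bool
decide_guard_every_n(size_t *extents_until_guard, size_t every_n) {
    if (every_n == 0) {
        return false;   /* guarding disabled */
    }
    if (--*extents_until_guard == 0) {
        *extents_until_guard = every_n; /* reset the countdown */
        return true;                    /* guard this extent */
    }
    return false;
}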
src/ecache.c
@@ -1,6 +1,8 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
+#include "jemalloc/internal/guard.h"
+
 bool
 ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
     bool delay_coalesce) {
@@ -12,6 +14,8 @@ ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
     ecache->ind = ind;
     ecache->delay_coalesce = delay_coalesce;
     eset_init(&ecache->eset, state);
+    eset_init(&ecache->guarded_eset, state);
+
     return false;
 }
src/ehooks.c (10 changed lines)
@@ -244,6 +244,16 @@ ehooks_default_zero_impl(void *addr, size_t size) {
     }
 }
 
+void
+ehooks_default_guard_impl(void *guard1, void *guard2) {
+    pages_mark_guards(guard1, guard2);
+}
+
+void
+ehooks_default_unguard_impl(void *guard1, void *guard2) {
+    pages_unmark_guards(guard1, guard2);
+}
+
 const extent_hooks_t ehooks_default_extent_hooks = {
     ehooks_default_alloc,
     ehooks_default_dalloc,
src/extent.c (78 changed lines)
@@ -37,14 +37,14 @@ static atomic_zu_t highpages;
 static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
 static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
-    bool zero, bool *commit, bool growing_retained);
+    bool zero, bool *commit, bool growing_retained, bool guarded);
 static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool *coalesced);
 static void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata);
 static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
     ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
-    bool zero, bool *commit);
+    bool zero, bool *commit, bool guarded);
 static edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t alignment, bool zero, bool *commit);
 
@@ -80,7 +80,8 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 edata_t *
 ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *expand_edata, size_t size, size_t alignment, bool zero) {
+    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+    bool guarded) {
     assert(size != 0);
     assert(alignment != 0);
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -88,14 +89,15 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 
     bool commit = true;
     edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
-        size, alignment, zero, &commit, false);
+        size, alignment, zero, &commit, false, guarded);
     assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
     return edata;
 }
 
 edata_t *
 ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
-    edata_t *expand_edata, size_t size, size_t alignment, bool zero) {
+    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+    bool guarded) {
     assert(size != 0);
     assert(alignment != 0);
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
@@ -103,7 +105,7 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 
     bool commit = true;
     edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
-        size, alignment, zero, &commit);
+        size, alignment, zero, &commit, guarded);
     if (edata == NULL) {
         if (opt_retain && expand_edata != NULL) {
             /*
@@ -114,6 +116,14 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
              */
             return NULL;
         }
+        if (guarded) {
+            /*
+             * Means no cached guarded extents available (and no
+             * grow_retained was attempted).  The pac_alloc flow
+             * will alloc regular extents to make new guarded ones.
+             */
+            return NULL;
+        }
         void *new_addr = (expand_edata == NULL) ? NULL :
             edata_past_get(expand_edata);
         edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
@@ -151,9 +161,19 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     edata_t *edata;
     while (true) {
         /* Get the LRU extent, if any. */
-        edata = edata_list_inactive_first(&ecache->eset.lru);
+        eset_t *eset = &ecache->eset;
+        edata = edata_list_inactive_first(&eset->lru);
         if (edata == NULL) {
-            goto label_return;
+            /*
+             * Next check if there are guarded extents.  They are
+             * more expensive to purge (since they are not
+             * mergeable), thus in favor of caching them longer.
+             */
+            eset = &ecache->guarded_eset;
+            edata = edata_list_inactive_first(&eset->lru);
+            if (edata == NULL) {
+                goto label_return;
+            }
         }
         /* Check the eviction limit. */
         size_t extents_npages = ecache_npages_get(ecache);
@@ -161,7 +181,7 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
             edata = NULL;
             goto label_return;
         }
-        eset_remove(&ecache->eset, edata);
+        eset_remove(eset, edata);
         if (!ecache->delay_coalesce) {
             break;
         }
@@ -234,17 +254,19 @@ extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
     assert(edata_state_get(edata) == extent_state_active);
 
     emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
-    eset_insert(&ecache->eset, edata);
+    eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
+        &ecache->eset;
+    eset_insert(eset, edata);
 }
 
 static void
-extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
     edata_t *edata) {
     assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
     assert(edata_state_get(edata) == ecache->state ||
         edata_state_get(edata) == extent_state_merging);
 
-    eset_remove(&ecache->eset, edata);
+    eset_remove(eset, edata);
     emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
 }
 
@@ -350,7 +372,8 @@ extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
  */
 static edata_t *
 extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
-    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment) {
+    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+    bool guarded) {
     malloc_mutex_assert_owner(tsdn, &ecache->mtx);
     assert(alignment > 0);
     if (config_debug && expand_edata != NULL) {
@@ -366,6 +389,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     }
 
     edata_t *edata;
+    eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
     if (expand_edata != NULL) {
         edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
             expand_edata, EXTENT_PAI_PAC, ecache->state);
@@ -382,7 +406,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
          * If split and merge are not allowed (Windows w/o retain), try
          * exact fit only.
          */
-        bool exact_only = (!maps_coalesce && !opt_retain);
+        bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
         /*
          * A large extent might be broken up from its original size to
          * some small size to satisfy a small request.  When that small
@@ -394,13 +418,13 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
          */
         unsigned lg_max_fit = ecache->delay_coalesce
             ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
-        edata = eset_fit(&ecache->eset, size, alignment, exact_only,
-            lg_max_fit);
+        edata = eset_fit(eset, size, alignment, exact_only, lg_max_fit);
     }
     if (edata == NULL) {
         return NULL;
     }
-    extent_activate_locked(tsdn, pac, ecache, edata);
+    assert(!guarded || edata_guarded_get(edata));
+    extent_activate_locked(tsdn, pac, ecache, eset, edata);
 
     return edata;
 }
@@ -551,13 +575,14 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 static edata_t *
 extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *expand_edata, size_t size, size_t alignment, bool zero,
-    bool *commit, bool growing_retained) {
+    bool *commit, bool growing_retained, bool guarded) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+    assert(!guarded || expand_edata == NULL);
 
     malloc_mutex_lock(tsdn, &ecache->mtx);
     edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
-        expand_edata, size, alignment);
+        expand_edata, size, alignment, guarded);
     if (edata == NULL) {
         malloc_mutex_unlock(tsdn, &ecache->mtx);
         return NULL;
@@ -734,7 +759,7 @@ label_err:
 static edata_t *
 extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     edata_t *expand_edata, size_t size, size_t alignment, bool zero,
-    bool *commit) {
+    bool *commit, bool guarded) {
     assert(size != 0);
     assert(alignment != 0);
 
@@ -742,13 +767,13 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
     edata_t *edata = extent_recycle(tsdn, pac, ehooks,
         &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
-        /* growing_retained */ true);
+        /* growing_retained */ true, guarded);
     if (edata != NULL) {
         malloc_mutex_unlock(tsdn, &pac->grow_mtx);
         if (config_prof) {
             extent_gdump_add(tsdn, edata);
         }
-    } else if (opt_retain && expand_edata == NULL) {
+    } else if (opt_retain && expand_edata == NULL && !guarded) {
         edata = extent_grow_retained(tsdn, pac, ehooks, size,
             alignment, zero, commit);
         /* extent_grow_retained() always releases pac->grow_mtx. */
@@ -910,6 +935,9 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
     emap_assert_mapped(tsdn, pac->emap, edata);
 
+    if (edata_guarded_get(edata)) {
+        goto label_skip_coalesce;
+    }
     if (!ecache->delay_coalesce) {
         edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
             NULL);
@@ -931,6 +959,7 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
             return;
         }
     }
+label_skip_coalesce:
     extent_deactivate_locked(tsdn, pac, ecache, edata);
 
     malloc_mutex_unlock(tsdn, &ecache->mtx);
@@ -981,6 +1010,11 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
     /* Avoid calling the default extent_dalloc unless have to. */
     if (!ehooks_dalloc_will_fail(ehooks)) {
+        /* Restore guard pages for dalloc / unmap. */
+        if (edata_guarded_get(edata)) {
+            assert(ehooks_are_default(ehooks));
+            unguard_pages(tsdn, ehooks, edata, pac->emap);
+        }
         /*
          * Deregister first to avoid a race with other allocating
          * threads, and reregister if deallocation fails.
src/guard.c (new file, 63 lines)
@@ -0,0 +1,63 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/guard.h"
+#include "jemalloc/internal/tsd.h"
+
+/* The sanitizer options. */
+size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
+size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
+
+void
+guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
+    emap_deregister_boundary(tsdn, emap, edata);
+
+    size_t size_with_guards = edata_size_get(edata);
+    size_t usize = size_with_guards - PAGE_GUARDS_SIZE;
+
+    uintptr_t guard1 = (uintptr_t)edata_base_get(edata);
+    uintptr_t addr = guard1 + PAGE;
+    uintptr_t guard2 = addr + usize;
+
+    assert(edata_state_get(edata) == extent_state_active);
+    ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
+
+    /* Update the guarded addr and usable size of the edata. */
+    edata_size_set(edata, usize);
+    edata_addr_set(edata, (void *)addr);
+    edata_guarded_set(edata, true);
+
+    /* The new boundary will be registered on the pa_alloc path. */
+}
+
+void
+unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
+    /* Remove the inner boundary which no longer exists. */
+    emap_deregister_boundary(tsdn, emap, edata);
+
+    size_t size = edata_size_get(edata);
+    size_t size_with_guards = size + PAGE_GUARDS_SIZE;
+
+    uintptr_t addr = (uintptr_t)edata_base_get(edata);
+    uintptr_t guard1 = addr - PAGE;
+    uintptr_t guard2 = addr + size;
+
+    assert(edata_state_get(edata) == extent_state_active);
+    ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
+
+    /* Update the true addr and usable size of the edata. */
+    edata_size_set(edata, size_with_guards);
+    edata_addr_set(edata, (void *)guard1);
+    edata_guarded_set(edata, false);
+
+    /* Then re-register the outer boundary including the guards. */
+    emap_register_boundary(tsdn, emap, edata, SC_NSIZES, /* slab */ false);
+}
+
+void
+tsd_san_init(tsd_t *tsd) {
+    *tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
+    *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
+}
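In effect, guard_pages() shrinks the edata's view of itself so the usable region sits strictly between the two guards, and unguard_pages() reverses exactly the same arithmetic. Assuming PAGE_GUARDS_SIZE is two pages, one per side, which is what the computations above imply, the transformation reduces to this illustrative helper (not part of the commit):

#include <stddef.h>
#include <stdint.h>

#define PAGE ((uintptr_t)4096)      /* assumed 4 KiB pages */
#define PAGE_GUARDS_SIZE (2 * PAGE) /* assumed: one guard page per side */

/* Hypothetical mirror of the edata fields that guard_pages() rewrites. */
typedef struct {
    uintptr_t addr; /* usable base the application sees */
    size_t usize;   /* usable size, excluding the guards */
} guarded_view_t;

static guarded_view_t
guarded_view(uintptr_t base, size_t size_with_guards) {
    guarded_view_t v;
    v.addr = base + PAGE;                          /* skip guard1 */
    v.usize = size_with_guards - PAGE_GUARDS_SIZE; /* drop both guards */
    /*
     * E.g. for a 24 KiB extent at base: guard1 = base, usable addr =
     * base + 4 KiB, usize = 16 KiB, guard2 = base + 20 KiB.
     */
    return v;
}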
src/hpa.c
@@ -9,7 +9,7 @@
 #define HPA_EDEN_SIZE (128 * HUGEPAGE)
 
 static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
 static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
 static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -750,8 +750,9 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 
 static edata_t *
 hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool *deferred_work_generated) {
+    bool guarded, bool *deferred_work_generated) {
     assert((size & PAGE_MASK) == 0);
+    assert(!guarded);
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
 
@@ -796,7 +797,6 @@ hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
     assert(edata_state_get(edata) == extent_state_active);
     assert(edata_arena_ind_get(edata) == shard->ind);
     assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
-    assert(!edata_slab_get(edata));
     assert(edata_committed_get(edata));
     assert(edata_base_get(edata) != NULL);
 
@@ -865,6 +865,7 @@ hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
 static void
 hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     bool *deferred_work_generated) {
+    assert(!edata_guarded_get(edata));
     /* Just a dalloc_batch of size 1; this lets us share logic. */
     edata_list_active_t dalloc_list;
     edata_list_active_init(&dalloc_list);
src/jemalloc.c
@@ -10,6 +10,7 @@
 #include "jemalloc/internal/extent_dss.h"
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/fxp.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/hook.h"
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/log.h"
@@ -1616,6 +1617,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
             }
             CONF_CONTINUE;
         }
+
+            CONF_HANDLE_SIZE_T(opt_san_guard_small,
+                "san_guard_small", 0, SIZE_T_MAX,
+                CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+            CONF_HANDLE_SIZE_T(opt_san_guard_large,
+                "san_guard_large", 0, SIZE_T_MAX,
+                CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+
             CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
 #undef CONF_ERROR
 #undef CONF_CONTINUE
src/pa.c (27 changed lines)
@@ -1,6 +1,7 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/hpa.h"
 
 static void
@@ -118,15 +119,17 @@ pa_get_pai(pa_shard_t *shard, edata_t *edata) {
 
 edata_t *
 pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool zero, bool *deferred_work_generated) {
+    bool slab, szind_t szind, bool zero, bool guarded,
+    bool *deferred_work_generated) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
+    assert(!guarded || alignment <= PAGE);
 
     edata_t *edata = NULL;
     *deferred_work_generated = false;
-    if (pa_shard_uses_hpa(shard)) {
+    if (!guarded && pa_shard_uses_hpa(shard)) {
         edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
-            zero, deferred_work_generated);
+            zero, /* guarded */ false, deferred_work_generated);
     }
     /*
      * Fall back to the PAC if the HPA is off or couldn't serve the given
@@ -134,10 +137,10 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
      */
     if (edata == NULL) {
         edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
-            deferred_work_generated);
+            guarded, deferred_work_generated);
     }
 
     if (edata != NULL) {
         assert(edata_size_get(edata) == size);
         pa_nactive_add(shard, size >> LG_PAGE);
         emap_remap(tsdn, shard->emap, edata, szind, slab);
         edata_szind_set(edata, szind);
@@ -145,8 +148,6 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
         if (slab && (size > 2 * PAGE)) {
             emap_register_interior(tsdn, shard->emap, edata, szind);
         }
-    }
-    if (edata != NULL) {
         assert(edata_arena_ind_get(edata) == shard->ind);
     }
     return edata;
@@ -158,7 +159,9 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     assert(new_size > old_size);
     assert(edata_size_get(edata) == old_size);
     assert((new_size & PAGE_MASK) == 0);
+    if (edata_guarded_get(edata)) {
+        return true;
+    }
     size_t expand_amount = new_size - old_size;
 
     pai_t *pai = pa_get_pai(shard, edata);
@@ -181,6 +184,9 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     assert(new_size < old_size);
     assert(edata_size_get(edata) == old_size);
     assert((new_size & PAGE_MASK) == 0);
+    if (edata_guarded_get(edata)) {
+        return true;
+    }
     size_t shrink_amount = old_size - new_size;
 
     pai_t *pai = pa_get_pai(shard, edata);
@@ -202,7 +208,10 @@ pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
     emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
     if (edata_slab_get(edata)) {
         emap_deregister_interior(tsdn, shard->emap, edata);
-        edata_slab_set(edata, false);
+        /*
+         * The slab state of the extent isn't cleared.  It may be used
+         * by the pai implementation, e.g. to make caching decisions.
+         */
     }
     edata_addr_set(edata, edata_base_get(edata));
     edata_szind_set(edata, SC_NSIZES);
src/pac.c (86 changed lines)
@@ -2,9 +2,10 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/guard.h"
 
 static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
 static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -109,28 +110,66 @@ pac_may_have_muzzy(pac_t *pac) {
 }
 
 static edata_t *
-pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
-    bool zero, bool *deferred_work_generated) {
-    pac_t *pac = (pac_t *)self;
-
-    *deferred_work_generated = false;
-
-    ehooks_t *ehooks = pac_ehooks_get(pac);
+pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+    size_t alignment, bool zero, bool guarded) {
+    assert(!guarded || alignment <= PAGE);
+
     edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
-        NULL, size, alignment, zero);
+        NULL, size, alignment, zero, guarded);
 
     if (edata == NULL && pac_may_have_muzzy(pac)) {
         edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
-            NULL, size, alignment, zero);
+            NULL, size, alignment, zero, guarded);
     }
     if (edata == NULL) {
         edata = ecache_alloc_grow(tsdn, pac, ehooks,
-            &pac->ecache_retained, NULL, size, alignment, zero);
+            &pac->ecache_retained, NULL, size, alignment, zero,
+            guarded);
         if (config_stats && edata != NULL) {
             atomic_fetch_add_zu(&pac->stats->pac_mapped, size,
                 ATOMIC_RELAXED);
         }
     }
 
     return edata;
 }
+
+static edata_t *
+pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+    size_t alignment, bool zero) {
+    assert(alignment <= PAGE);
+
+    size_t size_with_guards = size + PAGE_GUARDS_SIZE;
+    /* Alloc a non-guarded extent first. */
+    edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
+        /* alignment */ PAGE, zero, /* guarded */ false);
+    if (edata != NULL) {
+        /* Add guards around it. */
+        assert(edata_size_get(edata) == size_with_guards);
+        guard_pages(tsdn, ehooks, edata, pac->emap);
+    }
+    assert(edata == NULL || (edata_guarded_get(edata) &&
+        edata_size_get(edata) == size));
+
+    return edata;
+}
+
+static edata_t *
+pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+    bool zero, bool guarded, bool *deferred_work_generated) {
+    *deferred_work_generated = false;
+
+    pac_t *pac = (pac_t *)self;
+    ehooks_t *ehooks = pac_ehooks_get(pac);
+
+    edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
+        zero, guarded);
+    if (edata == NULL && guarded) {
+        /* No cached guarded extents; creating a new one. */
+        edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
+            alignment, zero);
+    }
+
+    return edata;
+}
@@ -149,15 +188,15 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
         return true;
     }
     edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
-        edata, expand_amount, PAGE, zero);
+        edata, expand_amount, PAGE, zero, /* guarded */ false);
     if (trail == NULL) {
         trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
-            edata, expand_amount, PAGE, zero);
+            edata, expand_amount, PAGE, zero, /* guarded */ false);
     }
     if (trail == NULL) {
         trail = ecache_alloc_grow(tsdn, pac, ehooks,
             &pac->ecache_retained, edata, expand_amount, PAGE,
-            zero);
+            zero, /* guarded */ false);
         mapped_add = expand_amount;
     }
     if (trail == NULL) {
@@ -203,6 +242,27 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     bool *deferred_work_generated) {
     pac_t *pac = (pac_t *)self;
     ehooks_t *ehooks = pac_ehooks_get(pac);
+
+    if (edata_guarded_get(edata)) {
+        /*
+         * Because cached guarded extents do exact fit only, large
+         * guarded extents are restored on dalloc eagerly (otherwise
+         * they will not be reused efficiently).  Slab sizes have a
+         * limited number of size classes, and tend to cycle faster.
+         *
+         * In the case where coalesce is restrained (VirtualFree on
+         * Windows), guarded extents are also not cached -- otherwise
+         * during arena destroy / reset, the retained extents would not
+         * be whole regions (i.e. they are split between regular and
+         * guarded).
+         */
+        if (!edata_slab_get(edata) || !maps_coalesce) {
+            assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
+                !maps_coalesce);
+            unguard_pages(tsdn, ehooks, edata, pac->emap);
+        }
+    }
+
     ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata);
     /* Purging of deallocated pages is deferred */
     *deferred_work_generated = true;
src/pages.c (47 changed lines)
@@ -316,14 +316,10 @@ pages_unmap(void *addr, size_t size) {
 }
 
 static bool
-pages_commit_impl(void *addr, size_t size, bool commit) {
+os_pages_commit(void *addr, size_t size, bool commit) {
     assert(PAGE_ADDR2BASE(addr) == addr);
     assert(PAGE_CEILING(size) == size);
 
-    if (os_overcommits) {
-        return true;
-    }
-
 #ifdef _WIN32
     return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
         PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
@@ -348,6 +344,15 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
 #endif
 }
 
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit) {
+    if (os_overcommits) {
+        return true;
+    }
+
+    return os_pages_commit(addr, size, commit);
+}
+
 bool
 pages_commit(void *addr, size_t size) {
     return pages_commit_impl(addr, size, true);
@@ -358,6 +363,38 @@ pages_decommit(void *addr, size_t size) {
     return pages_commit_impl(addr, size, false);
 }
 
+void
+pages_mark_guards(void *head, void *tail) {
+    assert(head != NULL && tail != NULL);
+    assert((uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+    mprotect(head, PAGE, PROT_NONE);
+    mprotect(tail, PAGE, PROT_NONE);
+#else
+    /* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
+    os_pages_commit(head, PAGE, false);
+    os_pages_commit(tail, PAGE, false);
+#endif
+}
+
+void
+pages_unmark_guards(void *head, void *tail) {
+    assert(head != NULL && tail != NULL);
+    assert((uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+    size_t range = (uintptr_t)tail - (uintptr_t)head + PAGE;
+    if (range <= SC_LARGE_MINCLASS) {
+        mprotect(head, range, PROT_READ | PROT_WRITE);
+    } else {
+        mprotect(head, PAGE, PROT_READ | PROT_WRITE);
+        mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+    }
+#else
+    os_pages_commit(head, PAGE, true);
+    os_pages_commit(tail, PAGE, true);
+#endif
+}
+
 bool
 pages_purge_lazy(void *addr, size_t size) {
     assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
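For callers of this pair, head and tail point at the guard pages themselves; guard.c above passes guard1 = base and guard2 = addr + usize. A hedged usage sketch in terms of the new API (the wrapper function is hypothetical and assumes jemalloc's internal pages.h is in scope):

/* Hypothetical illustration; PAGE and the pages_* functions are jemalloc's. */
static void
guard_toggle_example(void *base, size_t usize) {
    void *head = base;                                     /* guard1: first page */
    void *tail = (void *)((uintptr_t)base + PAGE + usize); /* guard2: last page */

    pages_mark_guards(head, tail);   /* both guards become inaccessible */
    pages_unmark_guards(head, tail); /* restored to readable / writable */
}

Note the small-range fast path in pages_unmark_guards(): when the whole guarded extent fits under SC_LARGE_MINCLASS, a single mprotect over [head, tail + PAGE) replaces two calls, consistent with the commit's stated goal of reducing syscalls.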
src/pai.c
@@ -7,7 +7,7 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
     for (size_t i = 0; i < nallocs; i++) {
         bool deferred_by_alloc = false;
         edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
-            /* zero */ false, &deferred_by_alloc);
+            /* zero */ false, /* guarded */ false, &deferred_by_alloc);
         *deferred_work_generated |= deferred_by_alloc;
         if (edata == NULL) {
             return i;
src/sec.c
@@ -4,7 +4,7 @@
 #include "jemalloc/internal/sec.h"
 
 static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
 static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -218,8 +218,9 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
 
 static edata_t *
 sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool *deferred_work_generated) {
+    bool guarded, bool *deferred_work_generated) {
     assert((size & PAGE_MASK) == 0);
+    assert(!guarded);
 
     sec_t *sec = (sec_t *)self;
     *deferred_work_generated = false;
@@ -227,7 +228,7 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
     if (zero || alignment > PAGE || sec->opts.nshards == 0
         || size > sec->opts.max_alloc) {
         return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
-            deferred_work_generated);
+            /* guarded */ false, deferred_work_generated);
     }
     pszind_t pszind = sz_psz2ind(size);
     sec_shard_t *shard = sec_shard_pick(tsdn, sec);
@@ -250,7 +251,7 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
                 size);
         } else {
             edata = pai_alloc(tsdn, sec->fallback, size, alignment,
-                zero, deferred_work_generated);
+                zero, /* guarded */ false, deferred_work_generated);
         }
     }
     return edata;
src/tsd.c
@@ -2,6 +2,7 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/guard.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/rtree.h"
 
@@ -242,6 +243,7 @@ tsd_data_init(tsd_t *tsd) {
     rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
     tsd_prng_state_init(tsd);
     tsd_te_init(tsd); /* event_init may use the prng state above. */
+    tsd_san_init(tsd);
     return tsd_tcache_enabled_data_init(tsd);
 }
 
@@ -269,6 +271,7 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
     *tsd_reentrancy_levelp_get(tsd) = 1;
     tsd_prng_state_init(tsd);
     tsd_te_init(tsd); /* event_init may use the prng state above. */
+    tsd_san_init(tsd);
     assert_tsd_data_cleanup_done(tsd);
 
     return false;