San: Bump alloc frequently reused guarded allocations

To utilize a separate retained area for guarded extents, use bump alloc
to allocate those extents.
Author: Alex Lapenkou, 2021-10-28 12:08:10 -07:00
Committed by: Alexander Lapenkov
Parent: f56f5b9930
Commit: 800ce49c19
10 changed files with 106 additions and 56 deletions
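
In rough outline, the change routes frequently reused guarded extents through a bump allocator that carves them out of a dedicated retained region, instead of pulling each one from the general-purpose extent paths. A minimal sketch of the bump step, with hypothetical names and a hard-coded page size (illustration only, not jemalloc code):

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE 4096 /* hypothetical page size, for illustration only */

typedef struct sketch_bump_region_s {
	uintptr_t cur; /* next free byte in the retained region */
	uintptr_t end; /* one past the last byte of the region */
} sketch_bump_region_t;

/* Carve a page-aligned `size` bytes off the front of the region. */
static void *
sketch_bump_alloc(sketch_bump_region_t *r, size_t size) {
	size = (size + SKETCH_PAGE - 1) & ~(size_t)(SKETCH_PAGE - 1);
	if (r->end - r->cur < size) {
		return NULL; /* region exhausted; the caller retains a new one */
	}
	void *ret = (void *)r->cur;
	r->cur += size;
	return ret;
}

The hunks below wire this idea into pac_alloc and the extent cache paths, and add the san_bump_alloc_t state that holds the current region.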


@@ -328,8 +328,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
szind_t szind = sz_size2index(usize);
size_t esize = usize + sz_large_pad;
bool guarded = san_large_extent_decide_guard(tsdn, arena_get_ehooks(arena),
esize, alignment);
bool guarded = san_large_extent_decide_guard(tsdn,
arena_get_ehooks(arena), esize, alignment);
edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
/* slab */ false, szind, zero, guarded, &deferred_work_generated);
assert(deferred_work_generated == false);
@@ -829,7 +829,8 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
bool guarded = san_slab_extent_decide_guard(tsdn, arena_get_ehooks(arena));
bool guarded = san_slab_extent_decide_guard(tsdn,
arena_get_ehooks(arena));
edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
/* alignment */ PAGE, /* slab */ true, /* szind */ binind,
/* zero */ false, guarded, &deferred_work_generated);
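
For context, the guarded flag computed above comes from the san guarding heuristics. A plausible sketch of a sampling-style decision (hypothetical names; the real san_*_decide_guard helpers also consult the ehooks and the extent's size and alignment):

#include <stdbool.h>
#include <stddef.h>

/*
 * Guard roughly one out of every `interval` eligible extents.  The
 * countdown is expected to start at `interval`.  Sketch only.
 */
static bool
sketch_decide_guard(size_t *extents_until_guard, size_t interval) {
	if (interval == 0) {
		return false; /* guarding disabled */
	}
	if (--*extents_until_guard == 0) {
		*extents_until_guard = interval; /* reset the countdown */
		return true;
	}
	return false;
}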


@@ -44,6 +44,7 @@ emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
bool expanding) {
witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE);
assert(!edata_guarded_get(edata));
assert(!expanding || forward);
assert(!edata_state_in_transition(expected_state));
assert(expected_state == extent_state_dirty ||


@@ -87,6 +87,7 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
size, alignment, zero, &commit, false, guarded);
assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
assert(edata == NULL || edata_guarded_get(edata) == guarded);
return edata;
}
@@ -179,7 +180,7 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
goto label_return;
}
eset_remove(eset, edata);
if (!ecache->delay_coalesce) {
if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
break;
}
/* Try to coalesce. */
@@ -399,11 +400,6 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
}
}
} else {
/*
* If split and merge are not allowed (Windows w/o retain), try
* exact fit only.
*/
bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
/*
* A large extent might be broken up from its original size to
* some small size to satisfy a small request. When that small
@@ -415,7 +411,18 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
*/
unsigned lg_max_fit = ecache->delay_coalesce
? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
edata = eset_fit(eset, size, alignment, exact_only, lg_max_fit);
/*
* If split and merge are not allowed (Windows w/o retain), try
* exact fit only.
*
* For simplicity purposes, splitting guarded extents is not
* supported. Hence, we do only exact fit for guarded
* allocations.
*/
bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
edata = eset_fit(eset, size, alignment, exact_only,
lg_max_fit);
}
if (edata == NULL) {
return NULL;
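
The relocated comment is the key constraint: splitting guarded extents is not supported, so a cached guarded extent can only satisfy a request of exactly its own size (lg_max_fit separately bounds how oversized a non-guarded reuse may be when coalescing is delayed). A hedged sketch of that fit rule, for illustration only (this is not eset_fit itself):

#include <stdbool.h>
#include <stddef.h>

/* exact_only is forced on for guarded requests, since a guarded extent
 * cannot be split down to the requested size. */
static bool
sketch_extent_fits(size_t cached_size, size_t request_size, bool exact_only) {
	if (exact_only) {
		return cached_size == request_size;
	}
	return cached_size >= request_size;
}
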
@@ -474,6 +481,7 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* Split the lead. */
if (leadsize != 0) {
assert(!edata_guarded_get(*edata));
*lead = *edata;
*edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
size + trailsize, /* holding_core_locks*/ true);
@@ -486,6 +494,7 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* Split the trail. */
if (trailsize != 0) {
assert(!edata_guarded_get(*edata));
*trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
trailsize, /* holding_core_locks */ true);
if (*trail == NULL) {
@@ -510,6 +519,7 @@ static edata_t *
extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
edata_t *edata, bool growing_retained) {
assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
malloc_mutex_assert_owner(tsdn, &ecache->mtx);
edata_t *lead;
@@ -576,8 +586,10 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
assert(!guarded || expand_edata == NULL);
assert(!guarded || alignment <= PAGE);
malloc_mutex_lock(tsdn, &ecache->mtx);
edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
expand_edata, size, alignment, guarded);
if (edata == NULL) {
@@ -746,7 +758,6 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t size = edata_size_get(edata);
ehooks_zero(tsdn, ehooks, addr, size);
}
return edata;
label_err:
malloc_mutex_unlock(tsdn, &pac->grow_mtx);
@@ -801,6 +812,7 @@ extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
static edata_t *
extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
ecache_t *ecache, edata_t *edata, bool *coalesced) {
assert(!edata_guarded_get(edata));
/*
* We avoid checking / locking inactive neighbors for large size
* classes, since they are eagerly coalesced on deallocation which can
@@ -907,7 +919,7 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
goto label_skip_coalesce;
}
if (!ecache->delay_coalesce) {
edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
NULL);
} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
assert(ecache == &pac->ecache_dirty);
@@ -1014,7 +1026,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* Avoid calling the default extent_dalloc unless have to. */
if (!ehooks_dalloc_will_fail(ehooks)) {
/* Restore guard pages for dalloc / unmap. */
/* Remove guard pages for dalloc / unmap. */
if (edata_guarded_get(edata)) {
assert(ehooks_are_default(ehooks));
san_unguard_pages_two_sided(tsdn, ehooks, edata,


@@ -81,6 +81,9 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
return true;
}
if (san_bump_alloc_init(&pac->sba)) {
return true;
}
pac->base = base;
pac->emap = emap;
@@ -132,18 +135,24 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
static edata_t *
pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
size_t alignment, bool zero) {
size_t alignment, bool zero, bool frequent_reuse) {
assert(alignment <= PAGE);
size_t size_with_guards = size + SAN_PAGE_GUARDS_SIZE;
/* Alloc a non-guarded extent first.*/
edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
/* alignment */ PAGE, zero, /* guarded */ false);
if (edata != NULL) {
/* Add guards around it. */
assert(edata_size_get(edata) == size_with_guards);
san_guard_pages(tsdn, ehooks, edata, pac->emap, true, true,
true);
edata_t *edata;
if (san_bump_enabled() && frequent_reuse) {
edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
zero);
} else {
size_t size_with_guards = san_two_side_guarded_sz(size);
/* Alloc a non-guarded extent first.*/
edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
/* alignment */ PAGE, zero, /* guarded */ false);
if (edata != NULL) {
/* Add guards around it. */
assert(edata_size_get(edata) == size_with_guards);
san_guard_pages_two_sided(tsdn, ehooks, edata,
pac->emap, true);
}
}
assert(edata == NULL || (edata_guarded_get(edata) &&
edata_size_get(edata) == size));
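
The non-bump path above reserves room for a guard page on both sides via san_two_side_guarded_sz(), while the bump path relies on san_one_side_guarded_sz() (it appears in san_bump.c below). Assuming one page-sized guard per side, those helpers likely reduce to something like the following sketch (the sketch_ names and the 4096-byte guard are assumptions, not the library's definitions):

#include <stddef.h>

#define SKETCH_PAGE_GUARD 4096 /* assumed: one page-sized guard per side */

static inline size_t
sketch_two_side_guarded_sz(size_t size) {
	return size + 2 * SKETCH_PAGE_GUARD; /* leading and trailing guard */
}

static inline size_t
sketch_one_side_guarded_sz(size_t size) {
	return size + SKETCH_PAGE_GUARD; /* trailing guard only */
}

Presumably this is the space win of the bump area: consecutive allocations in the retained region each pay for a single guard page, one allocation's trailing guard doing double duty as its neighbor's leading guard.
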
@@ -158,12 +167,21 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
pac_t *pac = (pac_t *)self;
ehooks_t *ehooks = pac_ehooks_get(pac);
edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
zero, guarded);
edata_t *edata = NULL;
/*
* The condition is an optimization - not frequently reused guarded
* allocations are never put in the ecache. pac_alloc_real also
* doesn't grow retained for guarded allocations. So pac_alloc_real
* for such allocations would always return NULL.
* */
if (!guarded || frequent_reuse) {
edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
zero, guarded);
}
if (edata == NULL && guarded) {
/* No cached guarded extents; creating a new one. */
edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
alignment, zero);
alignment, zero, frequent_reuse);
}
return edata;
@@ -189,8 +207,8 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
}
if (trail == NULL) {
trail = ecache_alloc_grow(tsdn, pac, ehooks,
&pac->ecache_retained, edata, expand_amount, PAGE,
zero, /* guarded */ false);
&pac->ecache_retained, edata, expand_amount, PAGE, zero,
/* guarded */ false);
mapped_add = expand_amount;
}
if (trail == NULL) {


@@ -7,28 +7,14 @@
#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/edata_cache.h"
const size_t SBA_RETAINED_ALLOC_SIZE = 1024 * 1024 * 4; /* 4 MB */
static bool
san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
ehooks_t *ehooks, size_t size);
bool
san_bump_alloc_init(san_bump_alloc_t* sba) {
bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
if (err) {
return true;
}
sba->curr_reg = NULL;
return false;
}
edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
ehooks_t *ehooks, size_t size, bool zero) {
assert(maps_coalesce && opt_retain);
assert(san_bump_enabled());
edata_t* to_destroy;
size_t guarded_size = san_one_side_guarded_sz(size);
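
san_bump_alloc() is cut off above. Purely as an illustration of where it is headed (this is not the file's continuation, and every sketch_ helper below is hypothetical), the usual shape of such an allocator is: if the current retained region cannot hold the one-side-guarded size, grow it (by SBA_RETAINED_ALLOC_SIZE) under the mutex, then carve the request off the front and guard it. The pac/ehooks/zero plumbing of the real function is omitted here.

/* Sketch only; the sketch_ helpers do not exist in jemalloc. */
static edata_t *
sketch_bump_alloc_flow(tsdn_t *tsdn, san_bump_alloc_t *sba, size_t size) {
	size_t guarded_size = sketch_one_side_guarded_sz(size);
	malloc_mutex_lock(tsdn, &sba->mtx);
	if (sba->curr_reg == NULL ||
	    sketch_region_remaining(sba->curr_reg) < guarded_size) {
		/* Retain a fresh region and make it current. */
		if (sketch_grow_locked(tsdn, sba, SBA_RETAINED_ALLOC_SIZE)) {
			malloc_mutex_unlock(tsdn, &sba->mtx);
			return NULL;
		}
	}
	/* Carve guarded_size off the front of the current region. */
	edata_t *edata = sketch_split_front(sba->curr_reg, guarded_size);
	malloc_mutex_unlock(tsdn, &sba->mtx);
	if (edata != NULL) {
		sketch_guard_one_side(edata); /* trailing guard page */
	}
	return edata;
}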