San: Bump alloc frequently reused guarded allocations
To utilize a separate retained area for guarded extents, use a bump allocator to allocate those extents.
This commit is contained in:
parent f56f5b9930
commit 800ce49c19
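For orientation, here is a minimal sketch of the bump-allocation idea the commit applies: one large retained mapping is carved into allocations by advancing an offset, instead of unmapping and re-mapping guarded extents. This is an illustration only, not the jemalloc implementation; the names bump_region_t and bump_carve are hypothetical, and only the 4 MB region size mirrors SBA_RETAINED_ALLOC_SIZE from the diff below.

#include <stddef.h>
#include <stdint.h>

/* Mirrors SBA_RETAINED_ALLOC_SIZE from the diff below: a 4 MB retained region. */
#define BUMP_REGION_SIZE ((size_t)4 << 20)

typedef struct {
	uintptr_t base;   /* start of the retained mapping */
	size_t used;      /* bytes already handed out */
	size_t capacity;  /* total bytes in the mapping, e.g. BUMP_REGION_SIZE */
} bump_region_t;

/*
 * Hand out the next `size` bytes by advancing the offset.  Individual frees
 * are not reused, which is why this suits extents that are retained rather
 * than unmapped.
 */
static void *
bump_carve(bump_region_t *r, size_t size) {
	if (r->capacity - r->used < size) {
		return NULL;  /* region exhausted; a real allocator maps a new one */
	}
	void *ret = (void *)(r->base + r->used);
	r->used += size;
	return ret;
}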
@@ -208,6 +208,7 @@ extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
 	assert(edata_committed_get(inner) == edata_committed_get(outer));
 	assert(edata_state_get(inner) == extent_state_active);
 	assert(edata_state_get(outer) == extent_state_merging);
+	assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
 	assert(edata_base_get(inner) == edata_past_get(outer) ||
 	    edata_base_get(outer) == edata_past_get(inner));
 }
@@ -127,6 +127,7 @@ extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
 			return false;
 		}
 	}
+	assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
 
 	return true;
 }
@@ -99,6 +99,9 @@ struct pac_s {
 	exp_grow_t exp_grow;
 	malloc_mutex_t grow_mtx;
 
+	/* Special allocator for guarded frequently reused extents. */
+	san_bump_alloc_t sba;
+
 	/* How large extents should be before getting auto-purged. */
 	atomic_zu_t oversize_threshold;
 
@@ -5,7 +5,9 @@
 #include "jemalloc/internal/exp_grow.h"
 #include "jemalloc/internal/mutex.h"
 
-extern const size_t SBA_RETAINED_ALLOC_SIZE;
+#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
+
+extern bool opt_retain;
 
 typedef struct ehooks_s ehooks_t;
 typedef struct pac_s pac_t;
@@ -17,8 +19,31 @@ struct san_bump_alloc_s {
 	edata_t *curr_reg;
 };
 
-bool
-san_bump_alloc_init(san_bump_alloc_t* sba);
+static inline bool
+san_bump_enabled() {
+	/*
+	 * We enable san_bump allocator only when it's possible to break up a
+	 * mapping and unmap a part of it (maps_coalesce). This is needed to
+	 * ensure the arena destruction process can destroy all retained guarded
+	 * extents one by one and to unmap a trailing part of a retained guarded
+	 * region when it's too small to fit a pending allocation.
+	 * opt_retain is required, because this allocator retains a large
+	 * virtual memory mapping and returns smaller parts of it.
+	 */
+	return maps_coalesce && opt_retain;
+}
+
+static inline bool
+san_bump_alloc_init(san_bump_alloc_t* sba) {
+	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
+	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
+	if (err) {
+		return true;
+	}
+	sba->curr_reg = NULL;
+
+	return false;
+}
 
 edata_t *
 san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
@@ -328,8 +328,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	szind_t szind = sz_size2index(usize);
 	size_t esize = usize + sz_large_pad;
 
-	bool guarded = san_large_extent_decide_guard(tsdn, arena_get_ehooks(arena),
-	    esize, alignment);
+	bool guarded = san_large_extent_decide_guard(tsdn,
+	    arena_get_ehooks(arena), esize, alignment);
 	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
 	    /* slab */ false, szind, zero, guarded, &deferred_work_generated);
 	assert(deferred_work_generated == false);
@@ -829,7 +829,8 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	bool guarded = san_slab_extent_decide_guard(tsdn, arena_get_ehooks(arena));
+	bool guarded = san_slab_extent_decide_guard(tsdn,
+	    arena_get_ehooks(arena));
 	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
 	    /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
 	    /* zero */ false, guarded, &deferred_work_generated);
@@ -44,6 +44,7 @@ emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
     bool expanding) {
 	witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE);
+	assert(!edata_guarded_get(edata));
 	assert(!expanding || forward);
 	assert(!edata_state_in_transition(expected_state));
 	assert(expected_state == extent_state_dirty ||
src/extent.c (32 lines changed)
@@ -87,6 +87,7 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
 	    size, alignment, zero, &commit, false, guarded);
 	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
+	assert(edata == NULL || edata_guarded_get(edata) == guarded);
 	return edata;
 }
 
@@ -179,7 +180,7 @@ ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 			goto label_return;
 		}
 		eset_remove(eset, edata);
-		if (!ecache->delay_coalesce) {
+		if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
 			break;
 		}
 		/* Try to coalesce. */
@@ -399,11 +400,6 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 			}
 		}
 	} else {
-		/*
-		 * If split and merge are not allowed (Windows w/o retain), try
-		 * exact fit only.
-		 */
-		bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
 		/*
 		 * A large extent might be broken up from its original size to
 		 * some small size to satisfy a small request. When that small
@@ -415,7 +411,18 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		 */
 		unsigned lg_max_fit = ecache->delay_coalesce
 		    ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
-		edata = eset_fit(eset, size, alignment, exact_only, lg_max_fit);
+
+		/*
+		 * If split and merge are not allowed (Windows w/o retain), try
+		 * exact fit only.
+		 *
+		 * For simplicity purposes, splitting guarded extents is not
+		 * supported. Hence, we do only exact fit for guarded
+		 * allocations.
+		 */
+		bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
+		edata = eset_fit(eset, size, alignment, exact_only,
+		    lg_max_fit);
 	}
 	if (edata == NULL) {
 		return NULL;
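The comment added above explains why guarded extents always take the exact-fit path: splitting them is not supported. A hedged restatement of that condition as a standalone helper (hypothetical name, for illustration only):

#include <stdbool.h>

/*
 * Exact fit is forced either when split/merge is unavailable (Windows
 * without retain) or when the request is guarded, since guarded extents
 * are never split.
 */
static bool
needs_exact_fit(bool maps_coalesce, bool opt_retain, bool guarded) {
	return (!maps_coalesce && !opt_retain) || guarded;
}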
@@ -474,6 +481,7 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	/* Split the lead. */
 	if (leadsize != 0) {
+		assert(!edata_guarded_get(*edata));
 		*lead = *edata;
 		*edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
 		    size + trailsize, /* holding_core_locks*/ true);
@@ -486,6 +494,7 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	/* Split the trail. */
 	if (trailsize != 0) {
+		assert(!edata_guarded_get(*edata));
 		*trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
 		    trailsize, /* holding_core_locks */ true);
 		if (*trail == NULL) {
@@ -510,6 +519,7 @@ static edata_t *
 extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
     edata_t *edata, bool growing_retained) {
+	assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
 	malloc_mutex_assert_owner(tsdn, &ecache->mtx);
 
 	edata_t *lead;
@@ -576,8 +586,10 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 	assert(!guarded || expand_edata == NULL);
+	assert(!guarded || alignment <= PAGE);
+
 	malloc_mutex_lock(tsdn, &ecache->mtx);
 
 	edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
 	    expand_edata, size, alignment, guarded);
 	if (edata == NULL) {
@@ -746,7 +758,6 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		size_t size = edata_size_get(edata);
 		ehooks_zero(tsdn, ehooks, addr, size);
 	}
-
 	return edata;
 label_err:
 	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
@@ -801,6 +812,7 @@ extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 static edata_t *
 extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool *coalesced) {
+	assert(!edata_guarded_get(edata));
 	/*
 	 * We avoid checking / locking inactive neighbors for large size
 	 * classes, since they are eagerly coalesced on deallocation which can
@@ -907,7 +919,7 @@ extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 		goto label_skip_coalesce;
 	}
 	if (!ecache->delay_coalesce) {
 		edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
 		    NULL);
 	} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
 		assert(ecache == &pac->ecache_dirty);
@@ -1014,7 +1026,7 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	/* Avoid calling the default extent_dalloc unless have to. */
 	if (!ehooks_dalloc_will_fail(ehooks)) {
-		/* Restore guard pages for dalloc / unmap. */
+		/* Remove guard pages for dalloc / unmap. */
 		if (edata_guarded_get(edata)) {
 			assert(ehooks_are_default(ehooks));
 			san_unguard_pages_two_sided(tsdn, ehooks, edata,
|
48
src/pac.c
48
src/pac.c
@@ -81,6 +81,9 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
 		return true;
 	}
+	if (san_bump_alloc_init(&pac->sba)) {
+		return true;
+	}
 
 	pac->base = base;
 	pac->emap = emap;
@@ -132,18 +135,24 @@ pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
 
 static edata_t *
 pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
-    size_t alignment, bool zero) {
+    size_t alignment, bool zero, bool frequent_reuse) {
 	assert(alignment <= PAGE);
 
-	size_t size_with_guards = size + SAN_PAGE_GUARDS_SIZE;
-	/* Alloc a non-guarded extent first.*/
-	edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
-	    /* alignment */ PAGE, zero, /* guarded */ false);
-	if (edata != NULL) {
-		/* Add guards around it. */
-		assert(edata_size_get(edata) == size_with_guards);
-		san_guard_pages(tsdn, ehooks, edata, pac->emap, true, true,
-		    true);
+	edata_t *edata;
+	if (san_bump_enabled() && frequent_reuse) {
+		edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
+		    zero);
+	} else {
+		size_t size_with_guards = san_two_side_guarded_sz(size);
+		/* Alloc a non-guarded extent first.*/
+		edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
+		    /* alignment */ PAGE, zero, /* guarded */ false);
+		if (edata != NULL) {
+			/* Add guards around it. */
+			assert(edata_size_get(edata) == size_with_guards);
+			san_guard_pages_two_sided(tsdn, ehooks, edata,
+			    pac->emap, true);
+		}
 	}
 	assert(edata == NULL || (edata_guarded_get(edata) &&
 	    edata_size_get(edata) == size));
@@ -158,12 +167,21 @@ pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
 	pac_t *pac = (pac_t *)self;
 	ehooks_t *ehooks = pac_ehooks_get(pac);
 
-	edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
-	    zero, guarded);
+	edata_t *edata = NULL;
+	/*
+	 * The condition is an optimization - not frequently reused guarded
+	 * allocations are never put in the ecache. pac_alloc_real also
+	 * doesn't grow retained for guarded allocations. So pac_alloc_real
+	 * for such allocations would always return NULL.
+	 * */
+	if (!guarded || frequent_reuse) {
+		edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
+		    zero, guarded);
+	}
 	if (edata == NULL && guarded) {
 		/* No cached guarded extents; creating a new one. */
 		edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
-		    alignment, zero);
+		    alignment, zero, frequent_reuse);
 	}
 
 	return edata;
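To summarize the control flow added above, here is a hedged sketch (hypothetical helper, not jemalloc code) of which path a guarded request takes after this change; `cached` stands in for whether pac_alloc_real finds a reusable extent in the ecache.

#include <stdbool.h>

typedef enum {
	PATH_ECACHE_RECYCLE,  /* pac_alloc_real returns a cached extent */
	PATH_SAN_BUMP,        /* san_bump_alloc carves from the retained region */
	PATH_FRESH_GUARDED    /* new extent plus guard pages on both sides */
} alloc_path_t;

static alloc_path_t
guarded_alloc_path(bool bump_enabled, bool frequent_reuse, bool cached) {
	/* Only frequently reused guarded allocations ever hit the ecache. */
	if (frequent_reuse && cached) {
		return PATH_ECACHE_RECYCLE;
	}
	/* Otherwise a new guarded extent is created. */
	if (bump_enabled && frequent_reuse) {
		return PATH_SAN_BUMP;
	}
	return PATH_FRESH_GUARDED;
}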
@@ -189,8 +207,8 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
 	}
 	if (trail == NULL) {
 		trail = ecache_alloc_grow(tsdn, pac, ehooks,
-		    &pac->ecache_retained, edata, expand_amount, PAGE,
-		    zero, /* guarded */ false);
+		    &pac->ecache_retained, edata, expand_amount, PAGE, zero,
+		    /* guarded */ false);
 		mapped_add = expand_amount;
 	}
 	if (trail == NULL) {
@@ -7,28 +7,14 @@
 #include "jemalloc/internal/ehooks.h"
 #include "jemalloc/internal/edata_cache.h"
 
-const size_t SBA_RETAINED_ALLOC_SIZE = 1024 * 1024 * 4; /* 4 MB */
-
 static bool
 san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
     ehooks_t *ehooks, size_t size);
 
-bool
-san_bump_alloc_init(san_bump_alloc_t* sba) {
-	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
-	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
-	if (err) {
-		return true;
-	}
-	sba->curr_reg = NULL;
-
-	return false;
-}
-
 edata_t *
 san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
     ehooks_t *ehooks, size_t size, bool zero) {
-	assert(maps_coalesce && opt_retain);
+	assert(san_bump_enabled());
 
 	edata_t* to_destroy;
 	size_t guarded_size = san_one_side_guarded_sz(size);
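Note the sizing difference: pac_alloc_new_guarded sizes a standalone guarded extent with san_two_side_guarded_sz, while san_bump_alloc above uses san_one_side_guarded_sz. A hedged sketch of the arithmetic, assuming each helper adds one guard page per guarded side (the actual definitions live in jemalloc's san.h and may differ in detail):

#include <stddef.h>

#define GUARD_PAGE_SZ ((size_t)4096)  /* assumes a 4 KiB page for the example */

/* A standalone guarded extent pays for a guard page on each side. */
static inline size_t
two_side_guarded_sz(size_t size) {
	return size + 2 * GUARD_PAGE_SZ;
}

/*
 * Within the bump region, presumably, adjacent allocations can share guard
 * pages, so each allocation pays for only one of them.
 */
static inline size_t
one_side_guarded_sz(size_t size) {
	return size + GUARD_PAGE_SZ;
}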
@@ -13,6 +13,11 @@ verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
 #define MAX_SMALL_ALLOCATIONS 4096
 void *small_alloc[MAX_SMALL_ALLOCATIONS];
 
+/*
+ * This test allocates page sized slabs and checks that every two slabs have
+ * at least one page in between them. That page is supposed to be the guard
+ * page.
+ */
 TEST_BEGIN(test_guarded_small) {
 	test_skip_if(opt_prof);
 
@@ -21,7 +26,8 @@ TEST_BEGIN(test_guarded_small) {
 	VARIABLE_ARRAY(uintptr_t, pages, npages);
 
 	/* Allocate to get sanitized pointers. */
-	size_t sz = PAGE / 8;
+	size_t slab_sz = PAGE;
+	size_t sz = slab_sz / 8;
 	unsigned n_alloc = 0;
 	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
 		void *ptr = malloc(sz);
@@ -50,8 +56,9 @@ TEST_BEGIN(test_guarded_small) {
 		for (unsigned j = i + 1; j < npages; j++) {
 			uintptr_t ptr_diff = pages[i] > pages[j] ?
 			    pages[i] - pages[j] : pages[j] - pages[i];
-			expect_zu_gt((size_t)ptr_diff, 2 * PAGE,
-			    "Pages should not be next to each other.");
+			expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
+			    "There should be at least one pages between "
+			    "guarded slabs");
 		}
 	}
 
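The tightened check above asserts that page-sized slab bases differ by at least slab_sz + PAGE, i.e. at least one guard page separates adjacent slabs. A small worked example under the assumption PAGE == 4096 (the addresses are hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void) {
	const size_t page = 4096, slab_sz = 4096;
	/* Slab A occupies 0x10000..0x10fff, its guard page 0x11000..0x11fff. */
	uintptr_t slab_a = 0x10000;
	/* The next slab can therefore start no lower than 0x12000. */
	uintptr_t slab_b = 0x12000;
	/* Mirrors expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE, ...). */
	assert(slab_b - slab_a >= slab_sz + page);
	return 0;
}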
@@ -76,20 +83,15 @@ TEST_BEGIN(test_guarded_large) {
 	}
 
 	/* Verify the pages are not continuous, i.e. separated by guards. */
-	uintptr_t min_diff = (uintptr_t)-1;
 	for (unsigned i = 0; i < nlarge; i++) {
 		for (unsigned j = i + 1; j < nlarge; j++) {
 			uintptr_t ptr_diff = large[i] > large[j] ?
 			    large[i] - large[j] : large[j] - large[i];
 			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
-			    "Pages should not be next to each other.");
-			if (ptr_diff < min_diff) {
-				min_diff = ptr_diff;
-			}
+			    "There should be at least two pages between "
+			    " guarded large allocations");
 		}
 	}
-	expect_zu_ge((size_t)min_diff, large_sz + 2 * PAGE,
-	    "Pages should not be next to each other.");
 
 	for (unsigned i = 0; i < nlarge; i++) {
 		free((void *)large[i]);