San: Implement bump alloc

The new allocator will be used to allocate guarded extents used as slabs
for guarded small allocations.

Author:     Alex Lapenkou, 2021-11-04 11:10:19 -07:00
Committer:  Alexander Lapenkov
Parent:     34b00f8969
Commit:     0f6da1257d
15 changed files with 521 additions and 107 deletions
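
For orientation, a minimal standalone sketch of the bump-allocation idea the new san_bump allocator applies (illustrative only, not code from this commit; all names are hypothetical): a large retained region is consumed front-to-back by advancing a cursor, and each carved-off chunk becomes a guarded slab.

/*
 * Illustrative sketch only. Models the core bump-allocation step:
 * carve consecutive chunks off a region by advancing a cursor.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct bump_region_s {
	uintptr_t cursor; /* first free byte */
	uintptr_t end;    /* one past the region's last byte */
} bump_region_t;

/* Carve size bytes off the front, or return NULL if the region is spent. */
static void *
bump_carve(bump_region_t *r, size_t size) {
	if (size > r->end - r->cursor) {
		return NULL; /* caller replaces the region, as san_bump_alloc does */
	}
	void *ret = (void *)r->cursor;
	r->cursor += size;
	return ret;
}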

src/extent.c

@@ -40,13 +40,9 @@ static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     bool zero, bool *commit, bool growing_retained, bool guarded);
 static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool *coalesced);
-static void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
-    ecache_t *ecache, edata_t *edata);
 static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
     ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
     bool zero, bool *commit, bool guarded);
-static edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
-    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit);
 
 /******************************************************************************/
@@ -127,7 +123,8 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 		void *new_addr = (expand_edata == NULL) ? NULL :
 		    edata_past_get(expand_edata);
 		edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
-		    size, alignment, zero, &commit);
+		    size, alignment, zero, &commit,
+		    /* growing_retained */ false);
 	}
 	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
@@ -270,7 +267,7 @@ extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
 	emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
 }
 
-static void
+void
 extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
 	cassert(config_prof);
 	/* prof_gdump() requirement. */
@@ -785,35 +782,6 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	return edata;
 }
 
-static edata_t *
-extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
-    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit) {
-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-	    WITNESS_RANK_CORE, 0);
-
-	edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
-	if (edata == NULL) {
-		return NULL;
-	}
-
-	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
-	void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
-	    &zero, commit);
-	if (addr == NULL) {
-		edata_cache_put(tsdn, pac->edata_cache, edata);
-		return NULL;
-	}
-
-	edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
-	    size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
-	    extent_state_active, zero, *commit, EXTENT_PAI_PAC,
-	    opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
-	if (extent_register(tsdn, pac, edata)) {
-		edata_cache_put(tsdn, pac->edata_cache, edata);
-		return NULL;
-	}
-
-	return edata;
-}
-
 static bool
 extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *inner, edata_t *outer, bool forward) {
@@ -924,9 +892,9 @@ extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
  * Does the metadata management portions of putting an unused extent into the
  * given ecache_t (coalesces and inserts into the eset).
  */
-static void
-extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
-    ecache_t *ecache, edata_t *edata) {
+void
+extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+    edata_t *edata) {
 	assert((ecache->state != extent_state_dirty &&
 	    ecache->state != extent_state_muzzy) ||
 	    !edata_zeroed_get(edata));
@@ -1001,6 +969,42 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	return err;
 }
 
+edata_t *
+extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
+    bool growing_retained) {
+	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+	edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
+	if (edata == NULL) {
+		return NULL;
+	}
+
+	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
+	void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
+	    &zero, commit);
+	if (addr == NULL) {
+		edata_cache_put(tsdn, pac->edata_cache, edata);
+		return NULL;
+	}
+
+	edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
+	    size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
+	    extent_state_active, zero, *commit, EXTENT_PAI_PAC,
+	    opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
+	/*
+	 * Retained memory is not counted towards gdump. Only if an extent is
+	 * allocated as a separate mapping, i.e. growing_retained is false, then
+	 * gdump should be updated.
+	 */
+	bool gdump_add = !growing_retained;
+	if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
+		edata_cache_put(tsdn, pac->edata_cache, edata);
+		return NULL;
+	}
+
+	return edata;
+}
+
 void
 extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     edata_t *edata) {
@@ -1013,7 +1017,8 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	/* Restore guard pages for dalloc / unmap. */
 	if (edata_guarded_get(edata)) {
 		assert(ehooks_are_default(ehooks));
-		san_unguard_pages(tsdn, ehooks, edata, pac->emap);
+		san_unguard_pages_two_sided(tsdn, ehooks, edata,
+		    pac->emap);
 	}
 	/*
 	 * Deregister first to avoid a race with other allocating
@@ -1057,12 +1062,14 @@ extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     edata_t *edata) {
 	assert(edata_base_get(edata) != NULL);
 	assert(edata_size_get(edata) != 0);
-	assert(edata_state_get(edata) == extent_state_retained);
+	extent_state_t state = edata_state_get(edata);
+	assert(state == extent_state_retained || state == extent_state_active);
 	assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
 	if (edata_guarded_get(edata)) {
+		assert(opt_retain);
 		san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
 	}
 	edata_addr_set(edata, edata_base_get(edata));
@@ -1087,9 +1094,9 @@ extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 
 bool
 extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
-    size_t offset, size_t length) {
+    size_t offset, size_t length, bool growing_retained) {
 	return extent_commit_impl(tsdn, ehooks, edata, offset, length,
-	    false);
+	    growing_retained);
 }
 
 bool
@@ -1207,9 +1214,9 @@ label_error_a:
 
 edata_t *
 extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
-    size_t size_a, size_t size_b) {
+    size_t size_a, size_t size_b, bool holding_core_locks) {
 	return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
-	    /* holding_core_locks */ false);
+	    holding_core_locks);
 }
 
 static bool
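
extent_alloc_wrapper rounds the requested alignment up to at least a page (palignment) before calling the OS-level allocation hook. A self-contained sketch of that rounding, assuming the usual power-of-two round-up semantics of jemalloc's ALIGNMENT_CEILING (the macro name ALIGN_CEIL below is hypothetical):

#include <assert.h>
#include <stddef.h>

#define PAGE ((size_t)4096)
/* Assumed semantics of ALIGNMENT_CEILING: round s up to a multiple of
 * align, where align is a power of two. */
#define ALIGN_CEIL(s, align) (((s) + ((align) - 1)) & ~((align) - 1))

int main(void) {
	assert(ALIGN_CEIL((size_t)64, PAGE) == PAGE);       /* sub-page -> page */
	assert(ALIGN_CEIL((size_t)4096, PAGE) == 4096);     /* already aligned */
	assert(ALIGN_CEIL((size_t)4097, PAGE) == 2 * PAGE); /* round up */
	return 0;
}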

src/pac.c

@@ -14,11 +14,6 @@ static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     bool *deferred_work_generated);
 static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
 
-static ehooks_t *
-pac_ehooks_get(pac_t *pac) {
-	return base_ehooks_get(pac->base);
-}
-
 static inline void
 pac_decay_data_get(pac_t *pac, extent_state_t state,
     decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
@@ -139,14 +134,15 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
     size_t alignment, bool zero) {
 	assert(alignment <= PAGE);
 
-	size_t size_with_guards = size + PAGE_GUARDS_SIZE;
+	size_t size_with_guards = size + SAN_PAGE_GUARDS_SIZE;
 
 	/* Alloc a non-guarded extent first.*/
 	edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
 	    /* alignment */ PAGE, zero, /* guarded */ false);
 	if (edata != NULL) {
 		/* Add guards around it. */
 		assert(edata_size_get(edata) == size_with_guards);
-		san_guard_pages(tsdn, ehooks, edata, pac->emap);
+		san_guard_pages(tsdn, ehooks, edata, pac->emap, true, true,
+		    true);
 	}
 	assert(edata == NULL || (edata_guarded_get(edata) &&
 	    edata_size_get(edata) == size));
@@ -222,7 +218,7 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
 	}
 
 	edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata,
-	    new_size, shrink_amount);
+	    new_size, shrink_amount, /* holding_core_locks */ false);
 	if (trail == NULL) {
 		return true;
 	}
@@ -253,7 +249,8 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
 		if (!edata_slab_get(edata) || !maps_coalesce) {
 			assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
 			    !maps_coalesce);
-			san_unguard_pages(tsdn, ehooks, edata, pac->emap);
+			san_unguard_pages_two_sided(tsdn, ehooks, edata,
+			    pac->emap);
 		}
 	}
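
pac_alloc_new_guarded now sizes its request with SAN_PAGE_GUARDS_SIZE, and san.c (below) switches to the san_one_side/san_two_side size helpers. A standalone model of the presumed arithmetic, assuming one guard page per guarded side (i.e. SAN_PAGE_GUARD == PAGE and SAN_PAGE_GUARDS_SIZE == 2 * PAGE; the helper names below are local stand-ins):

#include <assert.h>
#include <stddef.h>

#define PAGE      ((size_t)4096)
#define GUARD     PAGE       /* models SAN_PAGE_GUARD */
#define GUARDS_SZ (2 * PAGE) /* models SAN_PAGE_GUARDS_SIZE */

static size_t one_side_guarded_sz(size_t sz)   { return sz + GUARD; }
static size_t one_side_unguarded_sz(size_t sz) { return sz - GUARD; }
static size_t two_side_guarded_sz(size_t sz)   { return sz + GUARDS_SZ; }
static size_t two_side_unguarded_sz(size_t sz) { return sz - GUARDS_SZ; }

int main(void) {
	size_t usable = 4 * PAGE;
	/* Round-trip: guarding then unguarding recovers the usable size. */
	assert(two_side_unguarded_sz(two_side_guarded_sz(usable)) == usable);
	/* san_bump extents own only a right guard: one page of overhead. */
	assert(one_side_guarded_sz(usable) == usable + PAGE);
	assert(one_side_unguarded_sz(one_side_guarded_sz(usable)) == usable);
	return 0;
}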

src/pages.c

@@ -365,33 +365,61 @@ pages_decommit(void *addr, size_t size) {
 
 void
 pages_mark_guards(void *head, void *tail) {
-	assert(head != NULL && tail != NULL);
-	assert((uintptr_t)head < (uintptr_t)tail);
+	assert(head != NULL || tail != NULL);
+	assert(head == NULL || tail == NULL ||
+	    (uintptr_t)head < (uintptr_t)tail);
 #ifdef JEMALLOC_HAVE_MPROTECT
-	mprotect(head, PAGE, PROT_NONE);
-	mprotect(tail, PAGE, PROT_NONE);
+	if (head != NULL) {
+		mprotect(head, PAGE, PROT_NONE);
+	}
+	if (tail != NULL) {
+		mprotect(tail, PAGE, PROT_NONE);
+	}
 #else
 	/* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
-	os_pages_commit(head, PAGE, false);
-	os_pages_commit(tail, PAGE, false);
+	if (head != NULL) {
+		os_pages_commit(head, PAGE, false);
+	}
+	if (tail != NULL) {
+		os_pages_commit(tail, PAGE, false);
+	}
 #endif
 }
 
 void
 pages_unmark_guards(void *head, void *tail) {
-	assert(head != NULL && tail != NULL);
-	assert((uintptr_t)head < (uintptr_t)tail);
+	assert(head != NULL || tail != NULL);
+	assert(head == NULL || tail == NULL ||
+	    (uintptr_t)head < (uintptr_t)tail);
 #ifdef JEMALLOC_HAVE_MPROTECT
-	size_t range = (uintptr_t)tail - (uintptr_t)head + PAGE;
-	if (range <= SC_LARGE_MINCLASS) {
+	bool head_and_tail = (head != NULL) && (tail != NULL);
+	size_t range = head_and_tail ?
+	    (uintptr_t)tail - (uintptr_t)head + PAGE :
+	    SIZE_T_MAX;
+	/*
+	 * The amount of work that the kernel does in mprotect depends on the
+	 * range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen
+	 * to prevent kernel from doing too much work that would outweigh the
+	 * savings of performing one less system call.
+	 */
+	bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS;
+	if (ranged_mprotect) {
 		mprotect(head, range, PROT_READ | PROT_WRITE);
 	} else {
-		mprotect(head, PAGE, PROT_READ | PROT_WRITE);
-		mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+		if (head != NULL) {
+			mprotect(head, PAGE, PROT_READ | PROT_WRITE);
+		}
+		if (tail != NULL) {
+			mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+		}
 	}
 #else
-	os_pages_commit(head, PAGE, true);
-	os_pages_commit(tail, PAGE, true);
+	if (head != NULL) {
+		os_pages_commit(head, PAGE, true);
+	}
+	if (tail != NULL) {
+		os_pages_commit(tail, PAGE, true);
+	}
 #endif
 }
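
The reworked pages_unmark_guards batches the two guard-page restores into a single mprotect over the whole span when both guards exist and the span is small enough. A minimal POSIX sketch of the same trade-off (illustrative only, not jemalloc code; assumes mmap/mprotect with MAP_ANONYMOUS available):

#include <stddef.h>
#include <sys/mman.h>

#define PAGE ((size_t)4096)

int main(void) {
	/* Three pages: guard, payload, guard. */
	char *p = mmap(NULL, 3 * PAGE, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		return 1;
	}
	mprotect(p, PAGE, PROT_NONE);            /* head guard */
	mprotect(p + 2 * PAGE, PAGE, PROT_NONE); /* tail guard */

	/* Unguard: the head..tail span is small, so one ranged call beats
	 * two single-page calls; for guards far apart, two calls win. */
	mprotect(p, 3 * PAGE, PROT_READ | PROT_WRITE);

	munmap(p, 3 * PAGE);
	return 0;
}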

src/san.c

@@ -10,16 +10,63 @@
 size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
 size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
 
+static inline void
+san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
+    uintptr_t *addr, size_t size, bool left, bool right) {
+	assert(!edata_guarded_get(edata));
+	assert(size % PAGE == 0);
+	*addr = (uintptr_t)edata_base_get(edata);
+	if (left) {
+		*guard1 = *addr;
+		*addr += SAN_PAGE_GUARD;
+	} else {
+		*guard1 = 0;
+	}
+	if (right) {
+		*guard2 = *addr + size;
+	} else {
+		*guard2 = 0;
+	}
+}
+
+static inline void
+san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
+    uintptr_t *addr, size_t size, bool left, bool right) {
+	assert(edata_guarded_get(edata));
+	assert(size % PAGE == 0);
+	*addr = (uintptr_t)edata_base_get(edata);
+	if (right) {
+		*guard2 = *addr + size;
+	} else {
+		*guard2 = 0;
+	}
+	if (left) {
+		*guard1 = *addr - SAN_PAGE_GUARD;
+		assert(*guard1 != 0);
+		*addr = *guard1;
+	} else {
+		*guard1 = 0;
+	}
+}
+
 void
-san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
-	emap_deregister_boundary(tsdn, emap, edata);
+san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
+    bool left, bool right, bool remap) {
+	assert(left || right);
+	if (remap) {
+		emap_deregister_boundary(tsdn, emap, edata);
+	}
 
 	size_t size_with_guards = edata_size_get(edata);
-	size_t usize = size_with_guards - PAGE_GUARDS_SIZE;
+	size_t usize = (left && right)
+	    ? san_two_side_unguarded_sz(size_with_guards)
+	    : san_one_side_unguarded_sz(size_with_guards);
 
-	uintptr_t guard1 = (uintptr_t)edata_base_get(edata);
-	uintptr_t addr = guard1 + PAGE;
-	uintptr_t guard2 = addr + usize;
+	uintptr_t guard1, guard2, addr;
+	san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
+	    right);
 
 	assert(edata_state_get(edata) == extent_state_active);
 	ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
@@ -29,14 +76,18 @@ san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) {
 	edata_addr_set(edata, (void *)addr);
 	edata_guarded_set(edata, true);
 
 	/* The new boundary will be registered on the pa_alloc path. */
+	if (remap) {
+		emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
+		    /* slab */ false);
+	}
 }
 
 static void
 san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
-    emap_t *emap, bool reg_emap) {
+    emap_t *emap, bool left, bool right, bool remap) {
+	assert(left || right);
 	/* Remove the inner boundary which no longer exists. */
-	if (reg_emap) {
+	if (remap) {
 		assert(edata_state_get(edata) == extent_state_active);
 		emap_deregister_boundary(tsdn, emap, edata);
 	} else {
@@ -44,24 +95,26 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 	}
 
 	size_t size = edata_size_get(edata);
-	size_t size_with_guards = size + PAGE_GUARDS_SIZE;
+	size_t size_with_guards = (left && right)
+	    ? san_two_side_guarded_sz(size)
+	    : san_one_side_guarded_sz(size);
 
-	uintptr_t addr = (uintptr_t)edata_base_get(edata);
-	uintptr_t guard1 = addr - PAGE;
-	uintptr_t guard2 = addr + size;
+	uintptr_t guard1, guard2, addr;
+	san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
+	    right);
 
 	ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
 
 	/* Update the true addr and usable size of the edata. */
 	edata_size_set(edata, size_with_guards);
-	edata_addr_set(edata, (void *)guard1);
+	edata_addr_set(edata, (void *)addr);
 	edata_guarded_set(edata, false);
 
 	/*
 	 * Then re-register the outer boundary including the guards, if
 	 * requested.
 	 */
-	if (reg_emap) {
+	if (remap) {
 		emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
 		    /* slab */ false);
 	}
@@ -69,15 +122,23 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 
 void
 san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
-    emap_t *emap) {
-	san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* reg_emap */ true);
+    emap_t *emap, bool left, bool right) {
+	san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right,
+	    /* remap */ true);
 }
 
 void
 san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
     emap_t *emap) {
 	emap_assert_not_mapped(tsdn, emap, edata);
-	san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* reg_emap */ false);
+	/*
+	 * We don't want to touch the emap of about to be destroyed extents, as
+	 * they have been unmapped upon eviction from the retained ecache. Also,
+	 * we unguard the extents to the right, because retained extents only
+	 * own their right guard page per san_bump_alloc's logic.
+	 */
+	san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
+	    /* right */ true, /* remap */ false);
 }
 
 void
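
san_find_guarded_addr and san_find_unguarded_addr generalize the old fixed two-sided layout to any left/right combination; a guard address of 0 means "no guard on this side", which the reworked pages_mark_guards/pages_unmark_guards accept as NULL. A standalone model of the guarded-address arithmetic (illustrative only, assuming SAN_PAGE_GUARD == PAGE; the function name find_guarded is a local stand-in):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE ((uintptr_t)4096)

/* Models san_find_guarded_addr: given the extent base, compute where the
 * guards and the usable payload land for a left/right combination. */
static void
find_guarded(uintptr_t base, uintptr_t usize, bool left, bool right,
    uintptr_t *guard1, uintptr_t *guard2, uintptr_t *addr) {
	*addr = base;
	if (left) {
		*guard1 = base;
		*addr += PAGE; /* payload starts past the left guard */
	} else {
		*guard1 = 0;   /* no left guard */
	}
	*guard2 = right ? *addr + usize : 0;
}

int main(void) {
	uintptr_t g1, g2, addr;
	/* Right-only guard, as used by san_bump extents. */
	find_guarded(0x10000000, 4 * PAGE, false, true, &g1, &g2, &addr);
	assert(g1 == 0 && addr == 0x10000000 && g2 == addr + 4 * PAGE);
	/* Two-sided guard, as used for guarded large extents. */
	find_guarded(0x10000000, 4 * PAGE, true, true, &g1, &g2, &addr);
	assert(g1 == 0x10000000 && addr == g1 + PAGE && g2 == addr + 4 * PAGE);
	return 0;
}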

src/san_bump.c (new file, 127 lines)

@@ -0,0 +1,127 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san_bump.h"
+#include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/edata_cache.h"
+
+const size_t SBA_RETAINED_ALLOC_SIZE = 1024 * 1024 * 4; /* 4 MB */
+
+static bool
+san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
+    ehooks_t *ehooks, size_t size);
+
+bool
+san_bump_alloc_init(san_bump_alloc_t* sba) {
+	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
+	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
+	if (err) {
+		return true;
+	}
+	sba->curr_reg = NULL;
+
+	return false;
+}
+
+edata_t *
+san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
+    ehooks_t *ehooks, size_t size, bool zero) {
+	assert(maps_coalesce && opt_retain);
+
+	edata_t* to_destroy;
+	size_t guarded_size = san_one_side_guarded_sz(size);
+
+	malloc_mutex_lock(tsdn, &sba->mtx);
+
+	if (sba->curr_reg == NULL ||
+	    edata_size_get(sba->curr_reg) < guarded_size) {
+		/*
+		 * If the current region can't accommodate the allocation,
+		 * try replacing it with a larger one and destroy current if the
+		 * replacement succeeds.
+		 */
+		to_destroy = sba->curr_reg;
+		bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
+		    guarded_size);
+		if (err) {
+			goto label_err;
+		}
+	} else {
+		to_destroy = NULL;
+	}
+	assert(guarded_size <= edata_size_get(sba->curr_reg));
+	size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;
+
+	edata_t* edata;
+	if (trail_size != 0) {
+		edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
+		    ehooks, sba->curr_reg, guarded_size, trail_size,
+		    /* holding_core_locks */ true);
+		if (curr_reg_trail == NULL) {
+			goto label_err;
+		}
+		edata = sba->curr_reg;
+		sba->curr_reg = curr_reg_trail;
+	} else {
+		edata = sba->curr_reg;
+		sba->curr_reg = NULL;
+	}
+
+	malloc_mutex_unlock(tsdn, &sba->mtx);
+
+	assert(!edata_guarded_get(edata));
+	assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg));
+	assert(to_destroy == NULL || !edata_guarded_get(to_destroy));
+
+	if (to_destroy != NULL) {
+		extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
+	}
+
+	san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
+	    /* right */ true, /* remap */ true);
+
+	if (!edata_committed_get(edata)) {
+		if (extent_commit_wrapper(tsdn, ehooks, edata, 0,
+		    edata_size_get(edata), true)) {
+			extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+			    edata);
+			return NULL;
+		}
+		edata_committed_set(edata, true);
+	}
+	if (zero && !edata_zeroed_get(edata)) {
+		void *addr = edata_base_get(edata);
+		size_t size = edata_size_get(edata);
+		ehooks_zero(tsdn, ehooks, addr, size);
+		edata_zeroed_set(edata, true);
+	}
+
+	if (config_prof) {
+		extent_gdump_add(tsdn, edata);
+	}
+
+	return edata;
+label_err:
+	malloc_mutex_unlock(tsdn, &sba->mtx);
+	return NULL;
+}
+
+static bool
+san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
+    ehooks_t *ehooks, size_t size) {
+	malloc_mutex_assert_owner(tsdn, &sba->mtx);
+
+	bool committed = false, zeroed = false;
+	size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size :
+	    SBA_RETAINED_ALLOC_SIZE;
+	assert((alloc_size & PAGE_MASK) == 0);
+	sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL,
+	    alloc_size, PAGE, zeroed, &committed,
+	    /* growing_retained */ true);
+	if (sba->curr_reg == NULL) {
+		return true;
+	}
+	return false;
+}
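
To see how the pieces compose: san_bump_alloc carves a one-side-guarded chunk off the front of curr_reg via extent_split_wrapper, and when the current region is too small it grows a fresh retained mapping of at least SBA_RETAINED_ALLOC_SIZE and destroys the old remainder. A standalone model of that carve-or-replace flow (illustrative only: offsets stand in for real extents; no locking, guarding, or commit handling):

#include <assert.h>
#include <stddef.h>

#define PAGE     ((size_t)4096)
#define REG_SIZE ((size_t)4 * 1024 * 1024) /* mirrors SBA_RETAINED_ALLOC_SIZE */

typedef struct region_s { size_t off, size; } region_t;

static region_t curr = {0, 0}; /* models sba->curr_reg; size == 0: none */
static size_t next_off = 0;    /* models fresh mappings from the OS */

/* Models san_bump_alloc: replace the region if too small, then split. */
static region_t
bump_alloc(size_t guarded_size) {
	if (curr.size < guarded_size) {
		/* san_bump_grow_locked: allocate max(size, REG_SIZE). */
		size_t alloc = guarded_size > REG_SIZE ? guarded_size : REG_SIZE;
		curr.off = next_off;
		curr.size = alloc;
		next_off += alloc; /* the old remainder would be destroyed here */
	}
	/* extent_split_wrapper: the front chunk is returned, the trail kept. */
	region_t ret = {curr.off, guarded_size};
	curr.off += guarded_size;
	curr.size -= guarded_size;
	return ret;
}

int main(void) {
	region_t a = bump_alloc(8 * PAGE);
	region_t b = bump_alloc(8 * PAGE);
	/* Consecutive carves from one region are adjacent. */
	assert(b.off == a.off + a.size);
	return 0;
}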