diff --git a/Makefile.in b/Makefile.in index 8f96a992..50c586c5 100644 --- a/Makefile.in +++ b/Makefile.in @@ -120,6 +120,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/extent_mmap.c \ $(srcroot)src/fxp.c \ $(srcroot)src/san.c \ + $(srcroot)src/san_bump.c \ $(srcroot)src/hook.c \ $(srcroot)src/hpa.c \ $(srcroot)src/hpa_hooks.c \ @@ -220,6 +221,7 @@ TESTS_UNIT := \ $(srcroot)test/unit/fork.c \ ${srcroot}test/unit/fxp.c \ ${srcroot}test/unit/san.c \ + ${srcroot}test/unit/san_bump.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/hook.c \ $(srcroot)test/unit/hpa.c \ diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h index 73c55633..73059ad2 100644 --- a/include/jemalloc/internal/extent.h +++ b/include/jemalloc/internal/extent.h @@ -30,14 +30,20 @@ void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, size_t npages_min); +void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata); +void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata); void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); +edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, + bool growing_retained); void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata); bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - size_t offset, size_t length); + size_t offset, size_t length, bool growing_retained); bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length); bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, @@ -45,7 +51,8 @@ bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, 
edata_t *edata, bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, size_t offset, size_t length); edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, - ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b); + ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b, + bool holding_core_locks); bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a, edata_t *b); size_t extent_sn_next(pac_t *pac); diff --git a/include/jemalloc/internal/pac.h b/include/jemalloc/internal/pac.h index 5eee3de8..7eaaf894 100644 --- a/include/jemalloc/internal/pac.h +++ b/include/jemalloc/internal/pac.h @@ -3,6 +3,7 @@ #include "jemalloc/internal/exp_grow.h" #include "jemalloc/internal/pai.h" +#include "san_bump.h" /* @@ -127,6 +128,11 @@ pac_mapped(pac_t *pac) { return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED); } +static inline ehooks_t * +pac_ehooks_get(pac_t *pac) { + return base_ehooks_get(pac->base); +} + /* * All purging functions require holding decay->mtx. This is one of the few * places external modules are allowed to peek inside pa_shard_t internals. 
diff --git a/include/jemalloc/internal/san.h b/include/jemalloc/internal/san.h index b3d0304c..70debf3a 100644 --- a/include/jemalloc/internal/san.h +++ b/include/jemalloc/internal/san.h @@ -4,7 +4,8 @@ #include "jemalloc/internal/ehooks.h" #include "jemalloc/internal/emap.h" -#define PAGE_GUARDS_SIZE (2 * PAGE) +#define SAN_PAGE_GUARD PAGE +#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2) #define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0 #define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0 @@ -14,9 +15,9 @@ extern size_t opt_san_guard_large; extern size_t opt_san_guard_small; void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap); + emap_t *emap, bool left, bool right, bool remap); void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap); + emap_t *emap, bool left, bool right); /* * Unguard the extent, but don't modify emap boundaries. Must be called on an * extent that has been erased from emap and shouldn't be placed back. @@ -25,6 +26,45 @@ void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap); void tsd_san_init(tsd_t *tsd); +static inline void +san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap, bool remap) { + return san_guard_pages(tsdn, ehooks, edata, emap, true, true, + remap); +} + +static inline void +san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, + emap_t *emap) { + return san_unguard_pages(tsdn, ehooks, edata, emap, true, true); +} + +static inline size_t +san_two_side_unguarded_sz(size_t size) { + assert(size % PAGE == 0); + assert(size >= SAN_PAGE_GUARDS_SIZE); + return size - SAN_PAGE_GUARDS_SIZE; +} + +static inline size_t +san_two_side_guarded_sz(size_t size) { + assert(size % PAGE == 0); + return size + SAN_PAGE_GUARDS_SIZE; +} + +static inline size_t +san_one_side_unguarded_sz(size_t size) { + assert(size % PAGE == 0); + assert(size >= SAN_PAGE_GUARD); + return size - 
SAN_PAGE_GUARD; +} + +static inline size_t +san_one_side_guarded_sz(size_t size) { + assert(size % PAGE == 0); + return size + SAN_PAGE_GUARD; +} + static inline bool san_enabled(void) { return (opt_san_guard_large != 0 || opt_san_guard_small != 0); @@ -50,7 +90,7 @@ san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size, } if (n == 1 && (alignment <= PAGE) && - (size + PAGE_GUARDS_SIZE <= SC_LARGE_MAXCLASS)) { + (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) { *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large; return true; diff --git a/include/jemalloc/internal/san_bump.h b/include/jemalloc/internal/san_bump.h new file mode 100644 index 00000000..9c6c224f --- /dev/null +++ b/include/jemalloc/internal/san_bump.h @@ -0,0 +1,27 @@ +#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H +#define JEMALLOC_INTERNAL_SAN_BUMP_H + +#include "jemalloc/internal/edata.h" +#include "jemalloc/internal/exp_grow.h" +#include "jemalloc/internal/mutex.h" + +extern const size_t SBA_RETAINED_ALLOC_SIZE; + +typedef struct ehooks_s ehooks_t; +typedef struct pac_s pac_t; + +typedef struct san_bump_alloc_s san_bump_alloc_t; +struct san_bump_alloc_s { + malloc_mutex_t mtx; + + edata_t *curr_reg; +}; + +bool +san_bump_alloc_init(san_bump_alloc_t* sba); + +edata_t * +san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks, + size_t size, bool zero); + +#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */ diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h index c12a705c..e81b9a00 100644 --- a/include/jemalloc/internal/witness.h +++ b/include/jemalloc/internal/witness.h @@ -48,6 +48,7 @@ enum witness_rank_e { WITNESS_RANK_EXTENT_GROW, WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW, + WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW, WITNESS_RANK_EXTENTS, WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS, diff --git a/src/extent.c b/src/extent.c index 7112d3a8..13d688d1 100644 --- a/src/extent.c +++ 
b/src/extent.c @@ -40,13 +40,9 @@ static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, bool zero, bool *commit, bool growing_retained, bool guarded); static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced); -static void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - ecache_t *ecache, edata_t *edata); static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment, bool zero, bool *commit, bool guarded); -static edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - void *new_addr, size_t size, size_t alignment, bool zero, bool *commit); /******************************************************************************/ @@ -127,7 +123,8 @@ ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, void *new_addr = (expand_edata == NULL) ? NULL : edata_past_get(expand_edata); edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr, - size, alignment, zero, &commit); + size, alignment, zero, &commit, + /* growing_retained */ false); } assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC); @@ -270,7 +267,7 @@ extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset, emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active); } -static void +void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) { cassert(config_prof); /* prof_gdump() requirement. 
*/ @@ -785,35 +782,6 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, return edata; } -static edata_t * -extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - void *new_addr, size_t size, size_t alignment, bool zero, bool *commit) { - witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), - WITNESS_RANK_CORE, 0); - - edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); - if (edata == NULL) { - return NULL; - } - size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); - void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment, - &zero, commit); - if (addr == NULL) { - edata_cache_put(tsdn, pac->edata_cache, edata); - return NULL; - } - edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr, - size, /* slab */ false, SC_NSIZES, extent_sn_next(pac), - extent_state_active, zero, *commit, EXTENT_PAI_PAC, - opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD); - if (extent_register(tsdn, pac, edata)) { - edata_cache_put(tsdn, pac->edata_cache, edata); - return NULL; - } - - return edata; -} - static bool extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, edata_t *inner, edata_t *outer, bool forward) { @@ -924,9 +892,9 @@ extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, * Does the metadata management portions of putting an unused extent into the * given ecache_t (coalesces and inserts into the eset). 
*/ -static void -extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, - ecache_t *ecache, edata_t *edata) { +void +extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache, + edata_t *edata) { assert((ecache->state != extent_state_dirty && ecache->state != extent_state_muzzy) || !edata_zeroed_get(edata)); @@ -1001,6 +969,42 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, return err; } +edata_t * +extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, + void *new_addr, size_t size, size_t alignment, bool zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + edata_t *edata = edata_cache_get(tsdn, pac->edata_cache); + if (edata == NULL) { + return NULL; + } + size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); + void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment, + &zero, commit); + if (addr == NULL) { + edata_cache_put(tsdn, pac->edata_cache, edata); + return NULL; + } + edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr, + size, /* slab */ false, SC_NSIZES, extent_sn_next(pac), + extent_state_active, zero, *commit, EXTENT_PAI_PAC, + opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD); + /* + * Retained memory is not counted towards gdump. Only if an extent is + * allocated as a separate mapping, i.e. growing_retained is false, then + * gdump should be updated. + */ + bool gdump_add = !growing_retained; + if (extent_register_impl(tsdn, pac, edata, gdump_add)) { + edata_cache_put(tsdn, pac->edata_cache, edata); + return NULL; + } + + return edata; +} + void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { @@ -1013,7 +1017,8 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, /* Restore guard pages for dalloc / unmap. 
*/ if (edata_guarded_get(edata)) { assert(ehooks_are_default(ehooks)); - san_unguard_pages(tsdn, ehooks, edata, pac->emap); + san_unguard_pages_two_sided(tsdn, ehooks, edata, + pac->emap); } /* * Deregister first to avoid a race with other allocating @@ -1057,12 +1062,14 @@ extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata) { assert(edata_base_get(edata) != NULL); assert(edata_size_get(edata) != 0); - assert(edata_state_get(edata) == extent_state_retained); + extent_state_t state = edata_state_get(edata); + assert(state == extent_state_retained || state == extent_state_active); assert(emap_edata_is_acquired(tsdn, pac->emap, edata)); witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE, 0); if (edata_guarded_get(edata)) { + assert(opt_retain); san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap); } edata_addr_set(edata, edata_base_get(edata)); @@ -1087,9 +1094,9 @@ extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - size_t offset, size_t length) { + size_t offset, size_t length, bool growing_retained) { return extent_commit_impl(tsdn, ehooks, edata, offset, length, - false); + growing_retained); } bool @@ -1207,9 +1214,9 @@ label_error_a: edata_t * extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata, - size_t size_a, size_t size_b) { + size_t size_a, size_t size_b, bool holding_core_locks) { return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b, - /* holding_core_locks */ false); + holding_core_locks); } static bool diff --git a/src/pac.c b/src/pac.c index e53de80f..914cec90 100644 --- a/src/pac.c +++ b/src/pac.c @@ -14,11 +14,6 @@ static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, bool *deferred_work_generated); static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self); -static ehooks_t * -pac_ehooks_get(pac_t *pac) { - return 
base_ehooks_get(pac->base); -} - static inline void pac_decay_data_get(pac_t *pac, extent_state_t state, decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) { @@ -139,14 +134,15 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size, size_t alignment, bool zero) { assert(alignment <= PAGE); - size_t size_with_guards = size + PAGE_GUARDS_SIZE; + size_t size_with_guards = size + SAN_PAGE_GUARDS_SIZE; /* Alloc a non-guarded extent first.*/ edata_t *edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards, /* alignment */ PAGE, zero, /* guarded */ false); if (edata != NULL) { /* Add guards around it. */ assert(edata_size_get(edata) == size_with_guards); - san_guard_pages(tsdn, ehooks, edata, pac->emap); + san_guard_pages(tsdn, ehooks, edata, pac->emap, true, true, + true); } assert(edata == NULL || (edata_guarded_get(edata) && edata_size_get(edata) == size)); @@ -222,7 +218,7 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size, } edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata, - new_size, shrink_amount); + new_size, shrink_amount, /* holding_core_locks */ false); if (trail == NULL) { return true; } @@ -253,7 +249,8 @@ pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, if (!edata_slab_get(edata) || !maps_coalesce) { assert(edata_size_get(edata) >= SC_LARGE_MINCLASS || !maps_coalesce); - san_unguard_pages(tsdn, ehooks, edata, pac->emap); + san_unguard_pages_two_sided(tsdn, ehooks, edata, + pac->emap); } } diff --git a/src/pages.c b/src/pages.c index a8d9988b..8c83a7de 100644 --- a/src/pages.c +++ b/src/pages.c @@ -365,33 +365,61 @@ pages_decommit(void *addr, size_t size) { void pages_mark_guards(void *head, void *tail) { - assert(head != NULL && tail != NULL); - assert((uintptr_t)head < (uintptr_t)tail); + assert(head != NULL || tail != NULL); + assert(head == NULL || tail == NULL || + (uintptr_t)head < (uintptr_t)tail); #ifdef JEMALLOC_HAVE_MPROTECT - mprotect(head, 
PAGE, PROT_NONE); - mprotect(tail, PAGE, PROT_NONE); + if (head != NULL) { + mprotect(head, PAGE, PROT_NONE); + } + if (tail != NULL) { + mprotect(tail, PAGE, PROT_NONE); + } #else /* Decommit sets to PROT_NONE / MEM_DECOMMIT. */ - os_pages_commit(head, PAGE, false); - os_pages_commit(tail, PAGE, false); + if (head != NULL) { + os_pages_commit(head, PAGE, false); + } + if (tail != NULL) { + os_pages_commit(tail, PAGE, false); + } #endif } void pages_unmark_guards(void *head, void *tail) { - assert(head != NULL && tail != NULL); - assert((uintptr_t)head < (uintptr_t)tail); + assert(head != NULL || tail != NULL); + assert(head == NULL || tail == NULL || + (uintptr_t)head < (uintptr_t)tail); #ifdef JEMALLOC_HAVE_MPROTECT - size_t range = (uintptr_t)tail - (uintptr_t)head + PAGE; - if (range <= SC_LARGE_MINCLASS) { + bool head_and_tail = (head != NULL) && (tail != NULL); + size_t range = head_and_tail ? + (uintptr_t)tail - (uintptr_t)head + PAGE : + SIZE_T_MAX; + /* + * The amount of work that the kernel does in mprotect depends on the + * range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen + * to prevent kernel from doing too much work that would outweigh the + * savings of performing one less system call. 
+ */ + bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS; + if (ranged_mprotect) { mprotect(head, range, PROT_READ | PROT_WRITE); } else { - mprotect(head, PAGE, PROT_READ | PROT_WRITE); - mprotect(tail, PAGE, PROT_READ | PROT_WRITE); + if (head != NULL) { + mprotect(head, PAGE, PROT_READ | PROT_WRITE); + } + if (tail != NULL) { + mprotect(tail, PAGE, PROT_READ | PROT_WRITE); + } } #else - os_pages_commit(head, PAGE, true); - os_pages_commit(tail, PAGE, true); + if (head != NULL) { + os_pages_commit(head, PAGE, true); + } + if (tail != NULL) { + os_pages_commit(tail, PAGE, true); + } #endif } diff --git a/src/san.c b/src/san.c index 139ec5a3..15fdb7ff 100644 --- a/src/san.c +++ b/src/san.c @@ -10,16 +10,63 @@ size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT; size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT; +static inline void +san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2, + uintptr_t *addr, size_t size, bool left, bool right) { + assert(!edata_guarded_get(edata)); + assert(size % PAGE == 0); + *addr = (uintptr_t)edata_base_get(edata); + if (left) { + *guard1 = *addr; + *addr += SAN_PAGE_GUARD; + } else { + *guard1 = 0; + } + + if (right) { + *guard2 = *addr + size; + } else { + *guard2 = 0; + } +} + +static inline void +san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2, + uintptr_t *addr, size_t size, bool left, bool right) { + assert(edata_guarded_get(edata)); + assert(size % PAGE == 0); + *addr = (uintptr_t)edata_base_get(edata); + if (right) { + *guard2 = *addr + size; + } else { + *guard2 = 0; + } + + if (left) { + *guard1 = *addr - SAN_PAGE_GUARD; + assert(*guard1 != 0); + *addr = *guard1; + } else { + *guard1 = 0; + } +} + void -san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) { - emap_deregister_boundary(tsdn, emap, edata); +san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap, + bool left, 
bool right, bool remap) { + assert(left || right); + if (remap) { + emap_deregister_boundary(tsdn, emap, edata); + } size_t size_with_guards = edata_size_get(edata); - size_t usize = size_with_guards - PAGE_GUARDS_SIZE; + size_t usize = (left && right) + ? san_two_side_unguarded_sz(size_with_guards) + : san_one_side_unguarded_sz(size_with_guards); - uintptr_t guard1 = (uintptr_t)edata_base_get(edata); - uintptr_t addr = guard1 + PAGE; - uintptr_t guard2 = addr + usize; + uintptr_t guard1, guard2, addr; + san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left, + right); assert(edata_state_get(edata) == extent_state_active); ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2); @@ -29,14 +76,18 @@ san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) { edata_addr_set(edata, (void *)addr); edata_guarded_set(edata, true); - /* The new boundary will be registered on the pa_alloc path. */ + if (remap) { + emap_register_boundary(tsdn, emap, edata, SC_NSIZES, + /* slab */ false); + } } static void san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap, bool reg_emap) { + emap_t *emap, bool left, bool right, bool remap) { + assert(left || right); /* Remove the inner boundary which no longer exists. */ - if (reg_emap) { + if (remap) { assert(edata_state_get(edata) == extent_state_active); emap_deregister_boundary(tsdn, emap, edata); } else { @@ -44,24 +95,26 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, } size_t size = edata_size_get(edata); - size_t size_with_guards = size + PAGE_GUARDS_SIZE; + size_t size_with_guards = (left && right) + ? 
san_two_side_guarded_sz(size) + : san_one_side_guarded_sz(size); - uintptr_t addr = (uintptr_t)edata_base_get(edata); - uintptr_t guard1 = addr - PAGE; - uintptr_t guard2 = addr + size; + uintptr_t guard1, guard2, addr; + san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left, + right); ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2); /* Update the true addr and usable size of the edata. */ edata_size_set(edata, size_with_guards); - edata_addr_set(edata, (void *)guard1); + edata_addr_set(edata, (void *)addr); edata_guarded_set(edata, false); /* * Then re-register the outer boundary including the guards, if * requested. */ - if (reg_emap) { + if (remap) { emap_register_boundary(tsdn, emap, edata, SC_NSIZES, /* slab */ false); } @@ -69,15 +122,23 @@ san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, - emap_t *emap) { - san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* reg_emap */ true); + emap_t *emap, bool left, bool right) { + san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right, + /* remap */ true); } void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap) { emap_assert_not_mapped(tsdn, emap, edata); - san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* reg_emap */ false); + /* + * We don't want to touch the emap of about to be destroyed extents, as + * they have been unmapped upon eviction from the retained ecache. Also, + * we unguard the extents to the right, because retained extents only + * own their right guard page per san_bump_alloc's logic. 
+ */ + san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false, + /* right */ true, /* remap */ false); } void diff --git a/src/san_bump.c b/src/san_bump.c new file mode 100644 index 00000000..6098bd95 --- /dev/null +++ b/src/san_bump.c @@ -0,0 +1,127 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/san_bump.h" +#include "jemalloc/internal/pac.h" +#include "jemalloc/internal/san.h" +#include "jemalloc/internal/ehooks.h" +#include "jemalloc/internal/edata_cache.h" + +const size_t SBA_RETAINED_ALLOC_SIZE = 1024 * 1024 * 4; /* 4 MB */ + +static bool +san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, + ehooks_t *ehooks, size_t size); + +bool +san_bump_alloc_init(san_bump_alloc_t* sba) { + bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator", + WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive); + if (err) { + return true; + } + sba->curr_reg = NULL; + + return false; +} + +edata_t * +san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, + ehooks_t *ehooks, size_t size, bool zero) { + assert(maps_coalesce && opt_retain); + + edata_t* to_destroy; + size_t guarded_size = san_one_side_guarded_sz(size); + + malloc_mutex_lock(tsdn, &sba->mtx); + + if (sba->curr_reg == NULL || + edata_size_get(sba->curr_reg) < guarded_size) { + /* + * If the current region can't accommodate the allocation, + * try replacing it with a larger one and destroy current if the + * replacement succeeds. 
+ */ + to_destroy = sba->curr_reg; + bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks, + guarded_size); + if (err) { + goto label_err; + } + } else { + to_destroy = NULL; + } + assert(guarded_size <= edata_size_get(sba->curr_reg)); + size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size; + + edata_t* edata; + if (trail_size != 0) { + edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac, + ehooks, sba->curr_reg, guarded_size, trail_size, + /* holding_core_locks */ true); + if (curr_reg_trail == NULL) { + goto label_err; + } + edata = sba->curr_reg; + sba->curr_reg = curr_reg_trail; + } else { + edata = sba->curr_reg; + sba->curr_reg = NULL; + } + + malloc_mutex_unlock(tsdn, &sba->mtx); + + assert(!edata_guarded_get(edata)); + assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg)); + assert(to_destroy == NULL || !edata_guarded_get(to_destroy)); + + if (to_destroy != NULL) { + extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy); + } + + san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false, + /* right */ true, /* remap */ true); + + if (!edata_committed_get(edata)) { + if (extent_commit_wrapper(tsdn, ehooks, edata, 0, + edata_size_get(edata), true)) { + extent_record(tsdn, pac, ehooks, &pac->ecache_retained, + edata); + return NULL; + } + edata_committed_set(edata, true); + } + if (zero && !edata_zeroed_get(edata)) { + void *addr = edata_base_get(edata); + size_t size = edata_size_get(edata); + ehooks_zero(tsdn, ehooks, addr, size); + edata_zeroed_set(edata, true); + } + + if (config_prof) { + extent_gdump_add(tsdn, edata); + } + + return edata; +label_err: + malloc_mutex_unlock(tsdn, &sba->mtx); + return NULL; +} + +static bool +san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, + ehooks_t *ehooks, size_t size) { + malloc_mutex_assert_owner(tsdn, &sba->mtx); + + bool committed = false, zeroed = false; + size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? 
size : + SBA_RETAINED_ALLOC_SIZE; + assert((alloc_size & PAGE_MASK) == 0); + sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL, + alloc_size, PAGE, zeroed, &committed, + /* growing_retained */ true); + if (sba->curr_reg == NULL) { + return true; + } + return false; +} diff --git a/test/include/test/arena_decay.h b/test/include/test/arena_decay.h index da659212..524ee218 100644 --- a/test/include/test/arena_decay.h +++ b/test/include/test/arena_decay.h @@ -1,4 +1,4 @@ -static unsigned +static inline unsigned do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { unsigned arena_ind; size_t sz = sizeof(unsigned); @@ -24,7 +24,7 @@ do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { return arena_ind; } -static void +static inline void do_arena_destroy(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); @@ -35,14 +35,14 @@ do_arena_destroy(unsigned arena_ind) { "Unexpected mallctlbymib() failure"); } -static void +static inline void do_epoch(void) { uint64_t epoch = 1; expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); } -static void +static inline void do_purge(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); @@ -53,7 +53,7 @@ do_purge(unsigned arena_ind) { "Unexpected mallctlbymib() failure"); } -static void +static inline void do_decay(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); @@ -64,7 +64,7 @@ do_decay(unsigned arena_ind) { "Unexpected mallctlbymib() failure"); } -static uint64_t +static inline uint64_t get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); @@ -78,32 +78,32 @@ get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { return npurge; } -static uint64_t +static inline uint64_t get_arena_dirty_npurge(unsigned arena_ind) { do_epoch(); return 
get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind); } -static uint64_t +static inline uint64_t get_arena_dirty_purged(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind); } -static uint64_t +static inline uint64_t get_arena_muzzy_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } -static uint64_t +static inline uint64_t get_arena_npurge(unsigned arena_ind) { do_epoch(); return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); } -static size_t +static inline size_t get_arena_pdirty(unsigned arena_ind) { do_epoch(); size_t mib[4]; @@ -118,7 +118,7 @@ get_arena_pdirty(unsigned arena_ind) { return pdirty; } -static size_t +static inline size_t get_arena_pmuzzy(unsigned arena_ind) { do_epoch(); size_t mib[4]; @@ -133,14 +133,14 @@ get_arena_pmuzzy(unsigned arena_ind) { return pmuzzy; } -static void * +static inline void * do_mallocx(size_t size, int flags) { void *p = mallocx(size, flags); expect_ptr_not_null(p, "Unexpected mallocx() failure"); return p; } -static void +static inline void generate_dirty(unsigned arena_ind, size_t size) { int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; void *p = do_mallocx(size, flags); diff --git a/test/unit/retained.c b/test/unit/retained.c index 76bda50f..53c90f24 100644 --- a/test/unit/retained.c +++ b/test/unit/retained.c @@ -104,7 +104,7 @@ TEST_BEGIN(test_retained) { arena_ind = do_arena_create(NULL); sz = nallocx(HUGEPAGE, 0); - size_t guard_sz = san_enabled() ? PAGE_GUARDS_SIZE : 0; + size_t guard_sz = san_enabled() ? 
SAN_PAGE_GUARDS_SIZE : 0; esz = sz + sz_large_pad + guard_sz; atomic_store_u(&epoch, 0, ATOMIC_RELAXED); diff --git a/test/unit/san.c b/test/unit/san.c index 93e292f6..eb9ff517 100644 --- a/test/unit/san.c +++ b/test/unit/san.c @@ -122,7 +122,7 @@ TEST_BEGIN(test_guarded_decay) { /* Verify that guarded extents as dirty. */ size_t sz1 = PAGE, sz2 = PAGE * 2; /* W/o maps_coalesce, guarded extents are unguarded eagerly. */ - size_t add_guard_size = maps_coalesce ? 0 : PAGE_GUARDS_SIZE; + size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE; generate_dirty(arena_ind, sz1); verify_pdirty(arena_ind, sz1 + add_guard_size); verify_pmuzzy(arena_ind, 0); diff --git a/test/unit/san_bump.c b/test/unit/san_bump.c new file mode 100644 index 00000000..fbee53e5 --- /dev/null +++ b/test/unit/san_bump.c @@ -0,0 +1,111 @@ +#include "test/jemalloc_test.h" +#include "test/arena_decay.h" + +#include "jemalloc/internal/arena_structs.h" +#include "jemalloc/internal/san_bump.h" + +TEST_BEGIN(test_san_bump_alloc) { + test_skip_if(!maps_coalesce || !opt_retain); + + tsdn_t *tsdn = tsdn_fetch(); + + san_bump_alloc_t sba; + san_bump_alloc_init(&sba); + + unsigned arena_ind = do_arena_create(0, 0); + assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena"); + + arena_t *arena = arena_get(tsdn, arena_ind, false); + pac_t *pac = &arena->pa_shard.pac; + + size_t alloc_size = PAGE * 16; + size_t alloc_n = alloc_size / sizeof(unsigned); + edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac), + alloc_size, /* zero */ false); + + expect_ptr_not_null(edata, "Failed to allocate edata"); + expect_u_eq(edata_arena_ind_get(edata), arena_ind, + "Edata was assigned an incorrect arena id"); + expect_zu_eq(edata_size_get(edata), alloc_size, + "Allocated edata of incorrect size"); + expect_false(edata_slab_get(edata), + "Bump allocator incorrectly assigned 'slab' to true"); + expect_true(edata_committed_get(edata), "Edata is not committed"); + + void *ptr = 
edata_addr_get(edata); + expect_ptr_not_null(ptr, "Edata was assigned an invalid address"); + /* Test that memory is allocated; no guard pages are misplaced */ + for (unsigned i = 0; i < alloc_n; ++i) { + ((unsigned *)ptr)[i] = 1; + } + + size_t alloc_size2 = PAGE * 28; + size_t alloc_n2 = alloc_size2 / sizeof(unsigned); + edata_t *edata2 = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac), + alloc_size2, /* zero */ true); + + expect_ptr_not_null(edata2, "Failed to allocate edata"); + expect_u_eq(edata_arena_ind_get(edata2), arena_ind, + "Edata was assigned an incorrect arena id"); + expect_zu_eq(edata_size_get(edata2), alloc_size2, + "Allocated edata of incorrect size"); + expect_false(edata_slab_get(edata2), + "Bump allocator incorrectly assigned 'slab' to true"); + expect_true(edata_committed_get(edata2), "Edata is not committed"); + + void *ptr2 = edata_addr_get(edata2); + expect_ptr_not_null(ptr2, "Edata was assigned an invalid address"); + + uintptr_t ptrdiff = ptr2 > ptr ? (uintptr_t)ptr2 - (uintptr_t)ptr + : (uintptr_t)ptr - (uintptr_t)ptr2; + size_t between_allocs = (size_t)ptrdiff - alloc_size; + + expect_zu_ge(between_allocs, PAGE, + "Guard page between allocs is missing"); + + for (unsigned i = 0; i < alloc_n2; ++i) { + expect_u_eq(((unsigned *)ptr2)[i], 0, "Memory is not zeroed"); + } +} +TEST_END + +TEST_BEGIN(test_large_alloc_size) { + test_skip_if(!maps_coalesce || !opt_retain); + + tsdn_t *tsdn = tsdn_fetch(); + + san_bump_alloc_t sba; + san_bump_alloc_init(&sba); + + unsigned arena_ind = do_arena_create(0, 0); + assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena"); + + arena_t *arena = arena_get(tsdn, arena_ind, false); + pac_t *pac = &arena->pa_shard.pac; + + size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2; + edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac), + alloc_size, /* zero */ false); + expect_u_eq(edata_arena_ind_get(edata), arena_ind, + "Edata was assigned an incorrect arena id"); + 
expect_zu_eq(edata_size_get(edata), alloc_size, + "Allocated edata of incorrect size"); + expect_false(edata_slab_get(edata), + "Bump allocator incorrectly assigned 'slab' to true"); + expect_true(edata_committed_get(edata), "Edata is not committed"); + + void *ptr = edata_addr_get(edata); + expect_ptr_not_null(ptr, "Edata was assigned an invalid address"); + /* Test that memory is allocated; no guard pages are misplaced */ + for (unsigned i = 0; i < alloc_size / PAGE; ++i) { + *((char *)ptr + PAGE * i) = 1; + } +} +TEST_END + +int +main(void) { + return test( + test_san_bump_alloc, + test_large_alloc_size); +}