diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h
index b57dbfdd..11b0ce46 100644
--- a/include/jemalloc/internal/arena_inlines_b.h
+++ b/include/jemalloc/internal/arena_inlines_b.h
@@ -42,6 +42,34 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
 	return tsd_arena;
 }
 
+JEMALLOC_ALWAYS_INLINE bool
+large_dalloc_safety_checks(edata_t *edata, const void *ptr, szind_t szind) {
+	if (!config_opt_safety_checks) {
+		return false;
+	}
+
+	/*
+	 * Eagerly detect double free and sized dealloc bugs for large sizes.
+	 * The cost is low enough (as edata will be accessed anyway) to be
+	 * enabled all the time.
+	 */
+	if (unlikely(edata == NULL ||
+	    edata_state_get(edata) != extent_state_active)) {
+		safety_check_fail("Invalid deallocation detected: "
+		    "pages being freed (%p) not currently active, "
+		    "possibly caused by double free bugs.", ptr);
+		return true;
+	}
+	size_t input_size = sz_index2size(szind);
+	if (unlikely(input_size != edata_usize_get(edata))) {
+		safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
+		    /* true_size */ edata_usize_get(edata), input_size);
+		return true;
+	}
+
+	return false;
+}
+
 JEMALLOC_ALWAYS_INLINE void
 arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
     prof_info_t *prof_info, bool reset_recent) {
@@ -65,6 +93,11 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
 	if (unlikely(!is_slab)) {
 		/* edata must have been initialized at this point. */
 		assert(edata != NULL);
+		if (reset_recent &&
+		    large_dalloc_safety_checks(edata, ptr,
+		    edata_szind_get(edata))) {
+			return;
+		}
 		large_prof_info_get(tsd, edata, prof_info, reset_recent);
 	} else {
 		prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
@@ -215,34 +248,6 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	return sz_index2size(full_alloc_ctx.szind);
 }
 
-JEMALLOC_ALWAYS_INLINE bool
-large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
-	if (!config_opt_safety_checks) {
-		return false;
-	}
-
-	/*
-	 * Eagerly detect double free and sized dealloc bugs for large sizes.
-	 * The cost is low enough (as edata will be accessed anyway) to be
-	 * enabled all the time.
-	 */
-	if (unlikely(edata == NULL ||
-	    edata_state_get(edata) != extent_state_active)) {
-		safety_check_fail("Invalid deallocation detected: "
-		    "pages being freed (%p) not currently active, "
-		    "possibly caused by double free bugs.", ptr);
-		return true;
-	}
-	size_t input_size = sz_index2size(szind);
-	if (unlikely(input_size != edata_usize_get(edata))) {
-		safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
-		    /* true_size */ edata_usize_get(edata), input_size);
-		return true;
-	}
-
-	return false;
-}
-
 static inline void
 arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
 	if (config_prof && unlikely(szind < SC_NBINS)) {
diff --git a/include/jemalloc/internal/rb.h b/include/jemalloc/internal/rb.h
index a9a51cb6..fc1dac7c 100644
--- a/include/jemalloc/internal/rb.h
+++ b/include/jemalloc/internal/rb.h
@@ -560,6 +560,20 @@ a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
  * the same as with the unfiltered version, with the added constraint that the
  * returned node must pass the filter.
  */
+JEMALLOC_ALWAYS_INLINE void
+rb_remove_safety_checks(const void *nodep, const char *function_name) {
+	if (!config_opt_safety_checks) {
+		return;
+	}
+	if (unlikely(nodep == NULL)) {
+		safety_check_fail(
+		    "<jemalloc>: Invalid deallocation detected in %s: "
+		    "attempting to remove node from tree but node was "
+		    "not found. Possibly caused by double free bugs.",
+		    function_name);
+	}
+}
+
 #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp)	\
     rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp,	\
         rb_empty_summarize, false)
@@ -852,6 +866,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {	\
 			}						\
 		}							\
 	}								\
+	rb_remove_safety_checks(nodep, __func__);			\
 	assert(nodep->node == node);					\
 	pathp--;							\
 	if (pathp->node != node) {					\
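
Note: large_dalloc_safety_checks() is not new logic; the diff moves it earlier in arena_inlines_b.h (and makes ptr const) so arena_prof_info_get() can reuse it when recent allocations are reset, and rb_remove_safety_checks() adds the analogous "node not found" detection to red-black tree removal, which is how a double free of a large extent can surface. As a rough illustration of the kind of application bug the size-class check catches, here is a minimal sketch, assuming a jemalloc build with the safety checks compiled in (the config_opt_safety_checks gate, e.g. via --enable-opt-safety-checks) and linked against jemalloc; the 1 MiB allocation and the mismatched 4096-byte sdallocx() size are arbitrary illustration values, not taken from this patch:

```c
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/*
	 * A large allocation, well above the small size classes, so its
	 * deallocation goes through the large path that calls
	 * large_dalloc_safety_checks().
	 */
	void *p = malloc(1 << 20);

	/*
	 * Sized deallocation with a size that maps to a different size
	 * class than the allocation's usable size; with safety checks
	 * enabled this is expected to trip
	 * safety_check_fail_sized_dealloc() instead of silently
	 * corrupting allocator state.
	 */
	sdallocx(p, 4096, 0);

	return 0;
}
```

A genuine double free of such an allocation would instead hit the "not currently active" branch (or, once the extent has been reinserted and removed from its tree, the new rb_remove_safety_checks() message).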