Fix segfault in rb *_tree_remove

Static analysis flagged this. It's possible to segfault in the
`*_tree_remove` function generated by `rb_gen`, as `nodep` may
still be `NULL` after the initial for loop. I can confirm from reviewing
the fleetwide coredump data that this was in fact being hit in
production, primarily through `tctx_tree_remove`, and much more rarely
through `gctx_tree_remove`.
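
For context, the remove function that `rb_gen` emits winds down the tree
recording the search path, and only assigns `nodep` once the comparator
reports a match. A simplified sketch of that loop (abbreviated from the
`rb_gen_impl` expansion, not the verbatim macro text):

    nodep = NULL; /* Assigned only if the target node is found. */
    path->node = rbtree->rbt_root;
    for (pathp = path; pathp->node != NULL; pathp++) {
        int cmp = pathp->cmp = a_cmp(node, pathp->node);
        if (cmp < 0) {
            pathp[1].node = rbtn_left_get(a_type, a_field, pathp->node);
        } else {
            pathp[1].node = rbtn_right_get(a_type, a_field, pathp->node);
            if (cmp == 0) {
                nodep = pathp; /* Found the target node. */
                /* ... descend to its successor, then break ... */
            }
        }
    }
    /* If `node` was never inserted, `nodep` is still NULL here, so the
     * following access dereferences NULL: */
    assert(nodep->node == node);

Removing a node that is not actually in the tree (the classic double-free
pattern) therefore runs the search to completion with `nodep == NULL` and
segfaults on the first `nodep->node` access.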
Kevin Svetlitski, 2023-05-09 12:06:47 -07:00 (committed by Qi Wang)
parent 86eb49b478
commit 90176f8a87
2 changed files with 48 additions and 28 deletions

include/jemalloc/internal/arena_inlines_b.h

@@ -42,6 +42,34 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
 	return tsd_arena;
 }
 
+JEMALLOC_ALWAYS_INLINE bool
+large_dalloc_safety_checks(edata_t *edata, const void *ptr, szind_t szind) {
+	if (!config_opt_safety_checks) {
+		return false;
+	}
+
+	/*
+	 * Eagerly detect double free and sized dealloc bugs for large sizes.
+	 * The cost is low enough (as edata will be accessed anyway) to be
+	 * enabled all the time.
+	 */
+	if (unlikely(edata == NULL ||
+	    edata_state_get(edata) != extent_state_active)) {
+		safety_check_fail("Invalid deallocation detected: "
+		    "pages being freed (%p) not currently active, "
+		    "possibly caused by double free bugs.", ptr);
+		return true;
+	}
+	size_t input_size = sz_index2size(szind);
+	if (unlikely(input_size != edata_usize_get(edata))) {
+		safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
+		    /* true_size */ edata_usize_get(edata), input_size);
+		return true;
+	}
+
+	return false;
+}
+
 JEMALLOC_ALWAYS_INLINE void
 arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
     prof_info_t *prof_info, bool reset_recent) {
@@ -65,6 +93,11 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
 	if (unlikely(!is_slab)) {
 		/* edata must have been initialized at this point. */
 		assert(edata != NULL);
+		if (reset_recent &&
+		    large_dalloc_safety_checks(edata, ptr,
+		    edata_szind_get(edata))) {
+			return;
+		}
 		large_prof_info_get(tsd, edata, prof_info, reset_recent);
 	} else {
 		prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
@@ -215,34 +248,6 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	return sz_index2size(full_alloc_ctx.szind);
 }
 
-JEMALLOC_ALWAYS_INLINE bool
-large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
-	if (!config_opt_safety_checks) {
-		return false;
-	}
-
-	/*
-	 * Eagerly detect double free and sized dealloc bugs for large sizes.
-	 * The cost is low enough (as edata will be accessed anyway) to be
-	 * enabled all the time.
-	 */
-	if (unlikely(edata == NULL ||
-	    edata_state_get(edata) != extent_state_active)) {
-		safety_check_fail("Invalid deallocation detected: "
-		    "pages being freed (%p) not currently active, "
-		    "possibly caused by double free bugs.", ptr);
-		return true;
-	}
-	size_t input_size = sz_index2size(szind);
-	if (unlikely(input_size != edata_usize_get(edata))) {
-		safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
-		    /* true_size */ edata_usize_get(edata), input_size);
-		return true;
-	}
-
-	return false;
-}
-
 static inline void
 arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
 	if (config_prof && unlikely(szind < SC_NBINS)) {

include/jemalloc/internal/rb.h

@@ -560,6 +560,20 @@ a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start,	\
  * the same as with the unfiltered version, with the added constraint that the
  * returned node must pass the filter.
  */
+JEMALLOC_ALWAYS_INLINE void
+rb_remove_safety_checks(const void *nodep, const char *function_name) {
+	if (!config_opt_safety_checks) {
+		return;
+	}
+	if (unlikely(nodep == NULL)) {
+		safety_check_fail(
+		    "<jemalloc>: Invalid deallocation detected in %s: "
+		    "attempting to remove node from tree but node was "
+		    "not found. Possibly caused by double free bugs.",
+		    function_name);
+	}
+}
+
 #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp)	\
     rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp,	\
 	rb_empty_summarize, false)
@@ -852,6 +866,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) {	\
 	    }								\
 	}								\
     }									\
+    rb_remove_safety_checks(nodep, __func__);				\
     assert(nodep->node == node);					\
     pathp--;								\
     if (pathp->node != node) {						\
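
As a purely illustrative example (the `ex_*` names below are invented, not
part of the patch): a tree instantiated with prefix `ex_` gets a generated
`ex_remove`, and with safety checks compiled in, removing a node that was
never inserted now aborts with a diagnostic naming `ex_remove` rather than
dereferencing NULL.

    typedef struct ex_node_s ex_node_t;
    struct ex_node_s {
        rb_node(ex_node_t) link; /* Embedded red-black tree linkage. */
        int key;
    };
    typedef rb_tree(ex_node_t) ex_tree_t;

    static int
    ex_cmp(const ex_node_t *a, const ex_node_t *b) {
        return (a->key > b->key) - (a->key < b->key);
    }

    /* Generates static ex_new(), ex_insert(), ex_remove(), etc. */
    rb_gen(static, ex_, ex_tree_t, ex_node_t, link, ex_cmp)

    /*
     * ex_remove(&tree, &stale_node) on a node not present in the tree
     * previously left nodep == NULL and crashed; when
     * config_opt_safety_checks is enabled, it now trips the new
     * rb_remove_safety_checks() with __func__ == "ex_remove".
     */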