From 9e031c1d1128af879589f5e5c37960edd87238c6 Mon Sep 17 00:00:00 2001
From: Yinan Zhang
Date: Wed, 21 Aug 2019 16:38:44 -0700
Subject: [PATCH] Bug fix for prof_active switch

The bug is subtle but critical: if the application performs the
following three actions in sequence: (a) turn `prof_active` off, (b)
make at least one allocation that triggers the malloc slow path via the
`if (unlikely(bytes_until_sample < 0))` path, and (c) turn `prof_active`
back on, then the application would never get another sample (until an
extremely long time later).

The fix is to properly reset `bytes_until_sample` to the sampling
interval, rather than pushing it all the way to `SSIZE_MAX`.

A minor side change is to call `prof_active_get_unlocked()` rather than
directly grabbing the `prof_active` variable - that is the very reason
we defined the `prof_active_get_unlocked()` function.
---
 src/jemalloc.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/jemalloc.c b/src/jemalloc.c
index 75a40277..dd206884 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2356,13 +2356,15 @@ je_malloc(size_t size) {
 		if (unlikely(bytes_until_sample < 0)) {
 			/*
 			 * Avoid a prof_active check on the fastpath.
 			 * If prof_active is false, set bytes_until_sample to
-			 * a large value.  If prof_active is set to true,
+			 * sampling interval.  If prof_active is set to true,
 			 * bytes_until_sample will be reset.
 			 */
-			if (!prof_active) {
-				tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
+			if (!prof_active_get_unlocked()) {
+				tsd_bytes_until_sample_set(tsd,
+				    ((uint64_t)1U << lg_prof_sample));
+			} else {
+				return malloc_default(size);
 			}
-			return malloc_default(size);
 		}
 	}
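
For context, steps (a)-(c) in the message correspond to toggling jemalloc's
`prof.active` mallctl from the application. The snippet below is an
illustrative sketch of the triggering sequence, not a guaranteed reproducer:
it assumes a build configured with `--enable-prof`, profiling activated at
startup, and the default sampling interval (`lg_prof_sample` = 19, i.e. one
sample per 2^19 bytes); error handling and exact allocation thresholds are
glossed over.

    #include <stdbool.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
    	bool off = false, on = true;

    	/* (a) turn prof_active off */
    	mallctl("prof.active", NULL, NULL, &off, sizeof(off));

    	/* (b) allocate more than one sampling interval's worth of bytes
    	 * so that bytes_until_sample goes negative on the fast path;
    	 * pre-fix, this parks the counter at SSIZE_MAX (leaked for
    	 * brevity) */
    	for (int i = 0; i < 4097; i++) {
    		malloc(128);
    	}

    	/* (c) turn prof_active back on; before this patch, no further
    	 * allocation would ever be sampled */
    	mallctl("prof.active", NULL, NULL, &on, sizeof(on));

    	return 0;
    }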
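
To see why parking the counter at `SSIZE_MAX` loses samples, here is a
minimal self-contained model of the fast-path bookkeeping. This is not
jemalloc code: `toy_malloc`, the globals, and the decrement-per-byte scheme
are simplifications invented for illustration, with the pre-fix and post-fix
behaviors shown side by side.

    #include <limits.h>      /* SSIZE_MAX */
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/types.h>   /* ssize_t */

    #define LG_PROF_SAMPLE 19  /* jemalloc default: sample per 2^19 bytes */

    static bool prof_active = true;
    static ssize_t bytes_until_sample = (ssize_t)1 << LG_PROF_SAMPLE;

    /* Stand-in for the per-allocation bookkeeping in je_malloc. */
    static void toy_malloc(size_t size) {
    	bytes_until_sample -= (ssize_t)size;
    	if (bytes_until_sample < 0) {  /* the slow path in the hunk above */
    		if (!prof_active) {
    			bytes_until_sample = SSIZE_MAX;  /* pre-fix */
    			/* post-fix would be:
    			 * bytes_until_sample =
    			 *     (ssize_t)1 << LG_PROF_SAMPLE; */
    			return;
    		}
    		printf("sampled a %zu-byte allocation\n", size);
    		bytes_until_sample = (ssize_t)1 << LG_PROF_SAMPLE;
    	}
    }

    int main(void) {
    	prof_active = false;            /* (a) */
    	toy_malloc(1 << 20);            /* (b) counter parked at SSIZE_MAX */
    	prof_active = true;             /* (c) */
    	for (int i = 0; i < 1024; i++) {
    		toy_malloc(1 << 20);    /* 1 GiB allocated: prints nothing */
    	}
    	return 0;
    }

With the pre-fix line active, the loop never prints: draining SSIZE_MAX
(about 2^63) one megabyte at a time would take roughly 2^43 further
allocations, which is the "extremely long time" in the message. With the
post-fix reset, the very first allocation after (c) drives the counter
negative again and sampling resumes immediately.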