Fix the time-since computation in HPA.

The nstime module guarantees monotonic clock updates only within a single
nstime_t.  This means that if two separate nstime_t variables are read and
updated separately, nstime_subtract between them may result in underflow.
Fixed by switching to the time-since utility provided by nstime.
This commit is contained in:
Qi Wang 2021-12-20 15:04:12 -08:00 committed by Qi Wang
parent 310af725b0
commit 837b37c4ce
4 changed files with 22 additions and 15 deletions

View File

@ -9,6 +9,7 @@ struct hpa_hooks_s {
void (*hugify)(void *ptr, size_t size);
void (*dehugify)(void *ptr, size_t size);
void (*curtime)(nstime_t *r_time, bool first_reading);
uint64_t (*ms_since)(nstime_t *r_time);
};
extern hpa_hooks_t hpa_hooks_default;

View File

@ -479,10 +479,7 @@ hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
/* Make sure that it's been hugifiable for long enough. */
nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(to_hugify);
nstime_t nstime;
shard->central->hooks.curtime(&nstime, /* first_reading */ true);
nstime_subtract(&nstime, &time_hugify_allowed);
uint64_t millis = nstime_msec(&nstime);
uint64_t millis = shard->central->hooks.ms_since(&time_hugify_allowed);
if (millis < shard->opts.hugify_delay_ms) {
return false;
}
@ -897,17 +894,15 @@ hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
if (to_hugify != NULL) {
nstime_t time_hugify_allowed =
hpdata_time_hugify_allowed(to_hugify);
nstime_t nstime;
shard->central->hooks.curtime(&nstime,
/* first_reading */ true);
nstime_subtract(&nstime, &time_hugify_allowed);
uint64_t since_hugify_allowed_ms = nstime_msec(&nstime);
uint64_t since_hugify_allowed_ms =
shard->central->hooks.ms_since(&time_hugify_allowed);
/*
* If not enough time has passed since hugification was allowed,
* sleep for the rest.
*/
if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) {
time_ns = shard->opts.hugify_delay_ms - since_hugify_allowed_ms;
time_ns = shard->opts.hugify_delay_ms -
since_hugify_allowed_ms;
time_ns *= 1000 * 1000;
} else {
malloc_mutex_unlock(tsdn, &shard->mtx);
@ -924,11 +919,8 @@ hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
malloc_mutex_unlock(tsdn, &shard->mtx);
return BACKGROUND_THREAD_DEFERRED_MIN;
}
nstime_t nstime;
shard->central->hooks.curtime(&nstime,
/* first_reading */ true);
nstime_subtract(&nstime, &shard->last_purge);
uint64_t since_last_purge_ms = nstime_msec(&nstime);
uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
&shard->last_purge);
if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
uint64_t until_purge_ns;

View File

@ -9,6 +9,7 @@ static void hpa_hooks_purge(void *ptr, size_t size);
static void hpa_hooks_hugify(void *ptr, size_t size);
static void hpa_hooks_dehugify(void *ptr, size_t size);
static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
hpa_hooks_t hpa_hooks_default = {
&hpa_hooks_map,
@ -17,6 +18,7 @@ hpa_hooks_t hpa_hooks_default = {
&hpa_hooks_hugify,
&hpa_hooks_dehugify,
&hpa_hooks_curtime,
&hpa_hooks_ms_since
};
static void *
@ -54,3 +56,8 @@ hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) {
}
nstime_update(r_nstime);
}
/*
 * Default ms_since hook: milliseconds elapsed since *past_nstime, measured
 * against the current monotonic time.  Delegates to nstime_ns_since so the
 * "now" reading and the subtraction happen on the same nstime_t, which the
 * nstime module guarantees cannot underflow.
 */
static uint64_t
hpa_hooks_ms_since(nstime_t *past_nstime) {
	uint64_t since_ns = nstime_ns_since(past_nstime);
	return since_ns / (1000 * 1000);
}

View File

@ -1,6 +1,7 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/hpa.h"
#include "jemalloc/internal/nstime.h"
#define SHARD_IND 111
@ -353,6 +354,11 @@ defer_test_curtime(nstime_t *r_time, bool first_reading) {
*r_time = defer_curtime;
}
/*
 * Test ms_since hook: milliseconds elapsed between *past_time and the mocked
 * clock defer_curtime (set by the test, not a real clock reading).
 */
static uint64_t
defer_test_ms_since(nstime_t *past_time) {
	uint64_t now_ns = nstime_ns(&defer_curtime);
	uint64_t past_ns = nstime_ns(past_time);
	return (now_ns - past_ns) / (1000 * 1000);
}
TEST_BEGIN(test_defer_time) {
test_skip_if(!hpa_supported());
@ -363,6 +369,7 @@ TEST_BEGIN(test_defer_time) {
hooks.hugify = &defer_test_hugify;
hooks.dehugify = &defer_test_dehugify;
hooks.curtime = &defer_test_curtime;
hooks.ms_since = &defer_test_ms_since;
hpa_shard_opts_t opts = test_hpa_shard_opts_default;
opts.deferral_allowed = true;