Migrate counter to use locked int

Yinan Zhang 2020-04-14 15:08:00 -07:00
parent b543c20a94
commit fc052ff728
4 changed files with 54 additions and 52 deletions


@@ -4,50 +4,25 @@
 #include "jemalloc/internal/mutex.h"
 
 typedef struct counter_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t mtx;
-	uint64_t accumbytes;
-#else
-	atomic_u64_t accumbytes;
-#endif
+	LOCKEDINT_MTX_DECLARE(mtx)
+	locked_u64_t accumbytes;
 	uint64_t interval;
 } counter_accum_t;
 
 JEMALLOC_ALWAYS_INLINE bool
-counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
-	bool overflow;
-	uint64_t a0, a1;
-
-	/*
-	 * If the event moves fast enough (and/or if the event handling is slow
-	 * enough), extreme overflow here (a1 >= interval * 2) can cause counter
-	 * trigger coalescing.  This is an intentional mechanism that avoids
-	 * rate-limiting allocation.
-	 */
+counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
 	uint64_t interval = counter->interval;
 	assert(interval > 0);
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
-	do {
-		a1 = a0 + accumbytes;
-		assert(a1 >= a0);
-		overflow = (a1 >= interval);
-		if (overflow) {
-			a1 %= interval;
-		}
-	} while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0, a1,
-	    ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &counter->mtx);
-	a0 = counter->accumbytes;
-	a1 = a0 + accumbytes;
-	overflow = (a1 >= interval);
-	if (overflow) {
-		a1 %= interval;
-	}
-	counter->accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &counter->mtx);
-#endif
+	LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
+	/*
+	 * If the event moves fast enough (and/or if the event handling is slow
+	 * enough), extreme overflow can cause counter trigger coalescing.
+	 * This is an intentional mechanism that avoids rate-limiting
+	 * allocation.
+	 */
+	bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
+	    &counter->accumbytes, bytes, interval);
+	LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
 	return overflow;
 }
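
The comment carried over into the new code describes counter trigger coalescing: when a large amount accumulates between triggers, the running total can jump past the interval by more than one multiple, yet counter_accum still reports only a single overflow and keeps the remainder. Below is a minimal single-threaded sketch of that add/compare/wrap logic, using stand-in names rather than jemalloc's types (demo_accum is illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the locked branch above: add the new bytes, compare against the
 * interval, and keep only the remainder when the threshold is crossed.
 */
static bool
demo_accum(uint64_t *accumbytes, uint64_t bytes, uint64_t interval) {
	assert(interval > 0);
	uint64_t after = *accumbytes + bytes;
	bool overflow = (after >= interval);
	if (overflow) {
		after %= interval;
	}
	*accumbytes = after;
	return overflow;
}

int
main(void) {
	uint64_t accum = 0;
	const uint64_t interval = 1024;

	/* Ordinary case: first call stays below the threshold. */
	printf("%d\n", demo_accum(&accum, 1000, interval)); /* 0; accum = 1000 */
	printf("%d\n", demo_accum(&accum, 100, interval));  /* 1; accum wraps to 76 */

	/* Coalescing case: one huge increment spans three intervals. */
	printf("%d\n", demo_accum(&accum, 3 * interval, interval)); /* 1; accum stays 76 */
	printf("accum = %llu\n", (unsigned long long)accum);
	return 0;
}

The third call crosses three interval boundaries in a single step but yields only one true return, which is the rate-limit-avoiding behavior the comment refers to.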


@@ -88,6 +88,36 @@ locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
 #endif
 }
 
+/* Increment and take modulus.  Returns whether the modulo made any change. */
+static inline bool
+locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
+    const uint64_t x, const uint64_t modulus) {
+	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+	uint64_t before, after;
+	bool overflow;
+#ifdef JEMALLOC_ATOMIC_U64
+	before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
+	do {
+		after = before + x;
+		assert(after >= before);
+		overflow = (after >= modulus);
+		if (overflow) {
+			after %= modulus;
+		}
+	} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
+	    ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+	before = p->val;
+	after = before + x;
+	overflow = (after >= modulus);
+	if (overflow) {
+		after %= modulus;
+	}
+	p->val = after;
+#endif
+	return overflow;
+}
+
 /*
  * Non-atomically sets *dst += src.  *dst needs external synchronization.
  * This lets us avoid the cost of a fetch_add when its unnecessary (note that
@@ -110,7 +140,15 @@ locked_read_u64_unsynchronized(locked_u64_t *p) {
 #else
 	return p->val;
 #endif
+}
+
+static inline void
+locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
+#else
+	p->val = x;
+#endif
 }
 
 static inline size_t
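
The new locked_inc_mod_u64 has two compile-time paths: with native 64-bit atomics it retries a compare-and-swap until the add-and-wrap takes effect, and otherwise it updates a plain field under the mutex the caller already holds (hence LOCKEDINT_MTX_ASSERT_INTERNAL at the top). Below is a rough standalone sketch of the lock-free path using C11 stdatomic, with stand-in names (demo_inc_mod_u64; jemalloc's atomic_u64_t wrappers are not reproduced here):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Add x to *p modulo `modulus`, lock-free.  Returns whether the modulo
 * changed the value, i.e. whether the running total crossed the threshold.
 */
static bool
demo_inc_mod_u64(_Atomic uint64_t *p, uint64_t x, uint64_t modulus) {
	uint64_t before = atomic_load_explicit(p, memory_order_relaxed);
	uint64_t after;
	bool overflow;
	do {
		after = before + x;
		assert(after >= before);     /* no uint64_t wraparound */
		overflow = (after >= modulus);
		if (overflow) {
			after %= modulus;
		}
		/*
		 * On CAS failure, `before` is refreshed with the value another
		 * thread installed and the add-and-wrap is recomputed, just
		 * like the loop in the diff above.
		 */
	} while (!atomic_compare_exchange_weak_explicit(p, &before, after,
	    memory_order_relaxed, memory_order_relaxed));
	return overflow;
}

int
main(void) {
	_Atomic uint64_t val = 0;
	printf("%d\n", demo_inc_mod_u64(&val, 900, 1000)); /* 0 */
	printf("%d\n", demo_inc_mod_u64(&val, 250, 1000)); /* 1; val ends at 150 */
	return 0;
}

A weak compare-exchange is fine here because a spurious failure simply repeats the loop with a fresh snapshot.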


@@ -6,18 +6,12 @@
 bool
 counter_accum_init(counter_accum_t *counter, uint64_t interval) {
-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&counter->mtx, "counter_accum",
+	if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
 	    WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-	counter->accumbytes = 0;
-#else
-	atomic_store_u64(&counter->accumbytes, 0,
-	    ATOMIC_RELAXED);
-#endif
+	locked_init_u64_unsynchronized(&counter->accumbytes, 0);
 	counter->interval = interval;
 	return false;
 }
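
Note the return convention preserved here: counter_accum_init returns true on failure (when the mutex cannot be initialized) and false on success. A tiny illustrative sketch of that convention with a stand-in counter type (demo_counter_t and demo_counter_init are hypothetical, not jemalloc APIs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t accumbytes;
	uint64_t interval;
} demo_counter_t;

/* Mimics the "true means error" convention used by counter_accum_init. */
static bool
demo_counter_init(demo_counter_t *c, uint64_t interval) {
	if (interval == 0) {
		return true;    /* failure */
	}
	c->accumbytes = 0;
	c->interval = interval;
	return false;           /* success */
}

int
main(void) {
	demo_counter_t c;
	if (demo_counter_init(&c, 4096)) {
		fprintf(stderr, "counter init failed\n");
		return 1;
	}
	printf("interval = %llu\n", (unsigned long long)c.interval);
	return 0;
}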


@@ -27,12 +27,7 @@ TEST_END
 void
 expect_counter_value(counter_accum_t *c, uint64_t v) {
-	uint64_t accum;
-#ifdef JEMALLOC_ATOMIC_U64
-	accum = atomic_load_u64(&(c->accumbytes), ATOMIC_RELAXED);
-#else
-	accum = c->accumbytes;
-#endif
+	uint64_t accum = locked_read_u64_unsynchronized(&c->accumbytes);
 	expect_u64_eq(accum, v, "Counter value mismatch");
 }