Migrate counter to use locked int
parent b543c20a94
commit fc052ff728
@@ -4,50 +4,25 @@
 #include "jemalloc/internal/mutex.h"
 
 typedef struct counter_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t mtx;
-	uint64_t accumbytes;
-#else
-	atomic_u64_t accumbytes;
-#endif
+	LOCKEDINT_MTX_DECLARE(mtx)
+	locked_u64_t accumbytes;
 	uint64_t interval;
 } counter_accum_t;
 
 JEMALLOC_ALWAYS_INLINE bool
-counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
-	bool overflow;
-	uint64_t a0, a1;
-
-	/*
-	 * If the event moves fast enough (and/or if the event handling is slow
-	 * enough), extreme overflow here (a1 >= interval * 2) can cause counter
-	 * trigger coalescing. This is an intentional mechanism that avoids
-	 * rate-limiting allocation.
-	 */
+counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
 	uint64_t interval = counter->interval;
 	assert(interval > 0);
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
-	do {
-		a1 = a0 + accumbytes;
-		assert(a1 >= a0);
-		overflow = (a1 >= interval);
-		if (overflow) {
-			a1 %= interval;
-		}
-	} while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0, a1,
-	    ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &counter->mtx);
-	a0 = counter->accumbytes;
-	a1 = a0 + accumbytes;
-	overflow = (a1 >= interval);
-	if (overflow) {
-		a1 %= interval;
-	}
-	counter->accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &counter->mtx);
-#endif
-
+	LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
+	/*
+	 * If the event moves fast enough (and/or if the event handling is slow
+	 * enough), extreme overflow can cause counter trigger coalescing.
+	 * This is an intentional mechanism that avoids rate-limiting
+	 * allocation.
+	 */
+	bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
+	    &counter->accumbytes, bytes, interval);
+	LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
 	return overflow;
 }
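The old and new bodies of counter_accum implement the same accumulate-and-wrap idea; only the synchronization wrapper changed. As a minimal standalone sketch of that semantics (plain C with hypothetical names such as accum_mod, not jemalloc code), note how one oversized increment still reports only a single trigger, which is the "coalescing" the comment above describes:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Add `bytes` to an accumulator and report whether the running total crossed
 * `interval`. A single increment of bytes >= 2 * interval still reports just
 * one trigger; the excess is folded into the remainder.
 */
static bool
accum_mod(uint64_t *accum, uint64_t bytes, uint64_t interval) {
	uint64_t after = *accum + bytes;
	assert(after >= *accum);	/* no uint64 wraparound expected */
	bool overflow = (after >= interval);
	if (overflow) {
		after %= interval;
	}
	*accum = after;
	return overflow;
}

int
main(void) {
	uint64_t accum = 0;
	bool t1 = accum_mod(&accum, 60, 100);	/* false: total 60 */
	bool t2 = accum_mod(&accum, 30, 100);	/* false: total 90 */
	bool t3 = accum_mod(&accum, 30, 100);	/* true: 120 wraps to 20 */
	bool t4 = accum_mod(&accum, 350, 100);	/* true, once: 370 wraps to 70 */
	printf("%d %d %d %d (accum=%llu)\n", t1, t2, t3, t4,
	    (unsigned long long)accum);
	return 0;
}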
@@ -88,6 +88,36 @@ locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
 #endif
 }
 
+/* Increment and take modulus. Returns whether the modulo made any change. */
+static inline bool
+locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
+    const uint64_t x, const uint64_t modulus) {
+	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+	uint64_t before, after;
+	bool overflow;
+#ifdef JEMALLOC_ATOMIC_U64
+	before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
+	do {
+		after = before + x;
+		assert(after >= before);
+		overflow = (after >= modulus);
+		if (overflow) {
+			after %= modulus;
+		}
+	} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
+	    ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+	before = p->val;
+	after = before + x;
+	overflow = (after >= modulus);
+	if (overflow) {
+		after %= modulus;
+	}
+	p->val = after;
+#endif
+	return overflow;
+}
+
 /*
  * Non-atomically sets *dst += src. *dst needs external synchronization.
  * This lets us avoid the cost of a fetch_add when its unnecessary (note that
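locked_inc_mod_u64 compiles to one of two forms: a compare-and-swap retry loop when native 64-bit atomics are available (JEMALLOC_ATOMIC_U64), or a plain read-modify-write performed under the mutex the caller already holds. Below is a rough standalone analogue of the atomic path, written against C11 <stdatomic.h> rather than jemalloc's atomic_u64_t wrappers; the names are illustrative only:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * CAS retry loop: compute the wrapped value from a snapshot, then publish it
 * only if the accumulator still equals that snapshot. On failure (a race or a
 * spurious weak-CAS failure), `before` is refreshed and we retry.
 */
static bool
inc_mod_u64(_Atomic uint64_t *p, uint64_t x, uint64_t modulus) {
	uint64_t before = atomic_load_explicit(p, memory_order_relaxed);
	uint64_t after;
	bool overflow;
	do {
		after = before + x;
		assert(after >= before);
		overflow = (after >= modulus);
		if (overflow) {
			after %= modulus;
		}
	} while (!atomic_compare_exchange_weak_explicit(p, &before, after,
	    memory_order_relaxed, memory_order_relaxed));
	return overflow;
}

int
main(void) {
	_Atomic uint64_t acc = 0;
	bool hit = inc_mod_u64(&acc, 150, 100);	/* true; acc ends at 50 */
	return hit ? 0 : 1;
}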
@@ -110,7 +140,15 @@ locked_read_u64_unsynchronized(locked_u64_t *p) {
 #else
 	return p->val;
 #endif
 }
 
+static inline void
+locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
+#else
+	p->val = x;
+#endif
+}
+
 static inline size_t
@@ -6,18 +6,12 @@
 
 bool
 counter_accum_init(counter_accum_t *counter, uint64_t interval) {
-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&counter->mtx, "counter_accum",
+	if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
 	    WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-	counter->accumbytes = 0;
-#else
-	atomic_store_u64(&counter->accumbytes, 0,
-	    ATOMIC_RELAXED);
-#endif
+	locked_init_u64_unsynchronized(&counter->accumbytes, 0);
 	counter->interval = interval;
 
 	return false;
 }
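For context on the init convention (returning true on failure) and the layout the counter now relies on, here is a simplified, portable analogue of the migrated counter built on pthreads. The names are hypothetical and this is not jemalloc's internal API; it only mirrors the pattern of keeping the mutex inside the struct and doing the locked increment-and-wrap:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	pthread_mutex_t mtx;
	uint64_t accumbytes;
	uint64_t interval;
} simple_counter_t;

/* Returns true on failure, mirroring counter_accum_init's convention. */
static bool
simple_counter_init(simple_counter_t *c, uint64_t interval) {
	if (pthread_mutex_init(&c->mtx, NULL) != 0) {
		return true;
	}
	c->accumbytes = 0;
	c->interval = interval;
	return false;
}

/* Locked increment-and-wrap; returns whether the interval was crossed. */
static bool
simple_counter_accum(simple_counter_t *c, uint64_t bytes) {
	pthread_mutex_lock(&c->mtx);
	uint64_t after = c->accumbytes + bytes;
	bool overflow = (after >= c->interval);
	if (overflow) {
		after %= c->interval;
	}
	c->accumbytes = after;
	pthread_mutex_unlock(&c->mtx);
	return overflow;
}

int
main(void) {
	simple_counter_t c;
	if (simple_counter_init(&c, 100)) {
		return 1;
	}
	(void)simple_counter_accum(&c, 42);	/* below interval: no trigger */
	bool hit = simple_counter_accum(&c, 80);	/* 122 >= 100: trigger */
	pthread_mutex_destroy(&c.mtx);
	return hit ? 0 : 1;
}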
@@ -27,12 +27,7 @@ TEST_END
 
 void
 expect_counter_value(counter_accum_t *c, uint64_t v) {
-	uint64_t accum;
-#ifdef JEMALLOC_ATOMIC_U64
-	accum = atomic_load_u64(&(c->accumbytes), ATOMIC_RELAXED);
-#else
-	accum = c->accumbytes;
-#endif
+	uint64_t accum = locked_read_u64_unsynchronized(&c->accumbytes);
 	expect_u64_eq(accum, v, "Counter value mismatch");
 }