#ifndef JEMALLOC_INTERNAL_COUNTER_H
#define JEMALLOC_INTERNAL_COUNTER_H

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/mutex.h"
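
/*
 * A byte accumulator protected by an internal mutex: callers add byte counts
 * and are told each time the running total reaches the configured interval.
 * (Descriptive comment added for clarity; see counter_accum() below.)
 */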
typedef struct counter_accum_s {
	LOCKEDINT_MTX_DECLARE(mtx)
	locked_u64_t accumbytes;
	uint64_t interval;
} counter_accum_t;
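
/*
 * Adds bytes to the accumulator (the running total is kept modulo interval)
 * and returns true when the addition pushes the total past interval, i.e. the
 * caller's cue to handle the periodic event.  With a very large bytes value,
 * several intervals' worth may coalesce into a single trigger; see the
 * comment in the body.  (Descriptive comment added for clarity.)
 */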
JEMALLOC_ALWAYS_INLINE bool
counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
	uint64_t interval = counter->interval;
	assert(interval > 0);
	LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
	/*
	 * If events arrive fast enough (and/or if event handling is slow
	 * enough), extreme overflow can cause counter triggers to coalesce.
	 * This is an intentional mechanism that avoids rate-limiting
	 * allocation.
	 */
	bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
	    &counter->accumbytes, bytes, interval);
	LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
	return overflow;
}
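
/*
 * Sets up the counter for the given interval.  Following the usual jemalloc
 * convention for bool-returning init functions, it is assumed to return true
 * on failure (e.g. mutex initialization error).  (Comment added for clarity.)
 */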
bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
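
/*
 * Fork hooks, presumably forwarded to the internal mutex so the counter stays
 * consistent across fork().  (Comment added for clarity.)
 */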
void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
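
/*
 * Usage sketch (illustrative only; the names below are hypothetical and not
 * part of this API).  A module that wants to act roughly once per N bytes of
 * some activity might do:
 *
 *	static counter_accum_t dump_counter;
 *
 *	bool
 *	module_boot(void) {
 *		// Trigger roughly once per 4 MiB accumulated.
 *		return counter_accum_init(&dump_counter, 4 << 20);
 *	}
 *
 *	void
 *	module_note_bytes(tsdn_t *tsdn, size_t usize) {
 *		if (counter_accum(tsdn, &dump_counter, usize)) {
 *			// The interval was crossed (possibly more than once,
 *			// if triggers coalesced); handle the periodic event.
 *		}
 *	}
 */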
#endif /* JEMALLOC_INTERNAL_COUNTER_H */