Change prof_accum_t to counter_accum_t for general-purpose use.
@@ -5,6 +5,7 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/counter.h"
 #include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/edata_cache.h"
 #include "jemalloc/internal/extent_dss.h"
@@ -117,7 +118,7 @@ struct arena_s {
 	malloc_mutex_t		tcache_ql_mtx;
 
 	/* Synchronization: internal. */
-	prof_accum_t		prof_accum;
+	counter_accum_t		prof_accum;
 
 	/*
 	 * Extent serial number generator state.
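With the field's type switched to the generic counter, the arena-side accumulation path reduces to a call into the helper introduced in the new header below. A plausible sketch of that call site follows (the arena_prof_accum() change itself is not part of this excerpt, so the body shown is an assumption; prof_interval and prof_active_get_unlocked() are existing jemalloc internals):

/*
 * Hedged sketch, not shown in this excerpt: assuming the existing
 * arena_prof_accum() call site simply switches from the prof-specific
 * accumulator to the generic counter_accum().
 */
static inline bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
	cassert(config_prof);

	/* Fast path: interval dumps disabled or profiling inactive. */
	if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
		return false;
	}

	/* Fires (returns true) whenever prof_interval bytes accumulate. */
	return counter_accum(tsdn, &arena->prof_accum, accumbytes);
}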
include/jemalloc/internal/counter.h (new file, 83 lines)
@@ -0,0 +1,83 @@
+#ifndef JEMALLOC_INTERNAL_COUNTER_H
+#define JEMALLOC_INTERNAL_COUNTER_H
+
+#include "jemalloc/internal/mutex.h"
+
+typedef struct counter_accum_s {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_t mtx;
+	uint64_t accumbytes;
+#else
+	atomic_u64_t accumbytes;
+#endif
+	uint64_t interval;
+} counter_accum_t;
+
+JEMALLOC_ALWAYS_INLINE bool
+counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
+	bool overflow;
+	uint64_t a0, a1;
+
+	/*
+	 * If the event moves fast enough (and/or if the event handling is slow
+	 * enough), extreme overflow here (a1 >= interval * 2) can cause counter
+	 * trigger coalescing.  This is an intentional mechanism that avoids
+	 * rate-limiting allocation.
+	 */
+	uint64_t interval = counter->interval;
+	assert(interval > 0);
+#ifdef JEMALLOC_ATOMIC_U64
+	a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
+	do {
+		a1 = a0 + accumbytes;
+		assert(a1 >= a0);
+		overflow = (a1 >= interval);
+		if (overflow) {
+			a1 %= interval;
+		}
+	} while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0, a1,
+	    ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+	malloc_mutex_lock(tsdn, &counter->mtx);
+	a0 = counter->accumbytes;
+	a1 = a0 + accumbytes;
+	overflow = (a1 >= interval);
+	if (overflow) {
+		a1 %= interval;
+	}
+	counter->accumbytes = a1;
+	malloc_mutex_unlock(tsdn, &counter->mtx);
+#endif
+	return overflow;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, size_t usize) {
+	/*
+	 * Cancel out as much of the excessive accumbytes increase as possible
+	 * without underflowing.  Interval-triggered events occur slightly more
+	 * often than intended as a result of incomplete canceling.
+	 */
+	uint64_t a0, a1;
+#ifdef JEMALLOC_ATOMIC_U64
+	a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
+	do {
+		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+	} while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0,
+	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+	malloc_mutex_lock(tsdn, &counter->mtx);
+	a0 = counter->accumbytes;
+	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+	counter->accumbytes = a1;
+	malloc_mutex_unlock(tsdn, &counter->mtx);
+#endif
+}
+
+bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
+
+#endif /* JEMALLOC_INTERNAL_COUNTER_H */
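Taken as a whole, counter_accum_t is a reusable interval trigger: bytes accumulate until they cross interval, counter_accum() reports the crossing exactly once, and the remainder is kept via a1 %= interval. The modulo is also what coalesces bursts; for example, with interval = 1024, adding 3000 bytes on top of a0 = 100 gives a1 = 3100, a single trigger, and a stored remainder of 3100 % 1024 = 28, so roughly three intervals' worth of bytes collapse into one event. counter_rollback() walks the counter back by SC_LARGE_MINCLASS - usize (the prof sampling over-accumulation being canceled), saturating at zero instead of underflowing. A minimal usage sketch follows; the subsystem, its 4 MiB interval, and stats_flush() are invented for illustration, and the return-true-on-error convention for counter_accum_init() is assumed from jemalloc's usual style:

/* Minimal usage sketch -- none of these names are part of the commit. */
#include "jemalloc/internal/counter.h"

/* Hypothetical subsystem that fires an event every 4 MiB allocated. */
static counter_accum_t stats_flush_counter;

static bool
stats_flush_boot(void) {
	/* Assumed jemalloc convention: returns true on error. */
	return counter_accum_init(&stats_flush_counter, (uint64_t)4 << 20);
}

static void
stats_flush_on_alloc(tsdn_t *tsdn, size_t size) {
	/* True each time the running total crosses the interval. */
	if (counter_accum(tsdn, &stats_flush_counter, (uint64_t)size)) {
		stats_flush(tsdn);	/* hypothetical event handler */
	}
}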
@@ -73,7 +73,7 @@ void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
 #endif
 int prof_getpid(void);
 void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_accum_init(tsdn_t *tsdn);
+bool prof_accum_init(void);
 void prof_idump(tsdn_t *tsdn);
 bool prof_mdump(tsd_t *tsd, const char *filename);
 void prof_gdump(tsdn_t *tsdn);
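Dropping the tsdn_t * parameter is consistent with counter_accum_init() handling its own mutex setup. The prof.c side is not shown in this excerpt; a hedged guess at its new shape, with the counter object name and the choice of prof_interval as the trigger interval both assumed:

/*
 * Hedged sketch of the prof.c side (not in this excerpt).  The counter
 * object `prof_idump_accum` is hypothetical; prof_interval is an
 * existing jemalloc global.
 */
bool
prof_accum_init(void) {
	cassert(config_prof);

	return counter_accum_init(&prof_idump_accum, prof_interval);
}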
@@ -21,15 +21,6 @@ typedef struct {
 } prof_unwind_data_t;
 #endif
 
-struct prof_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t mtx;
-	uint64_t accumbytes;
-#else
-	atomic_u64_t accumbytes;
-#endif
-};
-
 struct prof_cnt_s {
 	/* Profiling counters. */
 	uint64_t curobjs;
@@ -2,7 +2,6 @@
 #define JEMALLOC_INTERNAL_PROF_TYPES_H
 
 typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
 typedef struct prof_cnt_s prof_cnt_t;
 typedef struct prof_tctx_s prof_tctx_t;
 typedef struct prof_info_s prof_info_t;
@@ -54,9 +54,9 @@
 #define WITNESS_RANK_LEAF		0xffffffffU
 #define WITNESS_RANK_BIN		WITNESS_RANK_LEAF
 #define WITNESS_RANK_ARENA_STATS	WITNESS_RANK_LEAF
+#define WITNESS_RANK_COUNTER_ACCUM	WITNESS_RANK_LEAF
 #define WITNESS_RANK_DSS		WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_ACTIVE	WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACCUM	WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_DUMP_FILENAME	WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_GDUMP	WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_NEXT_THR_UID	WITNESS_RANK_LEAF