Change prof_accum_t to counter_accum_t for general purpose.
@@ -1988,7 +1988,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	}

 	if (config_prof) {
-		if (prof_accum_init(tsdn)) {
+		if (prof_accum_init()) {
 			goto label_error;
 		}
 	}
src/counter.c (new file)
@@ -0,0 +1,22 @@
+#define JEMALLOC_COUNTER_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/counter.h"
+
+bool
+counter_accum_init(counter_accum_t *counter, uint64_t interval) {
+#ifndef JEMALLOC_ATOMIC_U64
+	if (malloc_mutex_init(&counter->mtx, "counter_accum",
+	    WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	counter->accumbytes = 0;
+#else
+	atomic_store_u64(&counter->accumbytes, 0,
+	    ATOMIC_RELAXED);
+#endif
+	counter->interval = interval;
+
+	return false;
+}
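Note: jemalloc/internal/counter.h itself is not shown in this excerpt, but prof.c below now relies on counter_accum() and counter_rollback(). The following is only a rough sketch of what the counter type and the accumulate path presumably look like, reconstructed from counter_accum_init() above and from the logic being deleted from prof.c; anything beyond the names visible in the diff (the struct layout, the static inline qualifiers) is an assumption, not the actual header.

/*
 * Sketch (not the actual counter.h): a general-purpose byte accumulator
 * that reports when a configured interval has been crossed.
 */
typedef struct counter_accum_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t mtx;
	uint64_t accumbytes;
#else
	atomic_u64_t accumbytes;
#endif
	uint64_t interval;
} counter_accum_t;

/* Add bytes; return true each time the running total crosses interval. */
static inline bool
counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
	bool overflow;
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + bytes;
		assert(a1 >= a0);
		overflow = (a1 >= counter->interval);
		if (overflow) {
			/* Coalesce triggers rather than rate-limit the caller. */
			a1 %= counter->interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &counter->mtx);
	a0 = counter->accumbytes;
	a1 = a0 + bytes;
	overflow = (a1 >= counter->interval);
	if (overflow) {
		a1 %= counter->interval;
	}
	counter->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &counter->mtx);
#endif
	return overflow;
}

counter_rollback() would presumably be the mirror image: subtract without underflowing, as in the prof_idump_rollback_impl() body removed below.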
76
src/prof.c
76
src/prof.c
@@ -5,6 +5,7 @@
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/counter.h"
 #include "jemalloc/internal/prof_data.h"
 #include "jemalloc/internal/prof_log.h"
 #include "jemalloc/internal/prof_recent.h"
@@ -49,7 +50,7 @@ bool opt_prof_accum = false;
 char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];

 /* Accessed via prof_idump_[accum/rollback](). */
-static prof_accum_t prof_idump_accumulated;
+static counter_accum_t prof_idump_accumulated;

 /*
  * Initialized as opt_prof_active, and accessed via
@@ -553,89 +554,24 @@ prof_fdump(void) {
 }

 bool
-prof_accum_init(tsdn_t *tsdn) {
+prof_accum_init(void) {
 	cassert(config_prof);

-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&prof_idump_accumulated.mtx, "prof_accum",
-	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-	prof_idump_accumulated.accumbytes = 0;
-#else
-	atomic_store_u64(&prof_idump_accumulated.accumbytes, 0,
-	    ATOMIC_RELAXED);
-#endif
-	return false;
+	return counter_accum_init(&prof_idump_accumulated, prof_interval);
 }

 bool
 prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes) {
 	cassert(config_prof);

-	bool overflow;
-	uint64_t a0, a1;
-
-	/*
-	 * If the application allocates fast enough (and/or if idump is slow
-	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
-	 * idump trigger coalescing.  This is an intentional mechanism that
-	 * avoids rate-limiting allocation.
-	 */
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
-	    ATOMIC_RELAXED);
-	do {
-		a1 = a0 + accumbytes;
-		assert(a1 >= a0);
-		overflow = (a1 >= prof_interval);
-		if (overflow) {
-			a1 %= prof_interval;
-		}
-	} while (!atomic_compare_exchange_weak_u64(
-	    &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
-	    ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
-	a0 = prof_idump_accumulated.accumbytes;
-	a1 = a0 + accumbytes;
-	overflow = (a1 >= prof_interval);
-	if (overflow) {
-		a1 %= prof_interval;
-	}
-	prof_idump_accumulated.accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
-#endif
-	return overflow;
+	return counter_accum(tsdn, &prof_idump_accumulated, accumbytes);
 }

 void
 prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
 	cassert(config_prof);

-	/*
-	 * Cancel out as much of the excessive accumbytes increase as possible
-	 * without underflowing.  Interval-triggered dumps occur slightly more
-	 * often than intended as a result of incomplete canceling.
-	 */
-	uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
-	    ATOMIC_RELAXED);
-	do {
-		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
-		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
-	} while (!atomic_compare_exchange_weak_u64(
-	    &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
-	    ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
-	a0 = prof_idump_accumulated.accumbytes;
-	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
-	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
-	prof_idump_accumulated.accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
-#endif
+	counter_rollback(tsdn, &prof_idump_accumulated, usize);
 }

 bool
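To illustrate the "general purpose" intent of the commit title, a hypothetical consumer outside prof could drive the same API as prof.c does above. The names dump_counter, dump_counter_boot, and on_alloc are invented for the example, and the 4 MiB interval is arbitrary; this is a usage sketch, not code from the commit.

#include "jemalloc/internal/counter.h"

/* Hypothetical consumer of the generalized counter; not part of this commit. */
static counter_accum_t dump_counter;
static uint64_t n_triggers;	/* how many times the interval was crossed */

static bool
dump_counter_boot(void) {
	/* Fire once per 4 MiB of accumulated bytes (arbitrary example). */
	return counter_accum_init(&dump_counter, (uint64_t)4 << 20);
}

static void
on_alloc(tsdn_t *tsdn, size_t usize) {
	/* counter_accum() returns true each time the interval is crossed. */
	if (counter_accum(tsdn, &dump_counter, usize)) {
		n_triggers++;	/* a real consumer would do its periodic work here */
	}
}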