Change prof_accum_t to counter_accum_t for general purpose.
parent ea351a7b52
commit d71a145ec1
@@ -103,6 +103,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/bitmap.c \
 	$(srcroot)src/buf_writer.c \
 	$(srcroot)src/ckh.c \
+	$(srcroot)src/counter.c \
 	$(srcroot)src/ctl.c \
 	$(srcroot)src/div.c \
 	$(srcroot)src/ecache.c \
@@ -5,6 +5,7 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin.h"
 #include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/counter.h"
 #include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/edata_cache.h"
 #include "jemalloc/internal/extent_dss.h"
@@ -117,7 +118,7 @@ struct arena_s {
 	malloc_mutex_t tcache_ql_mtx;
 
 	/* Synchronization: internal. */
-	prof_accum_t prof_accum;
+	counter_accum_t prof_accum;
 
 	/*
 	 * Extent serial number generator state.
include/jemalloc/internal/counter.h (new file, +83)
@@ -0,0 +1,83 @@
+#ifndef JEMALLOC_INTERNAL_COUNTER_H
+#define JEMALLOC_INTERNAL_COUNTER_H
+
+#include "jemalloc/internal/mutex.h"
+
+typedef struct counter_accum_s {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_t mtx;
+	uint64_t accumbytes;
+#else
+	atomic_u64_t accumbytes;
+#endif
+	uint64_t interval;
+} counter_accum_t;
+
+JEMALLOC_ALWAYS_INLINE bool
+counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
+	bool overflow;
+	uint64_t a0, a1;
+
+	/*
+	 * If the event moves fast enough (and/or if the event handling is slow
+	 * enough), extreme overflow here (a1 >= interval * 2) can cause counter
+	 * trigger coalescing.  This is an intentional mechanism that avoids
+	 * rate-limiting allocation.
+	 */
+	uint64_t interval = counter->interval;
+	assert(interval > 0);
+#ifdef JEMALLOC_ATOMIC_U64
+	a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
+	do {
+		a1 = a0 + accumbytes;
+		assert(a1 >= a0);
+		overflow = (a1 >= interval);
+		if (overflow) {
+			a1 %= interval;
+		}
+	} while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0, a1,
+	    ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+	malloc_mutex_lock(tsdn, &counter->mtx);
+	a0 = counter->accumbytes;
+	a1 = a0 + accumbytes;
+	overflow = (a1 >= interval);
+	if (overflow) {
+		a1 %= interval;
+	}
+	counter->accumbytes = a1;
+	malloc_mutex_unlock(tsdn, &counter->mtx);
+#endif
+	return overflow;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, size_t usize) {
+	/*
+	 * Cancel out as much of the excessive accumbytes increase as possible
+	 * without underflowing.  Interval-triggered events occur slightly more
+	 * often than intended as a result of incomplete canceling.
+	 */
+	uint64_t a0, a1;
+#ifdef JEMALLOC_ATOMIC_U64
+	a0 = atomic_load_u64(&counter->accumbytes,
+	    ATOMIC_RELAXED);
+	do {
+		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+	} while (!atomic_compare_exchange_weak_u64(
+	    &counter->accumbytes, &a0, a1, ATOMIC_RELAXED,
+	    ATOMIC_RELAXED));
+#else
+	malloc_mutex_lock(tsdn, &counter->mtx);
+	a0 = counter->accumbytes;
+	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+	counter->accumbytes = a1;
+	malloc_mutex_unlock(tsdn, &counter->mtx);
+#endif
+}
+
+bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
+
+#endif /* JEMALLOC_INTERNAL_COUNTER_H */
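For orientation, a minimal usage sketch of the new API (not part of the commit; example_counter, example_init, example_event, and the 4096-byte interval are all hypothetical):

/* Hypothetical sketch; assumes the jemalloc-internal types above. */
static counter_accum_t example_counter;

static bool
example_init(void) {
	/* Made-up interval: trigger roughly once per 4096 bytes. */
	return counter_accum_init(&example_counter, 4096);
}

static void
example_event(tsdn_t *tsdn, uint64_t nbytes) {
	if (counter_accum(tsdn, &example_counter, nbytes)) {
		/* Interval crossed (possibly several, coalesced into one). */
	}
}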
@@ -73,7 +73,7 @@ void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
 #endif
 int prof_getpid(void);
 void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_accum_init(tsdn_t *tsdn);
+bool prof_accum_init(void);
 void prof_idump(tsdn_t *tsdn);
 bool prof_mdump(tsd_t *tsd, const char *filename);
 void prof_gdump(tsdn_t *tsdn);
@@ -21,15 +21,6 @@ typedef struct {
 } prof_unwind_data_t;
 #endif
 
-struct prof_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
-	malloc_mutex_t mtx;
-	uint64_t accumbytes;
-#else
-	atomic_u64_t accumbytes;
-#endif
-};
-
 struct prof_cnt_s {
 	/* Profiling counters. */
 	uint64_t curobjs;
@@ -2,7 +2,6 @@
 #define JEMALLOC_INTERNAL_PROF_TYPES_H
 
 typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
 typedef struct prof_cnt_s prof_cnt_t;
 typedef struct prof_tctx_s prof_tctx_t;
 typedef struct prof_info_s prof_info_t;
@@ -54,9 +54,9 @@
 #define WITNESS_RANK_LEAF 0xffffffffU
 #define WITNESS_RANK_BIN WITNESS_RANK_LEAF
 #define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
+#define WITNESS_RANK_COUNTER_ACCUM WITNESS_RANK_LEAF
 #define WITNESS_RANK_DSS WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_DUMP_FILENAME WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
 #define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
@@ -43,6 +43,7 @@
     <ClCompile Include="..\..\..\..\src\bitmap.c" />
     <ClCompile Include="..\..\..\..\src\buf_writer.c" />
     <ClCompile Include="..\..\..\..\src\ckh.c" />
+    <ClCompile Include="..\..\..\..\src\counter.c" />
     <ClCompile Include="..\..\..\..\src\ctl.c" />
     <ClCompile Include="..\..\..\..\src\div.c" />
     <ClCompile Include="..\..\..\..\src\ecache.c" />
@@ -28,6 +28,9 @@
     <ClCompile Include="..\..\..\..\src\ckh.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\counter.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\ctl.c">
       <Filter>Source Files</Filter>
     </ClCompile>
@@ -43,6 +43,7 @@
     <ClCompile Include="..\..\..\..\src\bitmap.c" />
     <ClCompile Include="..\..\..\..\src\buf_writer.c" />
     <ClCompile Include="..\..\..\..\src\ckh.c" />
+    <ClCompile Include="..\..\..\..\src\counter.c" />
     <ClCompile Include="..\..\..\..\src\ctl.c" />
     <ClCompile Include="..\..\..\..\src\div.c" />
     <ClCompile Include="..\..\..\..\src\ecache.c" />
@@ -28,6 +28,9 @@
     <ClCompile Include="..\..\..\..\src\ckh.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\counter.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\ctl.c">
       <Filter>Source Files</Filter>
     </ClCompile>
@@ -1988,7 +1988,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	}
 
 	if (config_prof) {
-		if (prof_accum_init(tsdn)) {
+		if (prof_accum_init()) {
 			goto label_error;
 		}
 	}
src/counter.c (new file, +22)
@@ -0,0 +1,22 @@
+#define JEMALLOC_COUNTER_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/counter.h"
+
+bool
+counter_accum_init(counter_accum_t *counter, uint64_t interval) {
+#ifndef JEMALLOC_ATOMIC_U64
+	if (malloc_mutex_init(&counter->mtx, "counter_accum",
+	    WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	counter->accumbytes = 0;
+#else
+	atomic_store_u64(&counter->accumbytes, 0,
+	    ATOMIC_RELAXED);
+#endif
+	counter->interval = interval;
+
+	return false;
+}
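The CAS loop and the mutex fallback in counter.h implement the same update rule. A standalone, single-threaded sketch of that rule (not part of the commit) makes the trigger coalescing concrete: one accumulation that crosses the interval several times reports a single overflow and keeps only the remainder.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
accum_step(uint64_t *accum, uint64_t interval, uint64_t nbytes) {
	/* Same arithmetic as counter_accum(), minus the concurrency. */
	uint64_t a1 = *accum + nbytes;
	bool overflow = (a1 >= interval);
	if (overflow) {
		a1 %= interval;
	}
	*accum = a1;
	return overflow;
}

int
main(void) {
	uint64_t accum = 0;
	/* 250 bytes cross a 100-byte interval twice, but fire once. */
	assert(accum_step(&accum, 100, 250) && accum == 50);
	assert(!accum_step(&accum, 100, 40) && accum == 90);
	assert(accum_step(&accum, 100, 25) && accum == 15);
	return 0;
}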
src/prof.c (76 lines changed)
@@ -5,6 +5,7 @@
 #include "jemalloc/internal/ctl.h"
 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/counter.h"
 #include "jemalloc/internal/prof_data.h"
 #include "jemalloc/internal/prof_log.h"
 #include "jemalloc/internal/prof_recent.h"
@@ -49,7 +50,7 @@ bool opt_prof_accum = false;
 char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
 
 /* Accessed via prof_idump_[accum/rollback](). */
-static prof_accum_t prof_idump_accumulated;
+static counter_accum_t prof_idump_accumulated;
 
 /*
  * Initialized as opt_prof_active, and accessed via
@@ -553,89 +554,24 @@ prof_fdump(void) {
 }
 
 bool
-prof_accum_init(tsdn_t *tsdn) {
+prof_accum_init(void) {
 	cassert(config_prof);
 
-#ifndef JEMALLOC_ATOMIC_U64
-	if (malloc_mutex_init(&prof_idump_accumulated.mtx, "prof_accum",
-	    WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-	prof_idump_accumulated.accumbytes = 0;
-#else
-	atomic_store_u64(&prof_idump_accumulated.accumbytes, 0,
-	    ATOMIC_RELAXED);
-#endif
-	return false;
+	return counter_accum_init(&prof_idump_accumulated, prof_interval);
 }
 
 bool
 prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes) {
 	cassert(config_prof);
 
-	bool overflow;
-	uint64_t a0, a1;
-
-	/*
-	 * If the application allocates fast enough (and/or if idump is slow
-	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
-	 * idump trigger coalescing.  This is an intentional mechanism that
-	 * avoids rate-limiting allocation.
-	 */
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
-	    ATOMIC_RELAXED);
-	do {
-		a1 = a0 + accumbytes;
-		assert(a1 >= a0);
-		overflow = (a1 >= prof_interval);
-		if (overflow) {
-			a1 %= prof_interval;
-		}
-	} while (!atomic_compare_exchange_weak_u64(
-	    &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
-	    ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
-	a0 = prof_idump_accumulated.accumbytes;
-	a1 = a0 + accumbytes;
-	overflow = (a1 >= prof_interval);
-	if (overflow) {
-		a1 %= prof_interval;
-	}
-	prof_idump_accumulated.accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
-#endif
-	return overflow;
+	return counter_accum(tsdn, &prof_idump_accumulated, accumbytes);
 }
 
 void
 prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
 	cassert(config_prof);
 
-	/*
-	 * Cancel out as much of the excessive accumbytes increase as possible
-	 * without underflowing.  Interval-triggered dumps occur slightly more
-	 * often than intended as a result of incomplete canceling.
-	 */
-	uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
-	    ATOMIC_RELAXED);
-	do {
-		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
-		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
-	} while (!atomic_compare_exchange_weak_u64(
-	    &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
-	    ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
-	a0 = prof_idump_accumulated.accumbytes;
-	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
-	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
-	prof_idump_accumulated.accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
-#endif
-}
+	return counter_rollback(tsdn, &prof_idump_accumulated, usize);
+}
 
 bool
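Likewise, counter_rollback() cancels SC_LARGE_MINCLASS - usize from the running total. A standalone sketch of the clamp (not part of the commit; rollback_step is a made-up name) shows why the subtraction can never underflow:

#include <assert.h>
#include <stdint.h>

static uint64_t
rollback_step(uint64_t accum, uint64_t cancel) {
	/* Same clamp as counter_rollback(), minus the concurrency. */
	return (accum >= cancel) ? accum - cancel : 0;
}

int
main(void) {
	assert(rollback_step(500, 200) == 300); /* full cancellation */
	assert(rollback_step(100, 200) == 0);   /* clamped: only 100 cancelled */
	return 0;
}

When the clamp fires, less is cancelled than requested, which is why the header comment notes that interval-triggered events can occur slightly more often than intended.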