diff --git a/Makefile.in b/Makefile.in
index 24ab5421..37941ea1 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -103,6 +103,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/bitmap.c \
$(srcroot)src/buf_writer.c \
$(srcroot)src/ckh.c \
+ $(srcroot)src/counter.c \
$(srcroot)src/ctl.c \
$(srcroot)src/div.c \
$(srcroot)src/ecache.c \
diff --git a/include/jemalloc/internal/arena_structs.h b/include/jemalloc/internal/arena_structs.h
index 48d13b8c..2d5c5680 100644
--- a/include/jemalloc/internal/arena_structs.h
+++ b/include/jemalloc/internal/arena_structs.h
@@ -5,6 +5,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/extent_dss.h"
@@ -117,7 +118,7 @@ struct arena_s {
malloc_mutex_t tcache_ql_mtx;
/* Synchronization: internal. */
- prof_accum_t prof_accum;
+ counter_accum_t prof_accum;
/*
* Extent serial number generator state.
diff --git a/include/jemalloc/internal/counter.h b/include/jemalloc/internal/counter.h
new file mode 100644
index 00000000..302e3504
--- /dev/null
+++ b/include/jemalloc/internal/counter.h
@@ -0,0 +1,84 @@
+#ifndef JEMALLOC_INTERNAL_COUNTER_H
+#define JEMALLOC_INTERNAL_COUNTER_H
+
+#include "jemalloc/internal/mutex.h"
+
+typedef struct counter_accum_s {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_t mtx;
+ uint64_t accumbytes;
+#else
+ atomic_u64_t accumbytes;
+#endif
+ uint64_t interval;
+} counter_accum_t;
+
+JEMALLOC_ALWAYS_INLINE bool
+counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
+ bool overflow;
+ uint64_t a0, a1;
+
+ /*
+ * If events accumulate fast enough (and/or if event handling is slow
+ * enough), extreme overflow here (a1 >= interval * 2) causes multiple
+ * pending counter triggers to coalesce into one. This is an intentional
+ * mechanism that avoids rate-limiting allocation.
+ */
+ uint64_t interval = counter->interval;
+ assert(interval > 0);
+#ifdef JEMALLOC_ATOMIC_U64
+ a0 = atomic_load_u64(&counter->accumbytes, ATOMIC_RELAXED);
+ do {
+ a1 = a0 + accumbytes;
+ assert(a1 >= a0);
+ overflow = (a1 >= interval);
+ if (overflow) {
+ a1 %= interval;
+ }
+ } while (!atomic_compare_exchange_weak_u64(&counter->accumbytes, &a0, a1,
+ ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+ malloc_mutex_lock(tsdn, &counter->mtx);
+ a0 = counter->accumbytes;
+ a1 = a0 + accumbytes;
+ overflow = (a1 >= interval);
+ if (overflow) {
+ a1 %= interval;
+ }
+ counter->accumbytes = a1;
+ malloc_mutex_unlock(tsdn, &counter->mtx);
+#endif
+ return overflow;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, size_t usize) {
+ /*
+ * Cancel out as much of the excessive accumbytes increase as possible
+ * without underflowing. Interval-triggered events occur slightly more
+ * often than intended as a result of incomplete canceling.
+ */
+ uint64_t a0, a1;
+#ifdef JEMALLOC_ATOMIC_U64
+ a0 = atomic_load_u64(&counter->accumbytes,
+ ATOMIC_RELAXED);
+ do {
+ a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+ ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+ } while (!atomic_compare_exchange_weak_u64(
+ &counter->accumbytes, &a0, a1, ATOMIC_RELAXED,
+ ATOMIC_RELAXED));
+#else
+ malloc_mutex_lock(tsdn, &counter->mtx);
+ a0 = counter->accumbytes;
+ a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+ ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
+ counter->accumbytes = a1;
+ malloc_mutex_unlock(tsdn, &counter->mtx);
+#endif
+}
+
+bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
+
+#endif /* JEMALLOC_INTERNAL_COUNTER_H */
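
The new module generalizes the prof idump accumulator: counter_accum() adds
bytes to a running total and reports true each time the total crosses
interval, keeping only the remainder (a1 % interval) so that a large
overshoot coalesces several pending triggers into one instead of
rate-limiting the caller. counter_rollback() cancels as much of an earlier
(SC_LARGE_MINCLASS - usize) overcharge as it can without underflowing, at
the cost of occasionally firing interval events slightly early. The
following is a minimal, self-contained sketch of the same lock-free
accumulate-and-trigger pattern; it uses C11 <stdatomic.h> in place of
jemalloc's atomic_u64_t wrappers, and the event_counter_* names are
illustrative, not part of this patch.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        _Atomic uint64_t accumbytes; /* running total, in bytes */
        uint64_t interval;           /* trigger threshold */
    } event_counter_t;

    /*
     * Add bytes to the running total; return true when the total crosses
     * the interval. On overflow only the remainder is kept, so no matter
     * how far the total overshoots, at most one trigger fires per call.
     */
    static bool
    event_counter_add(event_counter_t *c, uint64_t bytes) {
        uint64_t a0 = atomic_load_explicit(&c->accumbytes,
            memory_order_relaxed);
        uint64_t a1;
        bool overflow;
        do {
            a1 = a0 + bytes;
            overflow = (a1 >= c->interval);
            if (overflow) {
                a1 %= c->interval;
            }
        } while (!atomic_compare_exchange_weak_explicit(&c->accumbytes,
            &a0, a1, memory_order_relaxed, memory_order_relaxed));
        return overflow;
    }

Worked through with interval = 1024: from accumbytes = 1000, adding 800
gives a1 = 1800 >= 1024, so the call fires once and carries 1800 % 1024 =
776 forward; a single 3000-byte add from zero likewise fires once (not
twice), carrying 3000 % 1024 = 952. That collapsing of multiple crossings
into one trigger is the coalescing the comment above describes.
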
diff --git a/include/jemalloc/internal/prof_externs.h b/include/jemalloc/internal/prof_externs.h
index df4f7cd8..36571c8c 100644
--- a/include/jemalloc/internal/prof_externs.h
+++ b/include/jemalloc/internal/prof_externs.h
@@ -73,7 +73,7 @@ void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
#endif
int prof_getpid(void);
void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
-bool prof_accum_init(tsdn_t *tsdn);
+bool prof_accum_init(void);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
diff --git a/include/jemalloc/internal/prof_structs.h b/include/jemalloc/internal/prof_structs.h
index ee78643e..977eb1c8 100644
--- a/include/jemalloc/internal/prof_structs.h
+++ b/include/jemalloc/internal/prof_structs.h
@@ -21,15 +21,6 @@ typedef struct {
} prof_unwind_data_t;
#endif
-struct prof_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_t mtx;
- uint64_t accumbytes;
-#else
- atomic_u64_t accumbytes;
-#endif
-};
-
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
diff --git a/include/jemalloc/internal/prof_types.h b/include/jemalloc/internal/prof_types.h
index 498962db..4abe5b58 100644
--- a/include/jemalloc/internal/prof_types.h
+++ b/include/jemalloc/internal/prof_types.h
@@ -2,7 +2,6 @@
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
typedef struct prof_info_s prof_info_t;
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
index 4ed787a2..083bdcc9 100644
--- a/include/jemalloc/internal/witness.h
+++ b/include/jemalloc/internal/witness.h
@@ -54,9 +54,9 @@
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
+#define WITNESS_RANK_COUNTER_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_DUMP_FILENAME WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
index 4b25b856..d8b48986 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
@@ -43,6 +43,7 @@
    <ClCompile Include="..\..\..\..\src\bitmap.c" />
    <ClCompile Include="..\..\..\..\src\buf_writer.c" />
    <ClCompile Include="..\..\..\..\src\ckh.c" />
+   <ClCompile Include="..\..\..\..\src\counter.c" />
    <ClCompile Include="..\..\..\..\src\ctl.c" />
    <ClCompile Include="..\..\..\..\src\div.c" />
    <ClCompile Include="..\..\..\..\src\ecache.c" />
diff --git a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
index 73ee8d1d..404adbe5 100644
--- a/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
@@ -28,6 +28,9 @@
    <ClCompile Include="..\..\..\..\src\ckh.c">
      <Filter>Source Files</Filter>
    </ClCompile>
+   <ClCompile Include="..\..\..\..\src\counter.c">
+     <Filter>Source Files</Filter>
+   </ClCompile>
    <ClCompile Include="..\..\..\..\src\ctl.c">
      <Filter>Source Files</Filter>
    </ClCompile>
diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
index ed6f618d..b0d32d93 100644
--- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
+++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
@@ -43,6 +43,7 @@
    <ClCompile Include="..\..\..\..\src\bitmap.c" />
    <ClCompile Include="..\..\..\..\src\buf_writer.c" />
    <ClCompile Include="..\..\..\..\src\ckh.c" />
+   <ClCompile Include="..\..\..\..\src\counter.c" />
    <ClCompile Include="..\..\..\..\src\ctl.c" />
    <ClCompile Include="..\..\..\..\src\div.c" />
    <ClCompile Include="..\..\..\..\src\ecache.c" />
diff --git a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
index 73ee8d1d..404adbe5 100644
--- a/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
+++ b/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
@@ -28,6 +28,9 @@
    <ClCompile Include="..\..\..\..\src\ckh.c">
      <Filter>Source Files</Filter>
    </ClCompile>
+   <ClCompile Include="..\..\..\..\src\counter.c">
+     <Filter>Source Files</Filter>
+   </ClCompile>
    <ClCompile Include="..\..\..\..\src\ctl.c">
      <Filter>Source Files</Filter>
    </ClCompile>
diff --git a/src/arena.c b/src/arena.c
index d04712a5..9558bb40 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1988,7 +1988,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
if (config_prof) {
- if (prof_accum_init(tsdn)) {
+ if (prof_accum_init()) {
goto label_error;
}
}
diff --git a/src/counter.c b/src/counter.c
new file mode 100644
index 00000000..1b8201e5
--- /dev/null
+++ b/src/counter.c
@@ -0,0 +1,22 @@
+#define JEMALLOC_COUNTER_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/counter.h"
+
+bool
+counter_accum_init(counter_accum_t *counter, uint64_t interval) {
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&counter->mtx, "counter_accum",
+ WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ counter->accumbytes = 0;
+#else
+ atomic_store_u64(&counter->accumbytes, 0,
+ ATOMIC_RELAXED);
+#endif
+ counter->interval = interval;
+
+ return false;
+}
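
For reference, the prof.c hunks below reduce to exactly this wiring: one
counter_accum_init() call at boot with the subsystem's interval, then a
trigger check on every accumulation. A hedged sketch of how another
jemalloc-internal subsystem could adopt the module (the my_* names are
hypothetical, not part of this patch):

    /* Hypothetical consumer of the new counter module. */
    static counter_accum_t my_accumulated;

    bool
    my_boot(uint64_t interval) {
        /* Returns true on failure, following jemalloc's convention. */
        return counter_accum_init(&my_accumulated, interval);
    }

    void
    my_accum(tsdn_t *tsdn, uint64_t accumbytes) {
        if (counter_accum(tsdn, &my_accumulated, accumbytes)) {
            /* Interval crossed; fire the subsystem's event here. */
        }
    }
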
diff --git a/src/prof.c b/src/prof.c
index 791c362f..649e9ca2 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -5,6 +5,7 @@
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/counter.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_recent.h"
@@ -49,7 +50,7 @@ bool opt_prof_accum = false;
char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
/* Accessed via prof_idump_[accum/rollback](). */
-static prof_accum_t prof_idump_accumulated;
+static counter_accum_t prof_idump_accumulated;
/*
* Initialized as opt_prof_active, and accessed via
@@ -553,89 +554,24 @@ prof_fdump(void) {
}
bool
-prof_accum_init(tsdn_t *tsdn) {
+prof_accum_init(void) {
cassert(config_prof);
-#ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&prof_idump_accumulated.mtx, "prof_accum",
- WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
- return true;
- }
- prof_idump_accumulated.accumbytes = 0;
-#else
- atomic_store_u64(&prof_idump_accumulated.accumbytes, 0,
- ATOMIC_RELAXED);
-#endif
- return false;
+ return counter_accum_init(&prof_idump_accumulated, prof_interval);
}
bool
prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes) {
cassert(config_prof);
- bool overflow;
- uint64_t a0, a1;
-
- /*
- * If the application allocates fast enough (and/or if idump is slow
- * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
- * idump trigger coalescing. This is an intentional mechanism that
- * avoids rate-limiting allocation.
- */
-#ifdef JEMALLOC_ATOMIC_U64
- a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
- ATOMIC_RELAXED);
- do {
- a1 = a0 + accumbytes;
- assert(a1 >= a0);
- overflow = (a1 >= prof_interval);
- if (overflow) {
- a1 %= prof_interval;
- }
- } while (!atomic_compare_exchange_weak_u64(
- &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
- ATOMIC_RELAXED));
-#else
- malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
- a0 = prof_idump_accumulated.accumbytes;
- a1 = a0 + accumbytes;
- overflow = (a1 >= prof_interval);
- if (overflow) {
- a1 %= prof_interval;
- }
- prof_idump_accumulated.accumbytes = a1;
- malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
-#endif
- return overflow;
+ return counter_accum(tsdn, &prof_idump_accumulated, accumbytes);
}
void
prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
cassert(config_prof);
- /*
- * Cancel out as much of the excessive accumbytes increase as possible
- * without underflowing. Interval-triggered dumps occur slightly more
- * often than intended as a result of incomplete canceling.
- */
- uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
- a0 = atomic_load_u64(&prof_idump_accumulated.accumbytes,
- ATOMIC_RELAXED);
- do {
- a1 = (a0 >= SC_LARGE_MINCLASS - usize)
- ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
- } while (!atomic_compare_exchange_weak_u64(
- &prof_idump_accumulated.accumbytes, &a0, a1, ATOMIC_RELAXED,
- ATOMIC_RELAXED));
-#else
- malloc_mutex_lock(tsdn, &prof_idump_accumulated.mtx);
- a0 = prof_idump_accumulated.accumbytes;
- a1 = (a0 >= SC_LARGE_MINCLASS - usize)
- ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
- prof_idump_accumulated.accumbytes = a1;
- malloc_mutex_unlock(tsdn, &prof_idump_accumulated.mtx);
-#endif
+ counter_rollback(tsdn, &prof_idump_accumulated, usize);
}
bool