Push locks into arena bins.
For bin-related allocation, protect data structures with bin locks rather than arena locks. Arena locks remain for run allocation/deallocation and other miscellaneous operations. Restructure statistics counters to maintain per-bin allocated/nmalloc/ndalloc counts, but continue to provide arena-wide statistics via aggregation in the ctl code.
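Before the hunks, a minimal sketch of the lock-ordering rule this commit establishes, using plain pthreads in place of jemalloc's malloc_mutex_t; bin_t, arena_t, and bin_alloc() are hypothetical stand-ins for illustration, not code from the commit:

#include <pthread.h>

/* Hypothetical stand-ins for arena_bin_s/arena_s; only the two locks
 * matter here.  Mutex initialization is elided. */
typedef struct {
	pthread_mutex_t	lock;	/* Protects runcur, runs, and stats. */
} bin_t;

typedef struct {
	pthread_mutex_t	lock;	/* Protects run allocation/deallocation. */
} arena_t;

/*
 * Permitted order: take the bin lock first, then the arena lock if a
 * fresh run is needed.  Taking a bin lock while already holding the
 * arena lock would invert the order and risk deadlock.
 */
static void *
bin_alloc(arena_t *arena, bin_t *bin)
{
	void *ret = (void *)0;

	pthread_mutex_lock(&bin->lock);
	/* Fast path: carve an object from the bin's current run (elided). */
	if (ret == (void *)0) {
		/* Slow path: get a new run from the arena. */
		pthread_mutex_lock(&arena->lock);
		/* ... run allocation would happen here ... */
		pthread_mutex_unlock(&arena->lock);
	}
	pthread_mutex_unlock(&bin->lock);
	return (ret);
}

The point is the asymmetry: the arena lock may be acquired while one or more bin locks are held, but never the reverse.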
@@ -206,6 +206,14 @@ struct arena_run_s {
 };
 
 struct arena_bin_s {
+	/*
+	 * All operations on runcur, runs, and stats require that lock be
+	 * locked.  Run allocation/deallocation are protected by the arena lock,
+	 * which may be acquired while holding one or more bin locks, but not
+	 * vice versa.
+	 */
+	malloc_mutex_t	lock;
+
 	/*
 	 * Current run being used to service allocations of this bin's size
 	 * class.
@@ -256,7 +264,10 @@ struct arena_s {
 	/* This arena's index within the arenas array. */
 	unsigned		ind;
 
-	/* All operations on this arena require that lock be locked. */
+	/*
+	 * All non-bin-related operations on this arena require that lock be
+	 * locked.
+	 */
 	malloc_mutex_t		lock;
 
 #ifdef JEMALLOC_STATS
@@ -459,9 +470,18 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 		tcache_dalloc(tcache, ptr);
 	else {
 #endif
-		malloc_mutex_lock(&arena->lock);
+		arena_run_t *run;
+		arena_bin_t *bin;
+
+		run = (arena_run_t *)((uintptr_t)chunk +
+		    (uintptr_t)((pageind - ((mapelm->bits &
+		    CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT)) <<
+		    PAGE_SHIFT));
+		assert(run->magic == ARENA_RUN_MAGIC);
+		bin = run->bin;
+		malloc_mutex_lock(&bin->lock);
 		arena_dalloc_bin(arena, chunk, ptr, mapelm);
-		malloc_mutex_unlock(&arena->lock);
+		malloc_mutex_unlock(&bin->lock);
 #ifdef JEMALLOC_TCACHE
 	}
 #endif
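The pointer arithmetic in the arena_dalloc() hunk above recovers a run header from a chunk-map entry. A standalone sketch of the same computation, with hypothetical constant values (the real definitions live in jemalloc's internal headers):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical values, for illustration only. */
#define	PAGE_SHIFT		12
#define	CHUNK_MAP_PG_SHIFT	12
#define	CHUNK_MAP_PG_MASK	(((uintptr_t)0xfffUL) << CHUNK_MAP_PG_SHIFT)

/*
 * Each chunk-map entry records how many pages into its run a page sits.
 * Subtracting that offset from the page index and shifting back to bytes
 * yields the address of the run header.
 */
static void *
run_from_mapbits(void *chunk, size_t pageind, uintptr_t mapbits)
{
	size_t run_pageind = pageind -
	    ((mapbits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT);

	return ((void *)((uintptr_t)chunk + (run_pageind << PAGE_SHIFT)));
}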
@@ -33,6 +33,18 @@ struct ctl_arena_stats_s {
 	size_t			pdirty;
 #ifdef JEMALLOC_STATS
 	arena_stats_t		astats;
+
+	/* Aggregate stats for small/medium size classes, based on bin stats. */
+	size_t			allocated_small;
+	uint64_t		nmalloc_small;
+	uint64_t		ndalloc_small;
+	uint64_t		nrequests_small;
+
+	size_t			allocated_medium;
+	uint64_t		nmalloc_medium;
+	uint64_t		ndalloc_medium;
+	uint64_t		nrequests_medium;
+
 	malloc_bin_stats_t	*bstats;	/* nbins elements. */
 	malloc_large_stats_t	*lstats;	/* nlclasses elements. */
 #endif
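As the commit message says, arena-wide small/medium statistics are now produced by aggregating the per-bin counters in the ctl code. A minimal sketch of that roll-up, assuming a hypothetical bin count (NBINS) and first-medium-bin index (MBIN0), with bstats inlined as an array for self-containment:

#include <stddef.h>
#include <stdint.h>

/* Trimmed copy of the per-bin stats structure. */
typedef struct {
	size_t		allocated;
	uint64_t	nmalloc;
	uint64_t	ndalloc;
	uint64_t	nrequests;
} malloc_bin_stats_t;

/* Hypothetical bin count and index of the first medium-class bin. */
#define	NBINS	32
#define	MBIN0	24

typedef struct {
	size_t		allocated_small, allocated_medium;
	uint64_t	nmalloc_small, nmalloc_medium;
	uint64_t	ndalloc_small, ndalloc_medium;
	uint64_t	nrequests_small, nrequests_medium;
	malloc_bin_stats_t bstats[NBINS];
} ctl_arena_stats_t;

/* Roll the per-bin counters up into arena-wide small/medium aggregates. */
static void
ctl_aggregate_bins(ctl_arena_stats_t *s)
{
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		malloc_bin_stats_t *b = &s->bstats[i];

		if (i < MBIN0) {
			s->allocated_small += b->allocated;
			s->nmalloc_small += b->nmalloc;
			s->ndalloc_small += b->ndalloc;
			s->nrequests_small += b->nrequests;
		} else {
			s->allocated_medium += b->allocated;
			s->nmalloc_medium += b->nmalloc;
			s->ndalloc_medium += b->ndalloc;
			s->nrequests_medium += b->nrequests;
		}
	}
}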
@@ -31,8 +31,23 @@ struct tcache_bin_stats_s {
 
 struct malloc_bin_stats_s {
 	/*
-	 * Number of allocation requests that corresponded to the size of this
-	 * bin.
+	 * Current number of bytes allocated, including objects currently
+	 * cached by tcache.
 	 */
+	size_t		allocated;
+	/*
+	 * Total number of allocation/deallocation requests served directly by
+	 * the bin.  Note that tcache may allocate an object, then recycle it
+	 * many times, resulting in many increments to nrequests, but only one
+	 * each to nmalloc and ndalloc.
+	 */
+	uint64_t	nmalloc;
+	uint64_t	ndalloc;
+
+	/*
+	 * Number of allocation requests that correspond to the size of this
+	 * bin.  This includes requests served by tcache, though tcache only
+	 * periodically merges into this counter.
+	 */
 	uint64_t	nrequests;
 
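A toy walk-through of the counter semantics documented above: the bin serves a single allocation, the tcache recycles the object repeatedly, and the cached request tally merges into nrequests only at flush time:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t	nmalloc;
	uint64_t	ndalloc;
	uint64_t	nrequests;
} bin_counters_t;

int
main(void)
{
	bin_counters_t b = {0, 0, 0};
	uint64_t tcache_requests = 0;
	int i;

	b.nmalloc++;		/* Bin hands one object to the tcache. */
	tcache_requests++;	/* First request served by that object. */

	for (i = 0; i < 9; i++)
		tcache_requests++;	/* Tcache recycles it nine more times. */

	b.nrequests += tcache_requests;	/* Periodic merge at flush time. */
	b.ndalloc++;		/* Object finally returned to the bin. */

	/* Prints: nmalloc=1 ndalloc=1 nrequests=10 */
	printf("nmalloc=%" PRIu64 " ndalloc=%" PRIu64 " nrequests=%" PRIu64 "\n",
	    b.nmalloc, b.ndalloc, b.nrequests);
	return (0);
}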
@@ -87,14 +102,6 @@ struct arena_stats_s {
 	uint64_t	purged;
 
 	/* Per-size-category statistics. */
-	size_t		allocated_small;
-	uint64_t	nmalloc_small;
-	uint64_t	ndalloc_small;
-
-	size_t		allocated_medium;
-	uint64_t	nmalloc_medium;
-	uint64_t	ndalloc_medium;
-
 	size_t		allocated_large;
 	uint64_t	nmalloc_large;
 	uint64_t	ndalloc_large;
@@ -64,7 +64,7 @@ extern __thread tcache_t *tcache_tls
 extern unsigned		tcache_gc_incr;
 
 void	tcache_bin_flush(tcache_bin_t *tbin, size_t binind, unsigned rem
-#ifdef JEMALLOC_PROF
+#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
     , tcache_t *tcache
 #endif
     );
@@ -130,7 +130,7 @@ tcache_event(tcache_t *tcache)
 		 */
 		tcache_bin_flush(tbin, binind, tbin->ncached -
 		    tbin->low_water + (tbin->low_water >> 2)
-#ifdef JEMALLOC_PROF
+#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 		    , tcache
 #endif
 		    );
@@ -234,7 +234,7 @@ tcache_dalloc(tcache_t *tcache, void *ptr)
 	tbin = &tcache->tbins[binind];
 	if (tbin->ncached == tbin->ncached_max) {
 		tcache_bin_flush(tbin, binind, (tbin->ncached_max >> 1)
-#ifdef JEMALLOC_PROF
+#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 		    , tcache
 #endif
 		    );