Add support for sharded bins within an arena.

This makes it possible to have multiple sets of bins in an arena, which improves
arena scalability because the bins (especially the small ones) are always the
limiting factor in production workloads.

A bin shard is picked on allocation; each extent tracks the bin shard id so that
deallocation returns to the same shard.  The number of shards will be determined
using runtime options.
Qi Wang 2018-11-12 15:56:04 -08:00 committed by Qi Wang
parent b23336af96
commit 37b8913925
12 changed files with 217 additions and 73 deletions
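
The mechanism reads more easily as a standalone toy than as a diff: each size class gets several independently locked bins, a thread is assigned a shard round-robin when it binds to the arena, and every allocation records which shard it came from so the free path can relock the same shard. The sketch below is illustrative only; none of these names are jemalloc's (the real code is arena_bin_choose_lock(), extent_binshard_get(), and the arena_bind() change further down).

/* Toy model of sharded bins; illustrative only, not jemalloc code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define N_SHARDS 4

typedef struct {
    pthread_mutex_t lock;
    long nmalloc;               /* stand-in for real bin state */
} bin_shard_t;

typedef struct {
    unsigned shard;             /* like the extent's bin_shard bits */
} alloc_t;

static bin_shard_t shards[N_SHARDS];
static atomic_uint binshard_next;            /* like arena->binshard_next */
static _Thread_local unsigned tsd_binshard;  /* like the new TSD binshard slot */

static void thread_bind(void) {
    /* Round-robin shard assignment when a thread binds to the arena. */
    tsd_binshard = atomic_fetch_add(&binshard_next, 1) % N_SHARDS;
}

static alloc_t *shard_alloc(void) {
    unsigned s = tsd_binshard;            /* shard picked on allocation */
    pthread_mutex_lock(&shards[s].lock);  /* contend only on this shard */
    shards[s].nmalloc++;
    pthread_mutex_unlock(&shards[s].lock);
    alloc_t *a = malloc(sizeof(*a));
    a->shard = s;                         /* remembered for the free path */
    return a;
}

static void shard_dalloc(alloc_t *a) {
    unsigned s = a->shard;                /* read the recorded shard back */
    pthread_mutex_lock(&shards[s].lock);
    shards[s].nmalloc--;
    pthread_mutex_unlock(&shards[s].lock);
    free(a);
}

int main(void) {
    for (int i = 0; i < N_SHARDS; i++) {
        pthread_mutex_init(&shards[i].lock, NULL);
    }
    thread_bind();
    alloc_t *a = shard_alloc();
    printf("allocated from shard %u\n", a->shard);
    shard_dalloc(a);
    return 0;
}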

View File

@@ -63,8 +63,8 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
 void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize);
 void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
     bool slow_path);
-void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, void *ptr);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind, extent_t *extent, void *ptr);
 void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
 bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero, size_t *newsize);
@@ -86,6 +86,8 @@ size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
 arena_t *arena_choose_huge(tsd_t *tsd);
+bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+    unsigned *binshard);
 void arena_boot(sc_data_t *sc_data);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork1(tsdn_t *tsdn, arena_t *arena);

View File

@@ -90,6 +90,9 @@ struct arena_s {
     */
    atomic_u_t nthreads[2];
 
+   /* Next bin shard for binding new threads. Synchronization: atomic. */
+   atomic_u_t binshard_next;
+
    /*
     * When percpu_arena is enabled, to amortize the cost of reading /
     * updating the current CPU id, track the most recent thread accessing
@@ -204,7 +207,7 @@ struct arena_s {
     *
     * Synchronization: internal.
     */
-   bin_t bins[SC_NBINS];
+   bins_t bins[SC_NBINS];
 
    /*
     * Base allocator, from which arena metadata are allocated.

View File

@@ -7,6 +7,11 @@
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/sc.h"
 
+#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
+
+extern unsigned opt_bin_shard_maxszind;
+extern unsigned opt_n_bin_shards;
+
 /*
  * A bin contains a set of extents that are currently being used for slab
  * allocations.
@@ -42,6 +47,9 @@ struct bin_info_s {
    /* Total number of regions in a slab for this bin's size class. */
    uint32_t nregs;
 
+   /* Number of sharded bins in each arena for this size class. */
+   uint32_t n_shards;
+
    /*
     * Metadata used to manipulate bitmaps for slabs associated with this
     * bin.
@@ -51,7 +59,6 @@
 
 extern bin_info_t bin_infos[SC_NBINS];
 
-
 typedef struct bin_s bin_t;
 struct bin_s {
    /* All operations on bin_t fields require lock ownership. */
@@ -79,6 +86,13 @@ struct bin_s {
    bin_stats_t stats;
 };
 
+/* A set of sharded bins of the same size class. */
+typedef struct bins_s bins_t;
+struct bins_s {
+   /* Sharded bins.  Dynamically sized. */
+   bin_t *bin_shards;
+};
+
 void bin_infos_init(sc_data_t *sc_data, bin_info_t bin_infos[SC_NBINS]);
 void bin_boot();
 
@@ -94,7 +108,7 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
 static inline void
 bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
    malloc_mutex_lock(tsdn, &bin->lock);
-   malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
+   malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
    dst_bin_stats->nmalloc += bin->stats.nmalloc;
    dst_bin_stats->ndalloc += bin->stats.ndalloc;
    dst_bin_stats->nrequests += bin->stats.nrequests;

View File

@@ -70,6 +70,14 @@ extent_usize_get(const extent_t *extent) {
    return sz_index2size(extent_szind_get(extent));
 }
 
+static inline unsigned
+extent_binshard_get(const extent_t *extent) {
+   unsigned binshard = (unsigned)((extent->e_bits &
+       EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
+   assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+   return binshard;
+}
+
 static inline size_t
 extent_sn_get(const extent_t *extent) {
    return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
@@ -190,6 +198,14 @@ extent_arena_set(extent_t *extent, arena_t *arena) {
        ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
 }
 
+static inline void
+extent_binshard_set(extent_t *extent, unsigned binshard) {
+   /* The assertion assumes szind is set already. */
+   assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+   extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
+       ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
+}
+
 static inline void
 extent_addr_set(extent_t *extent, void *addr) {
    extent->e_addr = addr;
@@ -252,6 +268,16 @@ extent_nfree_set(extent_t *extent, unsigned nfree) {
        ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
 }
 
+static inline void
+extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
+   /* The assertion assumes szind is set already. */
+   assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
+   extent->e_bits = (extent->e_bits &
+       (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
+       ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
+       ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
+}
+
 static inline void
 extent_nfree_inc(extent_t *extent) {
    assert(extent_slab_get(extent));

View File

@@ -29,9 +29,10 @@ struct extent_s {
     * t: state
     * i: szind
     * f: nfree
+    * s: bin_shard
     * n: sn
     *
-    * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
+    * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
     *
     * arena_ind: Arena from which this extent came, or all 1 bits if
     *            unassociated.
@@ -76,6 +77,8 @@ struct extent_s {
     *
     * nfree: Number of free regions in slab.
     *
+    * bin_shard: the shard of the bin from which this extent came.
+    *
     * sn: Serial number (potentially non-unique).
     *
     *    Serial numbers may wrap around if !opt_retain, but as long as
@@ -121,7 +124,15 @@ struct extent_s {
 #define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
 #define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
 
-#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
+#define EXTENT_BITS_BINSHARD_WIDTH 6
+#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
+#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
+
+/* Will make dynamic options. */
+#define OPT_N_BIN_SHARDS (1)
+#define OPT_BIN_SHARD_MAXSZIND (0)
+
+#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
 #define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
 
 /* Pointer to the extent that this structure is responsible for. */
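
For readers unfamiliar with the e_bits encoding above, the new field is just another width/shift/mask triple layered on top of nfree, and BIN_SHARDS_MAX is sized to fit it (1 << 6 = 64 shards at most). A self-contained illustration follows; FIELD_MASK mirrors the header's MASK() macro, and BINSHARD_SHIFT is a placeholder since the real value is derived from the widths of the fields below bin_shard.

/* Illustration only: packing a 6-bit bin_shard field into a 64-bit word the
 * way the EXTENT_BITS_* macros above do. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK(width, shift) ((((uint64_t)1 << (width)) - 1) << (shift))

#define BINSHARD_WIDTH 6
#define BINSHARD_SHIFT 42 /* placeholder; the header derives it from NFREE_WIDTH + NFREE_SHIFT */
#define BINSHARD_MASK  FIELD_MASK(BINSHARD_WIDTH, BINSHARD_SHIFT)

static uint64_t
binshard_set(uint64_t e_bits, unsigned binshard) {
    assert(binshard < (1U << BINSHARD_WIDTH)); /* i.e. < BIN_SHARDS_MAX */
    return (e_bits & ~BINSHARD_MASK) |
        ((uint64_t)binshard << BINSHARD_SHIFT);
}

static unsigned
binshard_get(uint64_t e_bits) {
    return (unsigned)((e_bits & BINSHARD_MASK) >> BINSHARD_SHIFT);
}

int main(void) {
    uint64_t e_bits = UINT64_MAX;   /* pretend every other field is set */
    e_bits = binshard_set(e_bits, 37);
    printf("binshard = %u\n", binshard_get(e_bits)); /* prints 37 */
    return 0;
}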

View File

@@ -263,4 +263,26 @@ malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
    atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
 }
 
+static inline void
+malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
+    malloc_mutex_t *mutex) {
+   mutex_prof_data_t *source = &mutex->prof_data;
+   /* Can only read holding the mutex. */
+   malloc_mutex_assert_owner(tsdn, mutex);
+
+   nstime_add(&data->tot_wait_time, &source->tot_wait_time);
+   if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+       nstime_copy(&data->max_wait_time, &source->max_wait_time);
+   }
+   data->n_wait_times += source->n_wait_times;
+   data->n_spin_acquired += source->n_spin_acquired;
+   if (data->max_n_thds < source->max_n_thds) {
+       data->max_n_thds = source->max_n_thds;
+   }
+   /* n_wait_thds is not reported. */
+   atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+   data->n_owner_switches += source->n_owner_switches;
+   data->n_lock_ops += source->n_lock_ops;
+}
+
 #endif /* JEMALLOC_INTERNAL_MUTEX_H */

View File

@@ -74,6 +74,7 @@ typedef void (*test_callback_t)(int *);
    O(iarena, arena_t *, arena_t *) \
    O(arena, arena_t *, arena_t *) \
    O(arenas_tdata, arena_tdata_t *, arena_tdata_t *) \
+   O(binshard, unsigned, unsigned) \
    O(tcache, tcache_t, tcache_t) \
    O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
    MALLOC_TEST_TSD
@@ -93,6 +94,7 @@ typedef void (*test_callback_t)(int *);
    NULL, \
    NULL, \
    NULL, \
+   ((unsigned)-1), \
    TCACHE_ZERO_INITIALIZER, \
    WITNESS_TSD_INITIALIZER \
    MALLOC_TEST_TSD_INITIALIZER \

View File

@@ -233,7 +233,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    nstime_subtract(&astats->uptime, &arena->create_time);
 
    for (szind_t i = 0; i < SC_NBINS; i++) {
-       bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
+       for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+           bin_stats_merge(tsdn, &bstats[i],
+               &arena->bins[i].bin_shards[j]);
+       }
    }
 }
 
@@ -1039,6 +1042,37 @@ arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
    extent_list_remove(&bin->slabs_full, slab);
 }
 
+static void
+arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
+   extent_t *slab;
+
+   malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+   if (bin->slabcur != NULL) {
+       slab = bin->slabcur;
+       bin->slabcur = NULL;
+       malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+       arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+   }
+   while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+       malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+       arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+   }
+   for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+       slab = extent_list_first(&bin->slabs_full)) {
+       arena_bin_slabs_full_remove(arena, bin, slab);
+       malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+       arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+   }
+   if (config_stats) {
+       bin->stats.curregs = 0;
+       bin->stats.curslabs = 0;
+   }
+   malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+}
+
 void
 arena_reset(tsd_t *tsd, arena_t *arena) {
    /*
@@ -1085,34 +1119,10 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 
    /* Bins. */
    for (unsigned i = 0; i < SC_NBINS; i++) {
-       extent_t *slab;
-       bin_t *bin = &arena->bins[i];
-       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-       if (bin->slabcur != NULL) {
-           slab = bin->slabcur;
-           bin->slabcur = NULL;
-           malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-           arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-           malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-       }
-       while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
-           NULL) {
-           malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-           arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-           malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-       }
-       for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
-           slab = extent_list_first(&bin->slabs_full)) {
-           arena_bin_slabs_full_remove(arena, bin, slab);
-           malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-           arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
-           malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-       }
-       if (config_stats) {
-           bin->stats.curregs = 0;
-           bin->stats.curslabs = 0;
-       }
-       malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+       for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+           arena_bin_reset(tsd, arena,
+               &arena->bins[i].bin_shards[j]);
+       }
    }
 
    atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
@@ -1197,7 +1207,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
 }
 
 static extent_t *
-arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
     const bin_info_t *bin_info) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
@@ -1225,7 +1235,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 
    /* Initialize slab internals. */
    arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-   extent_nfree_set(slab, bin_info->nregs);
+   extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
    bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
 
    arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
@@ -1235,7 +1245,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 
 static extent_t *
 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind) {
+    szind_t binind, unsigned binshard) {
    extent_t *slab;
    const bin_info_t *bin_info;
 
@@ -1251,7 +1261,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    /* Allocate a new slab. */
    malloc_mutex_unlock(tsdn, &bin->lock);
    /******************************/
-   slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
+   slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
    /********************************/
    malloc_mutex_lock(tsdn, &bin->lock);
    if (slab != NULL) {
@@ -1278,7 +1288,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
 static void *
 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
-    szind_t binind) {
+    szind_t binind, unsigned binshard) {
    const bin_info_t *bin_info;
    extent_t *slab;
 
@@ -1287,7 +1297,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
        arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
        bin->slabcur = NULL;
    }
-   slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
+   slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
    if (bin->slabcur != NULL) {
        /*
        * Another thread updated slabcur while this one ran without the
@@ -1331,19 +1341,39 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    return arena_slab_reg_alloc(slab, bin_info);
 }
 
+/* Choose a bin shard and return the locked bin. */
+bin_t *
+arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+    unsigned *binshard) {
+   bin_t *bin;
+   if (binind >= opt_bin_shard_maxszind || tsdn_null(tsdn) ||
+       tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
+       *binshard = 0;
+   } else {
+       *binshard = tsd_binshard_get(tsdn_tsd(tsdn)) %
+           bin_infos[binind].n_shards;
+   }
+   assert(*binshard < bin_infos[binind].n_shards);
+   bin = &arena->bins[binind].bin_shards[*binshard];
+   malloc_mutex_lock(tsdn, &bin->lock);
+
+   return bin;
+}
+
 void
 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
    unsigned i, nfill, cnt;
-   bin_t *bin;
 
    assert(tbin->ncached == 0);
 
    if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
        prof_idump(tsdn);
    }
-   bin = &arena->bins[binind];
-   malloc_mutex_lock(tsdn, &bin->lock);
+
+   unsigned binshard;
+   bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
+
    for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
        tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
        extent_t *slab;
@@ -1358,7 +1388,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
        } else {
            cnt = 1;
            void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
-               binind);
+               binind, binshard);
            /*
            * OOM. tbin->avail isn't yet filled down to its first
            * element, so the successful allocations (if any) must
@@ -1417,14 +1447,14 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
    extent_t *slab;
 
    assert(binind < SC_NBINS);
-   bin = &arena->bins[binind];
    usize = sz_index2size(binind);
 
-   malloc_mutex_lock(tsdn, &bin->lock);
+   unsigned binshard;
+   bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
    if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
        ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
    } else {
-       ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+       ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
    }
 
    if (ret == NULL) {
@@ -1623,11 +1653,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 
 static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
-    void *ptr, bool junked) {
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind, extent_t *slab, void *ptr, bool junked) {
    arena_slab_data_t *slab_data = extent_slab_data_get(slab);
-   szind_t binind = extent_szind_get(slab);
-   bin_t *bin = &arena->bins[binind];
    const bin_info_t *bin_info = &bin_infos[binind];
 
    if (!junked && config_fill && unlikely(opt_junk_free)) {
@@ -1651,18 +1679,21 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
 }
 
 void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    void *ptr) {
-   arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+    szind_t binind, extent_t *extent, void *ptr) {
+   arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+       true);
 }
 
 static void
 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
    szind_t binind = extent_szind_get(extent);
-   bin_t *bin = &arena->bins[binind];
+   unsigned binshard = extent_binshard_get(extent);
+   bin_t *bin = &arena->bins[binind].bin_shards[binshard];
 
    malloc_mutex_lock(tsdn, &bin->lock);
-   arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
+   arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
+       false);
    malloc_mutex_unlock(tsdn, &bin->lock);
 }
@@ -1892,7 +1923,10 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
        }
    }
 
-   arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
+   size_t arena_size = sizeof(arena_t) +
+       sizeof(bin_t) * opt_n_bin_shards * opt_bin_shard_maxszind +
+       sizeof(bin_t) * (SC_NBINS - opt_bin_shard_maxszind);
+   arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
    if (arena == NULL) {
        goto label_error;
    }
@@ -1997,12 +2031,20 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
    }
 
    /* Initialize bins. */
+   uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
+   atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
    for (i = 0; i < SC_NBINS; i++) {
-       bool err = bin_init(&arena->bins[i]);
-       if (err) {
-           goto label_error;
-       }
+       unsigned nshards = bin_infos[i].n_shards;
+       arena->bins[i].bin_shards = (bin_t *)bin_addr;
+       bin_addr += nshards * sizeof(bin_t);
+       for (unsigned j = 0; j < nshards; j++) {
+           bool err = bin_init(&arena->bins[i].bin_shards[j]);
+           if (err) {
+               goto label_error;
+           }
+       }
    }
+   assert(bin_addr == (uintptr_t)arena + arena_size);
 
    arena->base = base;
    /* Set arena before creating background threads. */
@@ -2139,7 +2181,9 @@ arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
    for (unsigned i = 0; i < SC_NBINS; i++) {
-       bin_prefork(tsdn, &arena->bins[i]);
+       for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+           bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
+       }
    }
 }
@@ -2148,7 +2192,10 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
    unsigned i;
 
    for (i = 0; i < SC_NBINS; i++) {
-       bin_postfork_parent(tsdn, &arena->bins[i]);
+       for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+           bin_postfork_parent(tsdn,
+               &arena->bins[i].bin_shards[j]);
+       }
    }
    malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
    base_postfork_parent(tsdn, arena->base);
@@ -2192,7 +2239,9 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
    }
 
    for (i = 0; i < SC_NBINS; i++) {
-       bin_postfork_child(tsdn, &arena->bins[i]);
+       for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+           bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
+       }
    }
    malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
    base_postfork_child(tsdn, arena->base);
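
Since the number of bin_t objects per arena is no longer a compile-time constant, arena_new() above sizes a single base_alloc to hold the arena_t plus all shard arrays, and bin_addr then carves that trailing region into per-size-class slices. A back-of-the-envelope version of the same arithmetic, with made-up sizes and option values (none of these numbers are jemalloc's real defaults):

/* Made-up numbers; mirrors the arena_size computation in arena_new() above. */
#include <stddef.h>
#include <stdio.h>

int main(void) {
    size_t sizeof_arena = 1024, sizeof_bin = 256;   /* placeholders */
    unsigned n_bins = 36;                           /* stand-in for SC_NBINS */
    unsigned opt_n_bin_shards = 4, opt_bin_shard_maxszind = 8;

    /* The first opt_bin_shard_maxszind size classes get opt_n_bin_shards
     * shards each; the remaining classes keep a single bin. */
    size_t arena_size = sizeof_arena +
        sizeof_bin * opt_n_bin_shards * opt_bin_shard_maxszind +
        sizeof_bin * (n_bins - opt_bin_shard_maxszind);
    printf("one base_alloc of %zu bytes\n", arena_size); /* 16384 here */
    return 0;
}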

View File

@@ -6,6 +6,9 @@
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/witness.h"
 
+unsigned opt_bin_shard_maxszind;
+unsigned opt_n_bin_shards;
+
 bin_info_t bin_infos[SC_NBINS];
 
 void
@@ -18,6 +21,7 @@ bin_infos_init(sc_data_t *sc_data, bin_info_t bin_infos[SC_NBINS]) {
        bin_info->slab_size = (sc->pgs << LG_PAGE);
        bin_info->nregs =
            (uint32_t)(bin_info->slab_size / bin_info->reg_size);
+       bin_info->n_shards = (i < opt_bin_shard_maxszind) ? opt_n_bin_shards : 1;
        bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
            bin_info->nregs);
        bin_info->bitmap_info = bitmap_info;
@@ -27,6 +31,8 @@ bin_infos_init(sc_data_t *sc_data, bin_info_t bin_infos[SC_NBINS]) {
 
 void
 bin_boot(sc_data_t *sc_data) {
    assert(sc_data->initialized);
+   opt_bin_shard_maxszind = OPT_BIN_SHARD_MAXSZIND;
+   opt_n_bin_shards = OPT_N_BIN_SHARDS;
    bin_infos_init(sc_data, bin_infos);
 }

View File

@@ -2913,10 +2913,12 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
        MUTEX_PROF_RESET(arena->base->mtx);
 
        for (szind_t i = 0; i < SC_NBINS; i++) {
-           bin_t *bin = &arena->bins[i];
-           MUTEX_PROF_RESET(bin->lock);
+           for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
+               bin_t *bin = &arena->bins[i].bin_shards[j];
+               MUTEX_PROF_RESET(bin->lock);
+           }
        }
    }
 #undef MUTEX_PROF_RESET
    return 0;
 }

View File

@@ -379,6 +379,9 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
        tsd_iarena_set(tsd, arena);
    } else {
        tsd_arena_set(tsd, arena);
+       unsigned binshard = atomic_fetch_add_u(&arena->binshard_next, 1,
+           ATOMIC_RELAXED) % BIN_SHARDS_MAX;
+       tsd_binshard_set(tsd, binshard);
    }
 }
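
One subtlety worth noting: the value stored in TSD here is taken modulo BIN_SHARDS_MAX (the hard cap implied by the 6-bit extent field), while arena_bin_choose_lock() later folds it modulo the per-size-class n_shards. A tiny standalone illustration with an assumed shard count (not jemalloc code):

/* Illustration of how the two moduli compose; values are assumptions. */
#include <stdio.h>

#define BIN_SHARDS_MAX 64   /* 1 << EXTENT_BITS_BINSHARD_WIDTH */

int main(void) {
    unsigned binshard_next = 0;
    unsigned n_shards = 4;   /* assumed bin_infos[binind].n_shards */

    for (int t = 0; t < 6; t++) {
        unsigned tsd_binshard = binshard_next++ % BIN_SHARDS_MAX; /* bind time */
        unsigned shard = tsd_binshard % n_shards;                 /* alloc time */
        printf("thread %d: tsd_binshard %u -> shard %u\n", t, tsd_binshard, shard);
    }
    return 0;
}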

View File

@@ -121,7 +121,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    /* Lock the arena bin associated with the first object. */
    extent_t *extent = item_extent[0];
    arena_t *bin_arena = extent_arena_get(extent);
-   bin_t *bin = &bin_arena->bins[binind];
+   unsigned binshard = extent_binshard_get(extent);
+   assert(binshard < bin_infos[binind].n_shards);
+   bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
 
    if (config_prof && bin_arena == arena) {
        if (arena_prof_accum(tsd_tsdn(tsd), arena,
@@ -145,9 +147,10 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
        extent = item_extent[i];
        assert(ptr != NULL && extent != NULL);
 
-       if (extent_arena_get(extent) == bin_arena) {
+       if (extent_arena_get(extent) == bin_arena
+           && extent_binshard_get(extent) == binshard) {
            arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-               bin_arena, extent, ptr);
+               bin_arena, bin, binind, extent, ptr);
        } else {
            /*
            * This object was allocated via a different
@@ -169,8 +172,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
        * The flush loop didn't happen to flush to this thread's
        * arena, so the stats didn't get merged. Manually do so now.
        */
-       bin_t *bin = &arena->bins[binind];
-       malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+       unsigned binshard;
+       bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
+           &binshard);
        bin->stats.nflushes++;
        bin->stats.nrequests += tbin->tstats.nrequests;
        tbin->tstats.nrequests = 0;
@@ -557,9 +561,9 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
 
    /* Merge and reset tcache stats. */
    for (i = 0; i < SC_NBINS; i++) {
-       bin_t *bin = &arena->bins[i];
        cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
-       malloc_mutex_lock(tsdn, &bin->lock);
+       unsigned binshard;
+       bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
        bin->stats.nrequests += tbin->tstats.nrequests;
        malloc_mutex_unlock(tsdn, &bin->lock);
        tbin->tstats.nrequests = 0;