Store the bin shard selection in TSD.

This avoids having to choose a bin shard on the fly, and will also allow
flexible bin binding for each thread.
Qi Wang authored and committed, 2018-11-27 12:38:47 -08:00
parent 45bb4483ba
commit 98b56ab23d
5 changed files with 30 additions and 10 deletions
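
The change precomputes the shard choices once per thread and caches them in
TSD, so the allocation path never has to divide. A minimal sketch of the
cached payload, assuming a uint8_t element type (tsd_binshards_t, SC_NBINS,
and BIN_SHARDS_MAX appear in the diff below; the field width is a guess):

	/* One precomputed shard index per small size class, filled at bind time. */
	typedef struct tsd_binshards_s tsd_binshards_t;
	struct tsd_binshards_s {
		/* Invariant: binshard[i] < bin_infos[i].n_shards <= BIN_SHARDS_MAX. */
		uint8_t binshard[SC_NBINS];
	};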


@@ -1349,8 +1349,7 @@ arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
 		*binshard = 0;
 	} else {
-		*binshard = tsd_binshard_get(tsdn_tsd(tsdn)) %
-		    bin_infos[binind].n_shards;
+		*binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
 	}
 	assert(*binshard < bin_infos[binind].n_shards);
 	bin = &arena->bins[binind].bin_shards[*binshard];
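
After this hunk, the per-allocation cost drops from a modulo with a runtime
divisor (n_shards is only fixed at startup, so the compiler cannot
strength-reduce it) to a single indexed load from TSD. A sketch of the new
lookup in isolation, with tsd standing in for the calling thread's TSD:

	/* Fast path: shard was fixed at bind time; one load, no division. */
	unsigned binshard = tsd_binshardsp_get(tsd)->binshard[binind];
	bin_t *bin = &arena->bins[binind].bin_shards[binshard];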


@@ -379,9 +379,14 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
 		tsd_iarena_set(tsd, arena);
 	} else {
 		tsd_arena_set(tsd, arena);
-		unsigned binshard = atomic_fetch_add_u(&arena->binshard_next, 1,
-		    ATOMIC_RELAXED) % BIN_SHARDS_MAX;
-		tsd_binshard_set(tsd, binshard);
+		unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
+		    ATOMIC_RELAXED);
+		tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
+		for (unsigned i = 0; i < SC_NBINS; i++) {
+			assert(bin_infos[i].n_shards > 0 &&
+			    bin_infos[i].n_shards <= BIN_SHARDS_MAX);
+			bins->binshard[i] = shard % bin_infos[i].n_shards;
+		}
 	}
 }
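
The binding itself is round-robin: every bind takes the next ticket from
arena->binshard_next, and each size class folds that ticket by its own shard
count, so consecutive threads land on consecutive shards wherever sharding is
enabled. A standalone sketch of the arithmetic (the shard counts 1 and 4 are
hypothetical; real values come from bin_infos):

	#include <stdio.h>

	int main(void) {
		unsigned n_shards[2] = {1, 4};	/* hypothetical per-size-class shard counts */
		for (unsigned ticket = 0; ticket < 6; ticket++) {	/* six successive binds */
			printf("ticket %u -> shards {%u, %u}\n", ticket,
			    ticket % n_shards[0], ticket % n_shards[1]);
		}
		return 0;
	}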