Revert "Use trylock in tcache_bin_flush when possible."
This reverts commit 8584adc451
. Production
results not favorable. Will investigate separately.
This commit is contained in:
parent
209f2926b8
commit
fc1aaf13fe
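For context on what is being reverted: commit 8584adc451 made the periodic tcache flush best-effort. Instead of blocking on each arena bin lock, it used a trylock, skipped the objects whose bin lock was contended, and reported how many objects it actually flushed. The sketch below only illustrates that pattern; the type and function names (toy_bin_t, try_flush) are hypothetical and are not jemalloc's API.

#include <pthread.h>

/* Hypothetical cache bin: a local stack of objects that is returned to the
 * owning arena under a bin lock. Not jemalloc's real layout. */
typedef struct {
	pthread_mutex_t bin_lock;	/* stands in for the arena bin lock */
	void *cached[64];
	unsigned ncached;
} toy_bin_t;

/*
 * Best-effort flush: trim the bin down to `rem` cached objects, but give up
 * immediately if the bin lock is contended. Returns the number of objects
 * actually flushed (0 means no progress was made).
 */
unsigned
try_flush(toy_bin_t *tbin, unsigned rem) {
	if (tbin->ncached <= rem) {
		return 0;
	}
	if (pthread_mutex_trylock(&tbin->bin_lock) != 0) {
		/* Lock busy: skip the work instead of blocking. */
		return 0;
	}
	unsigned nflushed = tbin->ncached - rem;
	/* ... return cached[rem .. ncached) to the arena here ... */
	tbin->ncached = rem;
	pthread_mutex_unlock(&tbin->bin_lock);
	return nflushed;
}

Blocking lock acquisition, which this revert restores, guarantees the bin is trimmed on every flush at the cost of possible stalls on a contended bin lock.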
@@ -33,12 +33,8 @@ void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
 void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 	szind_t binind, unsigned rem);
-unsigned tcache_bin_try_flush_small(tsd_t *tsd, tcache_t *tcache,
-	tcache_bin_t *tbin, szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-	szind_t binind, unsigned rem);
-unsigned tcache_bin_try_flush_large(tsd_t *tsd, tcache_t *tcache,
-	tcache_bin_t *tbin, szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+	unsigned rem, tcache_t *tcache);
 void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
 	arena_t *arena);
 tcache_t *tcache_create_explicit(tsd_t *tsd);
@@ -227,8 +227,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	tbin = tcache_large_bin_get(tcache, binind);
 	tbin_info = &tcache_bin_info[binind];
 	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_large(tsd, tcache, tbin, binind,
-		    (tbin_info->ncached_max >> 1));
+		tcache_bin_flush_large(tsd, tbin, binind,
+		    (tbin_info->ncached_max >> 1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->ncached++;
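The src/tcache.c hunks below also drop the retry bookkeeping that the best-effort flush required in tcache_event_hard(): when a flush made no progress (every needed lock was busy), the GC cursor stayed on the same bin so it would be attempted again on the next event. A minimal sketch of that control flow, with hypothetical names (toy_gc_t, try_flush_bin) and an assumed fixed bin count:

#include <stdbool.h>

#define TOY_NHBINS 8

/* Hypothetical GC cursor: one bin is visited per tcache event. */
typedef struct {
	unsigned next_gc_bin;
} toy_gc_t;

/* Stand-in for the best-effort flush; returns objects flushed (0 = no progress). */
extern unsigned try_flush_bin(unsigned binind);

void
gc_event(toy_gc_t *gc) {
	unsigned binind = gc->next_gc_bin;
	bool repeat_bin = (try_flush_bin(binind) == 0);

	if (!repeat_bin) {
		/* Made progress: advance to the next bin, wrapping around. */
		gc->next_gc_bin++;
		if (gc->next_gc_bin == TOY_NHBINS) {
			gc->next_gc_bin = 0;
		}
	}
	/* Otherwise keep next_gc_bin in place and retry on the next event. */
}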
src/tcache.c (141 changed lines)
@@ -45,16 +45,14 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 	} else {
 		tbin = tcache_large_bin_get(tcache, binind);
 	}
-	bool repeat_bin;
 	if (tbin->low_water > 0) {
 		/*
 		 * Flush (ceiling) 3/4 of the objects below the low water mark.
 		 */
-		unsigned nflushed;
 		if (binind < NBINS) {
-			nflushed = tcache_bin_try_flush_small(tsd, tcache, tbin,
-			    binind, tbin->ncached - tbin->low_water +
-			    (tbin->low_water >> 2));
+			tcache_bin_flush_small(tsd, tcache, tbin, binind,
+			    tbin->ncached - tbin->low_water + (tbin->low_water
+			    >> 2));
 			/*
 			 * Reduce fill count by 2X. Limit lg_fill_div such that
 			 * the fill count is always at least 1.
@@ -65,13 +63,10 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 				tcache->lg_fill_div[binind]++;
 			}
 		} else {
-			nflushed = tcache_bin_try_flush_large(tsd, tcache, tbin,
-			    binind, tbin->ncached - tbin->low_water +
-			    (tbin->low_water >> 2));
+			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+			    - tbin->low_water + (tbin->low_water >> 2), tcache);
 		}
-		repeat_bin = (nflushed == 0);
-	} else {
-		if (tbin->low_water < 0) {
+	} else if (tbin->low_water < 0) {
 		/*
 		 * Increase fill count by 2X for small bins. Make sure
 		 * lg_fill_div stays greater than 0.
@@ -80,15 +75,12 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 			tcache->lg_fill_div[binind]--;
 		}
 	}
-		repeat_bin = false;
-	}
-	if (!repeat_bin) {
+	tbin->low_water = tbin->ncached;
+
 	tcache->next_gc_bin++;
 	if (tcache->next_gc_bin == nhbins) {
 		tcache->next_gc_bin = 0;
 	}
-		tbin->low_water = tbin->ncached;
-	}
 }
 
 void *
@@ -107,9 +99,11 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	return ret;
 }
 
-static inline unsigned
-tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem, bool must_flush) {
+void
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    szind_t binind, unsigned rem) {
+	bool merged_stats = false;
+
 	assert(binind < NBINS);
 	assert(rem <= tbin->ncached);
 
@@ -122,12 +116,9 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
 	}
 
-	bool merged_stats = false;
-	unsigned nflushed = 0;
-	unsigned nskipped = 0;
 	while (nflush > 0) {
 		/* Lock the arena bin associated with the first object. */
-		extent_t *extent = item_extent[nskipped];
+		extent_t *extent = item_extent[0];
 		arena_t *bin_arena = extent_arena_get(extent);
 		arena_bin_t *bin = &bin_arena->bins[binind];
 
@@ -139,16 +130,7 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			tcache->prof_accumbytes = 0;
 		}
 
-		if (must_flush) {
-			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		} else {
-			/* Make best effort to flush w/o blocking. */
-			if (malloc_mutex_trylock(tsd_tsdn(tsd), &bin->lock)) {
-				nskipped++;
-				nflush--;
-				continue;
-			}
-		}
+		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 		if (config_stats && bin_arena == arena) {
 			assert(!merged_stats);
 			merged_stats = true;
@@ -157,7 +139,7 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			tbin->tstats.nrequests = 0;
 		}
 		unsigned ndeferred = 0;
-		for (unsigned i = nskipped; i < nflush; i++) {
+		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(tbin->avail - 1 - i);
 			extent = item_extent[i];
 			assert(ptr != NULL && extent != NULL);
@@ -172,14 +154,13 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				 * locked. Stash the object, so that it can be
 				 * handled in a future pass.
 				 */
-				*(tbin->avail - 1 - ndeferred - nskipped) = ptr;
-				item_extent[ndeferred + nskipped] = extent;
+				*(tbin->avail - 1 - ndeferred) = ptr;
+				item_extent[ndeferred] = extent;
 				ndeferred++;
 			}
 		}
 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
-		nflushed += nflush - ndeferred;
 		nflush = ndeferred;
 	}
 	if (config_stats && !merged_stats) {
@@ -188,49 +169,26 @@ tcache_bin_flush_small_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
 		arena_bin_t *bin = &arena->bins[binind];
-		if (must_flush) {
-			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		}
-		if (must_flush ||
-		    !malloc_mutex_trylock(tsd_tsdn(tsd), &bin->lock)) {
-			malloc_mutex_assert_owner(tsd_tsdn(tsd), &bin->lock);
-			bin->stats.nflushes++;
-			bin->stats.nrequests += tbin->tstats.nrequests;
-			tbin->tstats.nrequests = 0;
-			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
-		}
+		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+		bin->stats.nflushes++;
+		bin->stats.nrequests += tbin->tstats.nrequests;
+		tbin->tstats.nrequests = 0;
+		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 	}
-	assert(nflushed == tbin->ncached - rem - nskipped);
-	assert(nskipped == 0 || !must_flush);
 
-	if (nflushed > 0) {
-		memmove(tbin->avail - (rem + nskipped), tbin->avail -
-		    tbin->ncached, rem * sizeof(void *));
-	}
-	tbin->ncached = rem + nskipped;
+	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+	    sizeof(void *));
+	tbin->ncached = rem;
 	if ((low_water_t)tbin->ncached < tbin->low_water) {
 		tbin->low_water = tbin->ncached;
 	}
-
-	return nflushed;
 }
 
 void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem) {
-	tcache_bin_flush_small_impl(tsd, tcache, tbin, binind, rem, true);
-}
-
-unsigned
-tcache_bin_try_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem) {
-	return tcache_bin_flush_small_impl(tsd, tcache, tbin, binind, rem,
-	    false);
-}
-
-static inline unsigned
-tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem, bool must_flush) {
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+    unsigned rem, tcache_t *tcache) {
+	bool merged_stats = false;
+
 	assert(binind < nhbins);
 	assert(rem <= tbin->ncached);
 
@@ -243,31 +201,18 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
 	}
 
-	bool merged_stats = false;
-	unsigned nflushed = 0;
-	unsigned nskipped = 0;
 	while (nflush > 0) {
 		/* Lock the arena associated with the first object. */
-		extent_t *extent = item_extent[nskipped];
+		extent_t *extent = item_extent[0];
 		arena_t *locked_arena = extent_arena_get(extent);
 		UNUSED bool idump;
 
 		if (config_prof) {
 			idump = false;
 		}
-		if (must_flush) {
-			malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
-		} else {
-			/* Make best effort to flush w/o blocking. */
-			if (malloc_mutex_trylock(tsd_tsdn(tsd),
-			    &locked_arena->large_mtx)) {
-				nskipped++;
-				nflush--;
-				continue;
-			}
-		}
 
-		for (unsigned i = nskipped; i < nflush; i++) {
+		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
 			extent = item_extent[i];
@@ -293,7 +238,7 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
 
 		unsigned ndeferred = 0;
-		for (unsigned i = nskipped; i < nflush; i++) {
+		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(tbin->avail - 1 - i);
 			extent = item_extent[i];
 			assert(ptr != NULL && extent != NULL);
@@ -307,8 +252,8 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				 * Stash the object, so that it can be handled
 				 * in a future pass.
 				 */
-				*(tbin->avail - 1 - ndeferred - nskipped) = ptr;
-				item_extent[ndeferred + nskipped] = extent;
+				*(tbin->avail - 1 - ndeferred) = ptr;
+				item_extent[ndeferred] = extent;
 				ndeferred++;
 			}
 		}
@@ -317,7 +262,6 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		}
 		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
 		    ndeferred);
-		nflushed += nflush - ndeferred;
 		nflush = ndeferred;
 	}
 	if (config_stats && !merged_stats) {
@@ -330,31 +274,12 @@ tcache_bin_flush_large_impl(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		tbin->tstats.nrequests = 0;
 	}
 
-	assert(nflushed == tbin->ncached - rem - nskipped);
-	assert(nskipped == 0 || !must_flush);
-
-	if (nflushed > 0) {
-		memmove(tbin->avail - (rem + nskipped), tbin->avail -
-		    tbin->ncached, rem * sizeof(void *));
-	}
-	tbin->ncached = rem + nskipped;
+	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+	    sizeof(void *));
+	tbin->ncached = rem;
 	if ((low_water_t)tbin->ncached < tbin->low_water) {
 		tbin->low_water = tbin->ncached;
 	}
-	return nflushed;
-}
-
-void
-tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem) {
-	tcache_bin_flush_large_impl(tsd, tcache, tbin, binind, rem, true);
-}
-
-unsigned
-tcache_bin_try_flush_large(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    szind_t binind, unsigned rem) {
-	return tcache_bin_flush_large_impl(tsd, tcache, tbin, binind, rem,
-	    false);
 }
 
 void
@@ -533,7 +458,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
 	}
 	for (unsigned i = NBINS; i < nhbins; i++) {
 		tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
-		tcache_bin_flush_large(tsd, tcache, tbin, i, 0);
+		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
 		if (config_stats) {
 			assert(tbin->tstats.nrequests == 0);