Fix tcache crash during thread cleanup.

Properly maintain tcache_bin_t's avail pointer such that it is NULL if
no objects are cached. A stale avail pointer only caused problems during
thread cache destruction, since cache flushing otherwise never occurs on
an empty bin.
Author: Jason Evans
Date:   2010-04-14 11:27:13 -07:00
Parent: 38cda690dd
Commit: 5055f4516c
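
For context, the invariant this change enforces can be stated with a
simplified bin. This is a hedged sketch, not jemalloc's definitions: the
struct layout and the toy_* names below are invented for illustration,
assuming only that avail heads the bin's list of cached objects (as the
flush loops in the diff suggest) and that ncached counts them.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified bin: avail heads a list of cached objects
 * (NULL when none are cached) and ncached counts them.  Not jemalloc's
 * real tcache_bin_t, just enough to state the invariant. */
typedef struct {
    void     *avail;
    unsigned  ncached;
} toy_bin_t;

/* The property this commit restores, and the assertion it adds. */
static int
toy_bin_invariant_holds(const toy_bin_t *tbin)
{
    return (tbin->ncached > 0 || tbin->avail == NULL);
}

int
main(void)
{
    toy_bin_t ok    = { NULL, 0 };        /* empty bin, avail cleared  */
    toy_bin_t stale = { (void *)&ok, 0 }; /* empty bin, avail left set */

    printf("ok bin:    %s\n", toy_bin_invariant_holds(&ok) ? "holds" : "violated");
    printf("stale bin: %s\n", toy_bin_invariant_holds(&stale) ? "holds" : "violated");
    /* Flushing the stale bin at thread-cache destruction is what crashed:
     * the flush loop chases avail even though ncached == 0. */
    return 0;
}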

@@ -55,12 +55,14 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
 {
         void *flush, *deferred, *ptr;
         unsigned i, nflush, ndeferred;
+        bool first_pass;
 
         assert(binind < nbins);
         assert(rem <= tbin->ncached);
+        assert(tbin->ncached > 0 || tbin->avail == NULL);
 
-        for (flush = tbin->avail, nflush = tbin->ncached - rem; flush != NULL;
-            flush = deferred, nflush = ndeferred) {
+        for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
+            true; flush != NULL; flush = deferred, nflush = ndeferred) {
                 /* Lock the arena bin associated with the first object. */
                 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
                 arena_t *arena = chunk->arena;
@@ -110,12 +112,9 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
                 }
 
                 malloc_mutex_unlock(&bin->lock);
 
-                if (flush != NULL) {
-                        /*
-                         * This was the first pass, and rem cached objects
-                         * remain.
-                         */
+                if (first_pass) {
                         tbin->avail = flush;
+                        first_pass = false;
                 }
         }
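
The pair of hunks above changes tcache_bin_flush_small() so that
tbin->avail is republished on the first pass of the flush loop regardless
of whether any objects remain, instead of only when flush is non-NULL; on
a full flush (rem == 0) the stored value is therefore NULL rather than a
stale pointer. The sketch below is a rough, self-contained rendering of
that pattern, not jemalloc's code: node_t, bin_t, bin_flush(), and the
integer "home" field standing in for per-arena batching are all invented
for illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

/* Invented node type; jemalloc threads its list through the cached
 * objects themselves, but an explicit node keeps the sketch readable. */
typedef struct node {
    struct node *next;
    int          home;      /* stands in for the owning arena/bin */
} node_t;

typedef struct {
    node_t   *avail;        /* head of cached-object list, or NULL */
    unsigned  ncached;
} bin_t;

/* Flush all but rem cached objects, batching by "home" on each pass. */
static void
bin_flush(bin_t *tbin, unsigned rem)
{
    node_t *flush, *deferred, *ptr;
    unsigned i, nflush, ndeferred;
    bool first_pass;

    assert(rem <= tbin->ncached);
    assert(tbin->ncached > 0 || tbin->avail == NULL);

    for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
        true; flush != NULL; flush = deferred, nflush = ndeferred) {
        int home = flush->home;    /* first object picks this pass's home */

        deferred = NULL;
        ndeferred = 0;
        for (i = 0; i < nflush; i++) {
            ptr = flush;
            flush = flush->next;
            if (ptr->home == home) {
                free(ptr);         /* return the object to its home */
            } else {
                /* Wrong home for this pass: defer to a later pass. */
                ptr->next = deferred;
                deferred = ptr;
                ndeferred++;
            }
        }
        if (first_pass) {
            /*
             * flush now heads the rem objects that stay cached, or is
             * NULL if none do.  Publishing it unconditionally is the fix:
             * the old code skipped the store when flush was NULL, leaving
             * a stale avail behind after a full flush.
             */
            tbin->avail = flush;
            first_pass = false;
        }
    }
    tbin->ncached = rem;
    assert(tbin->ncached > 0 || tbin->avail == NULL);
}

int
main(void)
{
    bin_t tbin = { NULL, 0 };

    /* Cache three objects from two different "homes", then flush all. */
    for (unsigned i = 0; i < 3; i++) {
        node_t *n = malloc(sizeof(*n));
        n->home = (int)(i % 2);
        n->next = tbin.avail;
        tbin.avail = n;
        tbin.ncached++;
    }
    bin_flush(&tbin, 0);
    assert(tbin.avail == NULL && tbin.ncached == 0);
    return 0;
}
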
@@ -133,12 +132,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
 {
         void *flush, *deferred, *ptr;
         unsigned i, nflush, ndeferred;
+        bool first_pass;
 
         assert(binind < nhbins);
         assert(rem <= tbin->ncached);
+        assert(tbin->ncached > 0 || tbin->avail == NULL);
 
-        for (flush = tbin->avail, nflush = tbin->ncached - rem; flush != NULL;
-            flush = deferred, nflush = ndeferred) {
+        for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
+            true; flush != NULL; flush = deferred, nflush = ndeferred) {
                 /* Lock the arena associated with the first object. */
                 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
                 arena_t *arena = chunk->arena;
@@ -183,12 +184,9 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
                 }
 
                 malloc_mutex_unlock(&arena->lock);
 
-                if (flush != NULL) {
-                        /*
-                         * This was the first pass, and rem cached objects
-                         * remain.
-                         */
+                if (first_pass) {
                         tbin->avail = flush;
+                        first_pass = false;
                 }
         }
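
The same fix is applied to the large-object path above. As the commit
message notes, the stale pointer only mattered at thread exit: normal
operation never flushes a bin with nothing cached, but thread-cache
destruction flushes every bin, empty or not. The sketch below illustrates
that destruction path under the same assumptions as the earlier sketches;
toy_bin_t, toy_bin_flush_all(), and toy_tcache_destroy() are invented
names, not jemalloc's actual teardown code.

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define TOY_NBINS 4

typedef struct {
    void     *avail;        /* head of cached-object list, or NULL */
    unsigned  ncached;
} toy_bin_t;

typedef struct {
    toy_bin_t bins[TOY_NBINS];
} toy_tcache_t;

/* Full flush of one bin (rem == 0), relying on the invariant. */
static void
toy_bin_flush_all(toy_bin_t *tbin)
{
    /* With the fix, an empty bin always has avail == NULL, so the loop
     * below never chases a stale pointer when there is nothing cached. */
    assert(tbin->ncached > 0 || tbin->avail == NULL);
    while (tbin->avail != NULL) {
        /* Pop one cached object; real code would return it to its arena. */
        tbin->avail = *(void **)tbin->avail;
        tbin->ncached--;
    }
    assert(tbin->ncached == 0);
}

/* Thread-cache teardown flushes every bin, whether or not it is empty. */
static void
toy_tcache_destroy(toy_tcache_t *tcache)
{
    unsigned i;

    for (i = 0; i < TOY_NBINS; i++)
        toy_bin_flush_all(&tcache->bins[i]);
}

int
main(void)
{
    toy_tcache_t tcache;

    memset(&tcache, 0, sizeof(tcache)); /* all bins empty: avail == NULL */
    toy_tcache_destroy(&tcache);        /* safe even with nothing cached */
    return 0;
}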