Merge branch '2_2_5_bp'

This commit is contained in:
Jason Evans 2011-11-14 17:15:59 -08:00
commit fc1bb70e5f
6 changed files with 34 additions and 17 deletions

View File

@ -6,6 +6,14 @@ found in the git revision history:
 http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
 git://canonware.com/jemalloc.git
+* 2.2.5 (November 14, 2011)
+  Bug fixes:
+  - Fix huge_ralloc() race when using mremap(2). This is a serious bug that
+    could cause memory corruption and/or crashes.
+  - Fix huge_ralloc() to maintain chunk statistics.
+  - Fix malloc_stats_print(..., "a") output.
 * 2.2.4 (November 5, 2011)
 Bug fixes:

View File

@ -50,7 +50,7 @@ extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 void *chunk_alloc(size_t size, bool base, bool *zero);
-void chunk_dealloc(void *chunk, size_t size);
+void chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool chunk_boot(void);
 #endif /* JEMALLOC_H_EXTERNS */

View File

@ -569,7 +569,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 arena->ndirty -= spare->ndirty;
 }
 malloc_mutex_unlock(&arena->lock);
-chunk_dealloc((void *)spare, chunksize);
+chunk_dealloc((void *)spare, chunksize, true);
 malloc_mutex_lock(&arena->lock);
 #ifdef JEMALLOC_STATS
 arena->stats.mapped -= chunksize;

View File

@ -70,7 +70,7 @@ RETURN:
 #ifdef JEMALLOC_IVSALLOC
 if (base == false && ret != NULL) {
 if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-chunk_dealloc(ret, size);
+chunk_dealloc(ret, size, true);
 return (NULL);
 }
 }
@ -108,7 +108,7 @@ RETURN:
 }
 void
-chunk_dealloc(void *chunk, size_t size)
+chunk_dealloc(void *chunk, size_t size, bool unmap)
 {
 assert(chunk != NULL);
@ -125,6 +125,7 @@ chunk_dealloc(void *chunk, size_t size)
 malloc_mutex_unlock(&chunks_mtx);
 #endif
+if (unmap) {
 #ifdef JEMALLOC_SWAP
 if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
 return;
@ -135,6 +136,7 @@ chunk_dealloc(void *chunk, size_t size)
 #endif
 chunk_dealloc_mmap(chunk, size);
 }
+}
 bool
 chunk_boot(void)

View File

@ -110,12 +110,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 if (offset == 0) {
 /* Trim trailing space. */
 chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-    - chunk_size);
+    - chunk_size, true);
 } else {
 size_t trailsize;
 /* Trim leading space. */
-chunk_dealloc(ret, alignment - offset);
+chunk_dealloc(ret, alignment - offset, true);
 ret = (void *)((uintptr_t)ret + (alignment - offset));
@ -124,7 +124,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 /* Trim trailing space. */
 assert(trailsize < alloc_size);
 chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-    trailsize);
+    trailsize, true);
 }
 }
@ -234,6 +234,13 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 ) {
 size_t newsize = huge_salloc(ret);
+/*
+ * Remove ptr from the tree of huge allocations before
+ * performing the remap operation, in order to avoid the
+ * possibility of another thread acquiring that mapping before
+ * this one removes it from the tree.
+ */
+huge_dalloc(ptr, false);
 if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
 ret) == MAP_FAILED) {
 /*
@ -253,9 +260,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 if (opt_abort)
 abort();
 memcpy(ret, ptr, copysize);
-idalloc(ptr);
-} else
-huge_dalloc(ptr, false);
+chunk_dealloc_mmap(ptr, oldsize);
+}
 } else
 #endif
 {
@ -295,9 +301,10 @@ huge_dalloc(void *ptr, bool unmap)
 memset(node->addr, 0x5a, node->size);
 #endif
 #endif
-chunk_dealloc(node->addr, node->size);
 }
+chunk_dealloc(node->addr, node->size, unmap);
 base_node_dealloc(node);
 }

View File

@ -748,7 +748,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 ninitialized++;
 }
-if (ninitialized > 1) {
+if (ninitialized > 1 || unmerged == false) {
 /* Print merged arena stats. */
 malloc_cprintf(write_cb, cbopaque,
 "\nMerged arenas stats:\n");