Merge branch 'dev'

Jason Evans 2013-06-02 21:05:59 -07:00
commit 0ed518e5da
4 changed files with 41 additions and 8 deletions

View File

@@ -6,6 +6,19 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
+* 3.4.0 (June 2, 2013)
+
+  This version is essentially a small bugfix release, but the addition of
+  aarch64 support requires that the minor version be incremented.
+
+  Bug fixes:
+  - Fix race-triggered deadlocks in chunk_record().  These deadlocks were
+    typically triggered by multiple threads concurrently deallocating huge
+    objects.
+
+  New features:
+  - Add support for the aarch64 architecture.
+
 * 3.3.1 (March 6, 2013)
 
   This version fixes bugs that are typically encountered only when utilizing
@@ -15,7 +28,7 @@ found in the git revision history:
   - Fix a locking order bug that could cause deadlock during fork if heap
     profiling were enabled.
   - Fix a chunk recycling bug that could cause the allocator to lose track of
     whether a chunk was zeroed.  On FreeBSD, NetBSD, and OS X, it could cause
     corruption if allocating via sbrk(2) (unlikely unless running with the
     "dss:primary" option specified).  This was completely harmless on Linux
     unless using mlockall(2) (and unlikely even then, unless the

View File

@@ -432,7 +432,14 @@ for (i = 0; i < nbins; i++) {
     referenced by the symbolic link named <filename
     class="symlink">/etc/malloc.conf</filename>, and the value of the
     environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
-    that order, from left to right as options.</para>
+    that order, from left to right as options.  Note that
+    <varname>malloc_conf</varname> may be read before
+    <function>main<parameter/></function> is entered, so the declaration of
+    <varname>malloc_conf</varname> should specify an initializer that contains
+    the final value to be read by jemalloc.  <varname>malloc_conf</varname> is
+    a compile-time setting, whereas <filename
+    class="symlink">/etc/malloc.conf</filename> and <envar>MALLOC_CONF</envar>
+    can be safely set any time prior to program invocation.</para>
 
     <para>An options string is a comma-separated list of option:value pairs.
     There is one key corresponding to each <link
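The new paragraph is worth restating with code: because jemalloc may read malloc_conf during allocator bootstrap, before main() runs, the variable only works if its initializer already holds the final option string; assigning it from main() would be too late. A minimal sketch (the option names narenas and lg_chunk are examples from the jemalloc 3.x manual, not part of this commit):

	#include <stdlib.h>

	/*
	 * jemalloc may read malloc_conf before main() is entered, so the
	 * final option string must appear in the initializer.  The options
	 * shown (narenas, lg_chunk) are illustrative examples.
	 */
	const char	*malloc_conf = "narenas:4,lg_chunk:24";

	int
	main(void)
	{
		void *p = malloc(64);	/* Already governed by malloc_conf. */

		free(p);
		return (0);
	}

By contrast, /etc/malloc.conf and MALLOC_CONF are read at run time, so, as the paragraph notes, they can be changed any time before the program is invoked.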

View File

@@ -278,6 +278,9 @@ static const bool config_ivsalloc =
 #  ifdef __arm__
 #    define LG_QUANTUM		3
 #  endif
+#  ifdef __aarch64__
+#    define LG_QUANTUM		4
+#  endif
 #  ifdef __hppa__
 #    define LG_QUANTUM		4
 #  endif

View File

@@ -214,7 +214,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
 {
 	bool unzeroed;
-	extent_node_t *xnode, *node, *prev, key;
+	extent_node_t *xnode, *node, *prev, *xprev, key;
 
 	unzeroed = pages_purge(chunk, size);
 	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -226,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 	 * held.
 	 */
 	xnode = base_node_alloc();
+	/* Use xprev to implement conditional deferred deallocation of prev. */
+	xprev = NULL;
 
 	malloc_mutex_lock(&chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
@@ -242,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->size += size;
 		node->zeroed = (node->zeroed && (unzeroed == false));
 		extent_tree_szad_insert(chunks_szad, node);
-		if (xnode != NULL)
-			base_node_dealloc(xnode);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
 		if (xnode == NULL) {
@@ -253,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 			 * already been purged, so this is only a virtual
 			 * memory leak.
 			 */
-			malloc_mutex_unlock(&chunks_mtx);
-			return;
+			goto label_return;
 		}
 		node = xnode;
+		xnode = NULL; /* Prevent deallocation below. */
 		node->addr = chunk;
 		node->size = size;
 		node->zeroed = (unzeroed == false);
@@ -282,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);
 
-		base_node_dealloc(prev);
+		xprev = prev;
 	}
+
+label_return:
 	malloc_mutex_unlock(&chunks_mtx);
+	/*
+	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+	 * avoid potential deadlock.
+	 */
+	if (xnode != NULL)
+		base_node_dealloc(xnode);
+	if (xprev != NULL)
+		base_node_dealloc(prev);
 }
 
 void
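The shape of the fix: while chunks_mtx is held, chunk_record() now only records which extent nodes have become garbage (xnode if it went unused, xprev after backward coalescing), and calls base_node_dealloc() only after the single unlock point at label_return, since base-node deallocation can acquire locks that another thread may already hold while blocked on chunks_mtx. A self-contained sketch of the same defer-past-the-unlock pattern (pthreads and free() stand in for jemalloc's malloc_mutex_t and base_node_dealloc(); all names are illustrative):

	#include <pthread.h>
	#include <stdlib.h>

	typedef struct node_s node_t;
	struct node_s {
		node_t	*next;
	};

	static pthread_mutex_t	list_mtx = PTHREAD_MUTEX_INITIALIZER;
	static node_t		*list_head = NULL;

	static void
	list_remove(node_t *target)
	{
		node_t **linkp, *deferred;

		/* Record garbage while locked; deallocate after unlocking. */
		deferred = NULL;

		pthread_mutex_lock(&list_mtx);
		for (linkp = &list_head; *linkp != NULL;
		    linkp = &(*linkp)->next) {
			if (*linkp == target) {
				*linkp = target->next;
				deferred = target; /* Defer; no free() here. */
				goto label_return; /* Centralized unlock. */
			}
		}
	label_return:
		pthread_mutex_unlock(&list_mtx);

		/* Safe: the lock is no longer held, so this cannot deadlock. */
		if (deferred != NULL)
			free(deferred);
	}

	int
	main(void)
	{
		node_t *n = malloc(sizeof(*n));

		if (n == NULL)
			return (1);
		n->next = NULL;
		list_head = n;
		list_remove(n);	/* Unlinks n, then frees it after the unlock. */
		return (0);
	}

The same reasoning explains the two smaller changes above: the early-exit path now uses goto label_return rather than unlocking inline, and xnode is nulled out once ownership passes to node so the deferred cleanup at the end does not free a node that is still in use.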