Merge branch 'dev'
commit 5ef7abf6d8
@@ -6,6 +6,13 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git
 
+* 2.2.1 (March 30, 2011)
+
+  Bug fixes:
+  - Implement atomic operations for x86/x64.  This fixes compilation failures
+    for versions of gcc that are still in wide use.
+  - Fix an assertion in arena_purge().
+
 * 2.2.0 (March 22, 2011)
 
   This version incorporates several improvements to algorithms and data
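The first bug fix targets toolchains whose gcc does not provide the __sync_* builtins that the atomic header otherwise relies on; the hunks below key off gcc's predefined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 / __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 macros. As a rough standalone illustration (the file name sync_probe.c is invented; this is not part of the patch), the following probe reports whether a given compiler advertises those macros, i.e. whether the hand-rolled x86/x64 assembly path added below would be needed:

/* sync_probe.c -- standalone probe; build with: gcc -o sync_probe sync_probe.c */
#include <stdio.h>

int
main(void)
{
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
    puts("32-bit __sync builtins advertised");
#else
    puts("32-bit __sync builtins NOT advertised; asm fallback needed");
#endif
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
    puts("64-bit __sync builtins advertised");
#else
    puts("64-bit __sync builtins NOT advertised; asm fallback needed");
#endif
    return (0);
}

Gcc releases that predate these builtins leave both macros undefined, so without the new #elif branches a non-Darwin build fell through to the "#error" branches visible in the diff below.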
@@ -40,6 +40,7 @@ uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
+/******************************************************************************/
 /* 64-bit operations. */
 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
 JEMALLOC_INLINE uint64_t
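The surrounding #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) guard is the header-inlining idiom jemalloc uses throughout its internal headers: when inlining is enabled every includer compiles the bodies as static inline, and when it is disabled the header exposes only prototypes while the single translation unit that defines the *_C_ macro emits the out-of-line copies. A minimal sketch of that idiom with invented names (my_atomic.h, MY_ENABLE_INLINE, MY_ATOMIC_C_), not jemalloc's actual build glue:

/* my_atomic.h -- hypothetical header illustrating the same idiom. */
#ifndef MY_ATOMIC_H
#define MY_ATOMIC_H

#include <stdint.h>

#ifdef MY_ENABLE_INLINE
#  define MY_INLINE static inline
#else
#  define MY_INLINE
/* Inlining disabled: consumers see a prototype only, and my_atomic.c
 * (which defines MY_ATOMIC_C_) emits the single out-of-line copy. */
uint64_t my_add_uint64(uint64_t *p, uint64_t x);
#endif

#if (defined(MY_ENABLE_INLINE) || defined(MY_ATOMIC_C_))
MY_INLINE uint64_t
my_add_uint64(uint64_t *p, uint64_t x)
{
    /* gcc/clang __sync builtin; returns the post-add value. */
    return (__sync_add_and_fetch(p, x));
}
#endif

#endif /* MY_ATOMIC_H */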
@@ -69,12 +70,40 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 
     return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
 }
+#elif (defined(__amd64_) || defined(__x86_64__))
+JEMALLOC_INLINE uint64_t
+atomic_add_uint64(uint64_t *p, uint64_t x)
+{
+
+    asm volatile (
+        "lock; xaddq %0, %1;"
+        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "m" (*p) /* Inputs. */
+        );
+
+    return (x);
+}
+
+JEMALLOC_INLINE uint64_t
+atomic_sub_uint64(uint64_t *p, uint64_t x)
+{
+
+    x = (uint64_t)(-(int64_t)x);
+    asm volatile (
+        "lock; xaddq %0, %1;"
+        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "m" (*p) /* Inputs. */
+        );
+
+    return (x);
+}
 #else
 #  if (LG_SIZEOF_PTR == 3)
 #    error "Missing implementation for 64-bit atomic operations"
 #  endif
 #endif
 
+/******************************************************************************/
 /* 32-bit operations. */
 #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
 JEMALLOC_INLINE uint32_t
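For readers unfamiliar with the instruction the new path uses: lock; xaddq atomically adds the register operand into the memory operand and leaves the previous memory value in that register, so after the asm statement the "+r" (x) operand holds *p as it was before the addition. A standalone sketch exercising the same operand constraints (the file name xadd_demo.c is invented; x86-64 with gcc or clang assumed; this is not jemalloc code):

/* xadd_demo.c -- x86-64 only; shows that xadd leaves the pre-add value
 * of the memory operand in the register operand. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
xadd_u64(uint64_t *p, uint64_t x)
{
    /* Same constraints as the patch: x is read-write ("+r"),
     * *p is both written ("=m") and read ("m"). */
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (x), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );
    return (x); /* Value of *p before the add. */
}

int
main(void)
{
    uint64_t v = 40;
    uint64_t old = xadd_u64(&v, 2);

    assert(old == 40 && v == 42);
    printf("old=%" PRIu64 " new=%" PRIu64 "\n", old, v);
    return (0);
}

The lock prefix makes the read-modify-write of *p atomic with respect to other processors, which is what lets a single xaddq replace the __sync builtin on compilers that lack it.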
@@ -104,6 +133,33 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
 
     return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
 }
+#elif (defined(__i386__) || defined(__amd64_) || defined(__x86_64__))
+JEMALLOC_INLINE uint32_t
+atomic_add_uint32(uint32_t *p, uint32_t x)
+{
+
+    asm volatile (
+        "lock; xaddl %0, %1;"
+        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "m" (*p) /* Inputs. */
+        );
+
+    return (x);
+}
+
+JEMALLOC_INLINE uint32_t
+atomic_sub_uint32(uint32_t *p, uint32_t x)
+{
+
+    x = (uint32_t)(-(int32_t)x);
+    asm volatile (
+        "lock; xaddl %0, %1;"
+        : "+r" (x), "=m" (*p) /* Outputs. */
+        : "m" (*p) /* Inputs. */
+        );
+
+    return (x);
+}
 #else
 #  error "Missing implementation for 32-bit atomic operations"
 #endif
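The subtraction variants reuse the same xadd instruction by adding the two's-complement negation of x; that is what x = (uint32_t)(-(int32_t)x) is for. A tiny standalone check of that identity using plain C arithmetic rather than the asm path (the file name neg_add_demo.c is invented, not from the patch):

/* neg_add_demo.c -- adding the two's-complement negation of x is the
 * same as subtracting x under wrapping (mod 2^32) arithmetic. */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    uint32_t p = 100;
    uint32_t x = 7;
    /* Same conversion the patch applies before the xadd. */
    uint32_t neg = (uint32_t)(-(int32_t)x);

    assert((uint32_t)(p + neg) == p - x); /* 100 + 0xFFFFFFF9 == 93 (mod 2^32). */
    return (0);
}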
@@ -868,9 +868,10 @@ arena_purge(arena_t *arena, bool all)
     }
     assert(ndirty == arena->ndirty);
 #endif
-    assert(arena->ndirty > arena->npurgatory);
+    assert(arena->ndirty > arena->npurgatory || all);
     assert(arena->ndirty > chunk_npages || all);
-    assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);
+    assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+        arena->npurgatory) || all);
 
 #ifdef JEMALLOC_STATS
     arena->stats.npurge++;
@@ -882,8 +883,10 @@ arena_purge(arena_t *arena, bool all)
      * multiple threads from racing to reduce ndirty below the threshold.
      */
     npurgatory = arena->ndirty - arena->npurgatory;
-    if (all == false)
+    if (all == false) {
+        assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
         npurgatory -= arena->nactive >> opt_lg_dirty_mult;
+    }
     arena->npurgatory += npurgatory;
 
     while (npurgatory > 0) {
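Taken together, the two arena_purge() hunks make the entry assertions also tolerate all == true, compare the dirty-page threshold against arena->ndirty minus the pages already in purgatory, and add an assertion that subtracting the retained dirty-page allowance (nactive >> opt_lg_dirty_mult) cannot underflow. A toy model of that arithmetic with invented numbers and plain locals standing in for the arena fields (purge_model.c is hypothetical, not jemalloc code):

/* purge_model.c -- toy model of the npurgatory accounting in arena_purge().
 * The locals mirror arena->ndirty, arena->npurgatory, arena->nactive and
 * opt_lg_dirty_mult; the values are made up for illustration. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t ndirty = 1000;        /* dirty pages in the arena */
    size_t npurgatory_cur = 100; /* pages some thread is already purging */
    size_t nactive = 4096;       /* active pages */
    size_t lg_dirty_mult = 5;    /* keep up to nactive/32 pages dirty */
    bool all = false;

    /* How many additional pages this call takes responsibility for
     * purging (mirrors the patched code above). */
    size_t npurgatory = ndirty - npurgatory_cur;
    if (all == false) {
        /* New assertion: the retained allowance fits in npurgatory. */
        assert(npurgatory >= nactive >> lg_dirty_mult);
        npurgatory -= nactive >> lg_dirty_mult;
    }
    npurgatory_cur += npurgatory;

    printf("will purge %zu pages; purgatory total %zu\n",
        npurgatory, npurgatory_cur);
    return (0);
}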