Merge branch 'dev'

Jason Evans 2013-10-20 19:44:27 -07:00
commit 0135fb806e
16 changed files with 208 additions and 115 deletions

View File

@@ -6,6 +6,21 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git
* 3.4.1 (October 20, 2013)
Bug fixes:
- Fix a race in the "arenas.extend" mallctl that could cause memory corruption
of internal data structures and subsequent crashes.
- Fix Valgrind integration flaws that caused Valgrind warnings about reads of
uninitialized memory in:
+ arena chunk headers
+ internal zero-initialized data structures (relevant to tcache and prof
code)
- Preserve errno during the first allocation. A readlink(2) call during
initialization fails unless /etc/malloc.conf exists, so errno was typically
set during the first allocation prior to this fix (a sketch of the pattern
follows this file's diff).
- Fix compilation warnings reported by gcc 4.8.1.
* 3.4.0 (June 2, 2013)
This version is essentially a small bugfix release, but the addition of
@@ -60,7 +75,7 @@ found in the git revision history:
Bug fixes:
- Fix "arenas.extend" mallctl to output the number of arenas.
- Fix chunk_recycyle() to unconditionally inform Valgrind that returned memory
- Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
is undefined.
- Fix build break on FreeBSD related to alloca.h.
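The errno entry above boils down to a save/restore around a readlink(2) call that is expected to fail when /etc/malloc.conf does not exist; the actual change is in the malloc_conf_init() hunk near the end of this commit. A minimal standalone sketch of the idea (the function name and output are illustrative, not jemalloc's):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Save errno before a readlink(2) that usually fails, and restore
     * it afterward so the first allocation never leaks a spurious
     * ENOENT to the caller.
     */
    static void
    read_conf_link(char *buf, size_t buflen)
    {
        int saved_errno = errno;
        ssize_t linklen = readlink("/etc/malloc.conf", buf, buflen - 1);

        if (linklen == -1) {
            /* No configuration link; treat as empty, restore errno. */
            linklen = 0;
            errno = saved_errno;
        }
        buf[linklen] = '\0';
    }

    int
    main(void)
    {
        char buf[PATH_MAX];

        errno = 0;
        read_conf_link(buf, sizeof(buf));
        printf("conf=\"%s\" errno=%d\n", buf, errno);
        return (0);
    }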

View File

@@ -55,6 +55,8 @@ PIC_CFLAGS = @PIC_CFLAGS@
CTARGET = @CTARGET@
LDTARGET = @LDTARGET@
MKLIB = @MKLIB@
AR = @AR@
ARFLAGS = @ARFLAGS@
CC_MM = @CC_MM@
ifeq (macho, $(ABI))
@@ -185,7 +187,7 @@ $(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(COBJS)
$(STATIC_LIBS):
@mkdir -p $(@D)
$(MKLIB) $+
$(AR) $(ARFLAGS)@AROUT@ $+
$(objroot)test/bitmap$(EXE): $(objroot)src/bitmap.$(O)

README
View File

@@ -1,10 +1,14 @@
jemalloc is a general-purpose scalable concurrent malloc(3) implementation.
This distribution is a "portable" implementation that currently targets
FreeBSD, Linux, Apple OS X, and MinGW. jemalloc is included as the default
allocator in the FreeBSD and NetBSD operating systems, and it is used by the
Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending
on your needs, one of the other divergent versions may suit your needs better
than this distribution.
jemalloc is a general purpose malloc(3) implementation that emphasizes
fragmentation avoidance and scalable concurrency support. jemalloc first came
into use as the FreeBSD libc allocator in 2005, and since then it has found its
way into numerous applications that rely on its predictable behavior. In 2010
jemalloc development efforts broadened to include developer support features
such as heap profiling, Valgrind integration, and extensive monitoring/tuning
hooks. Modern jemalloc releases continue to be integrated back into FreeBSD,
and therefore versatility remains critical. Ongoing development efforts trend
toward making jemalloc among the best allocators for a broad range of demanding
applications, and eliminating/mitigating weaknesses that have practical
repercussions for real world applications.
The COPYING file contains copyright and licensing information.

View File

@@ -226,9 +226,15 @@ PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
EXTRA_LDFLAGS=
MKLIB='ar crus $@'
ARFLAGS='crus'
AROUT=' $@'
CC_MM=1
AN_MAKEVAR([AR], [AC_PROG_AR])
AN_PROGRAM([ar], [AC_PROG_AR])
AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
AC_PROG_AR
dnl Platform-specific settings. abi and RPATH can probably be determined
dnl programmatically, but doing so is error-prone, which makes it generally
dnl not worth the trouble.
@@ -310,7 +316,9 @@ case "${host}" in
EXTRA_LDFLAGS="-link -DEBUG"
CTARGET='-Fo$@'
LDTARGET='-Fe$@'
MKLIB='lib -nologo -out:$@'
AR='lib'
ARFLAGS='-nologo -out:'
AROUT='$@'
CC_MM=
else
importlib="${so}"
@@ -343,6 +351,8 @@ AC_SUBST([PIC_CFLAGS])
AC_SUBST([CTARGET])
AC_SUBST([LDTARGET])
AC_SUBST([MKLIB])
AC_SUBST([ARFLAGS])
AC_SUBST([AROUT])
AC_SUBST([CC_MM])
if test "x$abi" != "xpecoff"; then
@@ -403,7 +413,6 @@ AC_SUBST([enable_autogen])
AC_PROG_INSTALL
AC_PROG_RANLIB
AC_PATH_PROG([AR], [ar], [false], [$PATH])
AC_PATH_PROG([LD], [ld], [false], [$PATH])
AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])

View File

@@ -441,6 +441,7 @@ void arena_postfork_child(arena_t *arena);
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
@@ -451,6 +452,7 @@ size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -497,11 +499,18 @@ arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
return (&arena_mapp_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
return (*arena_mapbitsp_get(chunk, pageind));
return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -584,83 +593,90 @@ arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{
*mapbitsp = mapbits;
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
*mapbitsp = size | (*mapbitsp & PAGE_MASK);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_DIRTY) == flags);
unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
*mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
| unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
CHUNK_MAP_BININD_SHIFT);
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
(binind << CHUNK_MAP_BININD_SHIFT));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
assert(binind < BININD_INVALID);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & CHUNK_MAP_DIRTY) == flags);
unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
*mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
flags | unzeroed | CHUNK_MAP_ALLOCATED;
unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
size_t *mapbitsp;
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
mapbitsp = arena_mapbitsp_get(chunk, pageind);
*mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
unzeroed);
}
JEMALLOC_INLINE bool
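The rewrites above funnel every map-bits access through arena_mapbitsp_read() and arena_mapbitsp_write() instead of raw *mapbitsp dereferences, so any instrumentation of these reads and writes has a single choke point. A hedged sketch of the same read-modify-write discipline, with generic names rather than jemalloc's:

    #include <assert.h>
    #include <stddef.h>

    /* All accesses go through one inline pair, so assertions, Valgrind
     * client requests, or atomics can later be added in exactly two
     * places. */
    static inline size_t
    bits_read(size_t *bitsp)
    {
        return (*bitsp);
    }

    static inline void
    bits_write(size_t *bitsp, size_t bits)
    {
        *bitsp = bits;
    }

    #define FLAG_DIRTY ((size_t)0x1)

    /* Read-modify-write through the funnel, mirroring the
     * arena_mapbits_*_set() rewrites above. */
    static void
    bits_set_dirty(size_t *bitsp)
    {
        bits_write(bitsp, bits_read(bitsp) | FLAG_DIRTY);
    }

    int
    main(void)
    {
        size_t bits = 0;

        bits_set_dirty(&bits);
        assert(bits == FLAG_DIRTY);
        return (0);
    }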

View File

@@ -232,9 +232,18 @@ static const bool config_ivsalloc =
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
/*
* JEMALLOC_ALWAYS_INLINE is used within header files for functions that are
* static inline functions if inlining is enabled, and single-definition
* library-private functions if inlining is disabled.
*
* JEMALLOC_ALWAYS_INLINE_C is for use in .c files, in which case the denoted
* functions are always static, regardless of whether inlining is enabled.
*/
#ifdef JEMALLOC_DEBUG
/* Disable inlining to make debugging easier. */
# define JEMALLOC_ALWAYS_INLINE
# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
# define inline
#else
@@ -242,8 +251,11 @@ static const bool config_ivsalloc =
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
# define JEMALLOC_ALWAYS_INLINE_C \
static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
# ifdef _MSC_VER

View File

@@ -33,6 +33,8 @@
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)

View File

@@ -313,6 +313,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -321,7 +322,6 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (config_stats)
tbin->tstats.nrequests++;
@@ -368,11 +368,11 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (config_stats)
tbin->tstats.nrequests++;
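The moved VALGRIND_MAKE_MEM_UNDEFINED call is the whole fix here: the old unconditional call at the end ran after the zero path's memset(), telling Valgrind that freshly zeroed memory was undefined, so later legitimate reads drew warnings. The corrected ordering marks memory undefined only where the caller really receives uninitialized bytes, or before the memset() that then re-defines the range. A hedged sketch, with malloc() standing in for jemalloc's internal runs:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>
    #include <valgrind/memcheck.h>

    static void *
    alloc_sketch(size_t size, bool zero)
    {
        void *ret = malloc(size);

        if (ret == NULL)
            return (NULL);
        if (zero == false) {
            /* Caller receives uninitialized bytes: say so. */
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        } else {
            /* Mark undefined first; the memset() then marks the range
             * defined as a side effect of writing it. The old ordering
             * ran the client request after the memset(), wrongly
             * tainting valid zeroed memory. */
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
            memset(ret, 0, size);
        }
        return (ret);
    }

    int
    main(void)
    {
        char *p = alloc_sketch(64, true);
        int r;

        if (p == NULL)
            return (1);
        /* Clean under memcheck: this byte was defined by memset(). */
        r = (p[0] == 0) ? 0 : 1;
        free(p);
        return (r);
    }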

View File

@@ -368,14 +368,21 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
(npages << LG_PAGE));
}
static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), PAGE);
}
static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), PAGE);
arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
}
@@ -458,6 +465,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
} else if (config_debug) {
arena_run_page_validate_zeroed(
chunk, run_ind+i);
} else {
arena_run_page_mark_zeroed(
chunk, run_ind+i);
}
}
} else {
@@ -467,6 +477,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
*/
arena_run_zero(chunk, run_ind, need_pages);
}
} else {
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
/*
@@ -508,9 +521,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_run_page_validate_zeroed(chunk,
run_ind+need_pages-1);
}
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
@@ -569,17 +582,24 @@ arena_chunk_alloc(arena_t *arena)
* unless the chunk is not zeroed.
*/
if (zero == false) {
VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_mapp_get(chunk, map_bias+1),
(size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else if (config_debug) {
} else {
VALGRIND_MAKE_MEM_DEFINED(
(void *)arena_mapp_get(chunk, map_bias+1),
(void *)((uintptr_t)
arena_mapp_get(chunk, chunk_npages-1)
- (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
(size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
assert(arena_mapbits_unzeroed_get(chunk,
i) == unzeroed);
}
}
}
arena_mapbits_unallocated_set(chunk, chunk_npages-1,
@@ -1458,6 +1478,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -1466,7 +1487,6 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
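Besides repeating the tcache-style ordering fix in arena_malloc_small(), the arena_chunk_alloc() hunk above teaches Valgrind about the map portion of the chunk header: a kernel-zeroed chunk's map is marked defined (its zeroes are valid data, with debug builds still asserting so), while a recycled chunk's map is marked undefined before the unzeroed bits are rewritten. A sketch of that range-marking pattern over a header array (types and names invented):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <valgrind/memcheck.h>

    typedef struct {
        size_t bits;
    } map_elm_t;

    /* Mark the interior elements [first, last) of a header array. The
     * byte length is computed from element addresses, as in the hunk
     * above, so it stays correct if the element type grows. */
    static void
    mark_header(map_elm_t *map, size_t first, size_t last, bool zeroed)
    {
        void *start = (void *)&map[first];
        size_t len = (uintptr_t)&map[last] - (uintptr_t)&map[first];

        if (zeroed) {
            /* Kernel-zeroed pages: contents are valid zeroes. */
            VALGRIND_MAKE_MEM_DEFINED(start, len);
        } else {
            /* Recycled pages: stale until explicitly rewritten. */
            VALGRIND_MAKE_MEM_UNDEFINED(start, len);
        }
    }

    int
    main(void)
    {
        static map_elm_t map[8];

        mark_header(map, 1, 7, true);
        return (0);
    }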

View File

@@ -294,7 +294,7 @@ label_return:
if (xnode != NULL)
base_node_dealloc(xnode);
if (xprev != NULL)
base_node_dealloc(prev);
base_node_dealloc(xprev);
}
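The one-character fix above matters because prev may still be linked into the tree, while xprev is the node that was actually extracted during coalescing and is now the caller's to release. A hedged, list-based sketch of the invariant (the real code coalesces extents in a red-black tree; names here are invented):

    #include <assert.h>
    #include <stdlib.h>

    typedef struct extent extent_t;
    struct extent {
        extent_t *next;
        size_t size;
    };

    /* Merge node's successor into node and return the unlinked
     * successor; the caller now owns it and must free exactly that
     * node. */
    static extent_t *
    coalesce_next(extent_t *node)
    {
        extent_t *xnext = node->next;

        if (xnext != NULL) {
            node->size += xnext->size;
            node->next = xnext->next;
        }
        return (xnext);
    }

    int
    main(void)
    {
        extent_t *a = calloc(1, sizeof(extent_t));
        extent_t *b = calloc(1, sizeof(extent_t));
        extent_t *xnext;

        a->next = b;
        a->size = 4;
        b->size = 8;
        xnext = coalesce_next(a);
        assert(xnext == b && a->size == 12);
        /* Free the node that was unlinked, never a pointer that might
         * still be reachable from the structure. */
        free(xnext);
        free(a);
        return (0);
    }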
void

View File

@@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
static bool
ctl_grow(void)
{
size_t astats_size;
ctl_arena_stats_t *astats;
arena_t **tarenas;
/* Extend arena stats and arenas arrays. */
astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
if (ctl_stats.narenas == narenas_auto) {
/* ctl_stats.arenas and arenas came from base_alloc(). */
astats = (ctl_arena_stats_t *)imalloc(astats_size);
/* Allocate extended arena stats and arenas arrays. */
astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
sizeof(ctl_arena_stats_t));
if (astats == NULL)
return (true);
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
sizeof(arena_t *));
if (tarenas == NULL) {
idalloc(astats);
return (true);
}
memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
} else {
astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
astats_size, 0, 0, false, false);
if (astats == NULL)
return (true);
tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
sizeof(arena_t *), 0, 0, false, false);
if (tarenas == NULL)
/* Initialize the new astats element. */
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
idalloc(tarenas);
idalloc(astats);
return (true);
}
/* Initialize the new astats and arenas elements. */
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
return (true);
tarenas[ctl_stats.narenas] = NULL;
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
@@ -593,13 +580,34 @@ ctl_grow(void)
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
ctl_stats.arenas = astats;
ctl_stats.narenas++;
/* Initialize the new arenas element. */
tarenas[ctl_stats.narenas] = NULL;
{
arena_t **arenas_old = arenas;
/*
* Swap extended arenas array into place. Although ctl_mtx
* protects this function from other threads extending the
* array, it does not protect from other threads mutating it
* (i.e. initializing arenas and setting array elements to
* point to them). Therefore, array copying must happen under
* the protection of arenas_lock.
*/
malloc_mutex_lock(&arenas_lock);
arenas = tarenas;
memcpy(arenas, arenas_old, ctl_stats.narenas *
sizeof(arena_t *));
narenas_total++;
arenas_extend(narenas_total - 1);
malloc_mutex_unlock(&arenas_lock);
/*
* Deallocate arenas_old only if it came from imalloc() (not
* base_alloc()).
*/
if (ctl_stats.narenas != narenas_auto)
idalloc(arenas_old);
}
ctl_stats.arenas = astats;
ctl_stats.narenas++;
return (false);
}
@@ -1109,7 +1117,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
uint64_t newval;
UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
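The long comment inside ctl_grow() above carries the key invariant: the enlarged arenas array must be published and filled while arenas_lock is held, because that lock, not ctl_mtx, is what protects threads that store into individual elements. A minimal sketch of the grow-and-swap discipline with generic names (jemalloc additionally has to special-case arrays that originally came from base_alloc()):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
    static void **slots;
    static size_t nslots;

    /* Grow the table: copy and publish under the same lock that
     * element writers take, so no concurrent store is lost during the
     * copy. */
    static int
    slots_grow(size_t new_nslots)
    {
        void **new_slots = calloc(new_nslots, sizeof(void *));
        void **old_slots;

        if (new_slots == NULL)
            return (1);
        pthread_mutex_lock(&slots_lock);
        old_slots = slots;
        if (nslots > 0)
            memcpy(new_slots, old_slots, nslots * sizeof(void *));
        slots = new_slots;
        nslots = new_nslots;
        pthread_mutex_unlock(&slots_lock);
        free(old_slots);
        return (0);
    }

    int
    main(void)
    {
        return (slots_grow(8));
    }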

View File

@@ -282,7 +282,7 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}
static JEMALLOC_ATTR(always_inline) void
JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{
@@ -299,7 +299,7 @@ malloc_thread_init(void)
quarantine_alloc_hook();
}
static JEMALLOC_ATTR(always_inline) bool
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{
@@ -436,8 +436,9 @@ malloc_conf_init(void)
}
break;
case 1: {
int linklen = 0;
#ifndef _WIN32
int linklen;
int saved_errno = errno;
const char *linkname =
# ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
@@ -446,21 +447,20 @@
# endif
;
if ((linklen = readlink(linkname, buf,
sizeof(buf) - 1)) != -1) {
/*
* Use the contents of the "/etc/malloc.conf"
* Try to use the contents of the "/etc/malloc.conf"
* symbolic link's name.
*/
linklen = readlink(linkname, buf, sizeof(buf) - 1);
if (linklen == -1) {
/* No configuration specified. */
linklen = 0;
/* restore errno */
set_errno(saved_errno);
}
#endif
buf[linklen] = '\0';
opts = buf;
} else
#endif
{
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
} case 2: {
const char *envname =
@@ -1402,7 +1402,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
*/
#ifdef JEMALLOC_EXPERIMENTAL
static JEMALLOC_ATTR(always_inline) void *
JEMALLOC_ALWAYS_INLINE_C void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{

View File

@@ -23,7 +23,8 @@ je_thread_start(void *arg)
size_t mib[3];
size_t miblen = sizeof(mib) / sizeof(size_t);
const char *dss_precs[] = {"disabled", "primary", "secondary"};
const char *dss = dss_precs[thread_ind % 4];
const char *dss = dss_precs[thread_ind %
(sizeof(dss_precs)/sizeof(char*))];
if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) {
malloc_printf("Error in mallctlnametomib()\n");
abort();
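The fix above replaces a hard-coded % 4, which indexed one past the end of the three-element dss_precs array, with a modulus derived from the array itself. The sizeof idiom keeps the bound in sync with the initializer list:

    #include <stdio.h>

    int
    main(void)
    {
        const char *dss_precs[] = {"disabled", "primary", "secondary"};
        size_t n = sizeof(dss_precs) / sizeof(dss_precs[0]);
        size_t i;

        /* Cycles safely through all entries, however many there are. */
        for (i = 0; i < 7; i++)
            printf("%zu -> %s\n", i, dss_precs[i % n]);
        return (0);
    }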

View File

@@ -104,12 +104,12 @@ main(void)
je_thread_start(NULL);
je_thread_create(&thread, je_thread_start, NULL);
je_thread_join(thread, (void *)&ret);
je_thread_join(thread, NULL);
je_thread_start(NULL);
je_thread_create(&thread, je_thread_start, NULL);
je_thread_join(thread, (void *)&ret);
je_thread_join(thread, NULL);
je_thread_start(NULL);

View File

@@ -72,8 +72,12 @@ main(void)
(void *)&arena_ind);
}
for (i = 0; i < NTHREADS; i++)
je_thread_join(threads[i], (void *)&ret);
for (i = 0; i < NTHREADS; i++) {
intptr_t join_ret;
je_thread_join(threads[i], (void *)&join_ret);
if (join_ret != 0)
ret = 1;
}
label_return:
malloc_printf("Test end\n");
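These test fixes address two related problems: je_thread_join() writes a pointer-sized value through its second argument, so passing (void *)&ret, where ret is an int, could scribble past it, and each successive join clobbered the previous thread's status. Where the results matter (above), each value is read into a properly sized local and folded into the aggregate; where they do not (the neighboring test files), NULL is passed instead. A plain-pthreads sketch of the aggregation pattern (jemalloc's je_thread_* wrappers are elided):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NTHREADS 4

    static void *
    worker(void *arg)
    {
        (void)arg;
        /* Per-thread status: 0 on success. */
        return ((void *)(intptr_t)0);
    }

    int
    main(void)
    {
        pthread_t threads[NTHREADS];
        int ret = 0;
        unsigned i;

        for (i = 0; i < NTHREADS; i++)
            pthread_create(&threads[i], NULL, worker, NULL);
        for (i = 0; i < NTHREADS; i++) {
            void *join_ret;

            /* Join into a pointer-sized local, then fold the status
             * into the aggregate rather than letting the last join
             * win. */
            pthread_join(threads[i], &join_ret);
            if ((intptr_t)join_ret != 0)
                ret = 1;
        }
        printf("aggregate result: %d\n", ret);
        return (ret);
    }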

View File

@@ -77,12 +77,12 @@ main(void)
je_thread_start(NULL);
je_thread_create(&thread, je_thread_start, NULL);
je_thread_join(thread, (void *)&ret);
je_thread_join(thread, NULL);
je_thread_start(NULL);
je_thread_create(&thread, je_thread_start, NULL);
je_thread_join(thread, (void *)&ret);
je_thread_join(thread, NULL);
je_thread_start(NULL);