Merge branch.

Jason Evans 2016-05-03 17:34:40 -07:00
commit e02b83cc5e
23 changed files with 330 additions and 173 deletions


@ -4,6 +4,27 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.1.1 (May 3, 2016)
This bugfix release resolves a variety of mostly minor issues, though the
bitmap fix is critical for 64-bit Windows.
Bug fixes:
- Fix the linear scan version of bitmap_sfu() to shift by the proper amount
even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
Windows. (@jasone)
- Fix hashing functions to avoid unaligned memory accesses (and resulting
crashes). This is relevant at least to some ARM-based platforms.
(@rkmisra)
- Fix fork()-related lock rank ordering reversals. These reversals were
unlikely to cause deadlocks in practice except when heap profiling was
enabled and active. (@jasone)
- Fix various chunk leaks in OOM code paths. (@jasone)
- Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
- Fix MSVC-specific build/test issues. (@rustyx, yuslepukhin)
- Fix a variety of test failures that were due to test fragility rather than
core bugs. (@jasone)
* 4.1.0 (February 28, 2016)
This release is primarily about optimizations, but it also incorporates a lot


@ -138,6 +138,7 @@ TESTS_UNIT := $(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \


@ -1016,7 +1016,7 @@ for (i = 0; i < nbins; i++) {
allocate memory during application initialization and then deadlock
internally when jemalloc in turn calls
<function>atexit<parameter/></function>, so this option is not
univerally usable (though the application can register its own
universally usable (though the application can register its own
<function>atexit<parameter/></function> function with equivalent
functionality). Therefore, this option should only be used with care;
it is primarily intended as a performance tuning aid during application
@ -1320,7 +1320,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
option. Note that <function>atexit<parameter/></function> may allocate
memory during application initialization and then deadlock internally
when jemalloc in turn calls <function>atexit<parameter/></function>, so
this option is not univerally usable (though the application can
this option is not universally usable (though the application can
register its own <function>atexit<parameter/></function> function with
equivalent functionality). This option is disabled by
default.</para></listitem>
@ -2062,7 +2062,7 @@ typedef struct {
[<option>--enable-prof</option>]
</term>
<listitem><para>Average number of bytes allocated between
inverval-based profile dumps. See the
interval-based profile dumps. See the
<link
linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
option for additional information.</para></listitem>


@ -584,7 +584,10 @@ void arena_nthreads_inc(arena_t *arena);
void arena_nthreads_dec(arena_t *arena);
arena_t *arena_new(unsigned ind);
bool arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_prefork0(arena_t *arena);
void arena_prefork1(arena_t *arena);
void arena_prefork2(arena_t *arena);
void arena_prefork3(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);


@ -223,7 +223,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
i++;
g = bitmap[i];
}
bit = (bit - 1) + (i << 6);
bit = (bit - 1) + (i << LG_BITMAP_GROUP_NBITS);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
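For context on the hunk above: jemalloc's bitmap groups are unsigned longs, so on LLP64 targets such as 64-bit Windows (where sizeof(long) is 4 even though sizeof(void *) is 8) each group holds only 32 bits and the per-group shift must be 5, not a hardcoded 6. The following stand-alone sketch is illustrative only; it is not jemalloc code, and LG_GROUP_NBITS is a made-up stand-in for LG_BITMAP_GROUP_NBITS.

/*
 * Illustrative only: shows why hardcoding a shift of 6 breaks when bitmap
 * groups are 32-bit unsigned longs, as on 64-bit Windows (LLP64).
 */
#include <limits.h>
#include <stdio.h>

/* Stand-in for LG_BITMAP_GROUP_NBITS: log2 of bits per unsigned long. */
#define LG_GROUP_NBITS ((sizeof(unsigned long) == 8) ? 6 : 5)

int
main(void)
{
	size_t group_index = 3;	/* pretend the first unset bit lives in group 3 */
	size_t bit_in_group = 7;

	/* Correct: scale by the actual group width. */
	size_t good = bit_in_group + (group_index << LG_GROUP_NBITS);
	/* Wrong on LLP64: assumes 64-bit groups, so it lands on the wrong bit. */
	size_t bad = bit_in_group + (group_index << 6);

	printf("bits per group: %zu, correct index: %zu, shift-by-6 index: %zu\n",
	    sizeof(unsigned long) * CHAR_BIT, good, bad);
	return (0);
}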


@ -62,12 +62,8 @@ void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed);
void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed);
void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool committed);
bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
size_t length);
void *chunk, size_t size, bool zeroed, bool committed);
bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);


@ -53,7 +53,7 @@ hash_get_block_32(const uint32_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
memcpy(&ret, &p[i], sizeof(uint32_t));
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
@ -68,7 +68,7 @@ hash_get_block_64(const uint64_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
memcpy(&ret, &p[i], sizeof(uint64_t));
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
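The two hunks above are what the ChangeLog's unaligned-access fix refers to: on strict-alignment targets (notably some ARM platforms), dereferencing a misaligned uint32_t * or uint64_t * can fault, whereas copying through memcpy() is always well-defined and typically compiles down to a single load where the hardware allows it. A stand-alone sketch of the idiom follows; it is illustrative only, and load_u32() is not a jemalloc function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 32-bit value from a possibly misaligned address without UB. */
static uint32_t
load_u32(const void *src)
{
	uint32_t v;

	/* memcpy() imposes no alignment requirement on src. */
	memcpy(&v, src, sizeof(v));
	return (v);
}

int
main(void)
{
	uint8_t buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};

	/* buf + 1 is generally not 4-byte aligned. */
	printf("%#x\n", load_u32(buf + 1));
	return (0);
}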


@ -21,7 +21,9 @@ arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_large_impl
arena_dalloc_junk_small
arena_dalloc_junk_small_impl
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
@ -81,7 +83,10 @@ arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
arena_prefork
arena_prefork0
arena_prefork1
arena_prefork2
arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
@ -123,6 +128,11 @@ atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
atomic_write_p
atomic_write_u
atomic_write_uint32
atomic_write_uint64
atomic_write_z
base_alloc
base_boot
base_postfork_child
@ -148,7 +158,6 @@ chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
@ -168,7 +177,6 @@ chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
chunk_purge_arena
chunk_purge_wrapper
chunk_register
chunks_rtree
@ -200,6 +208,8 @@ extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
extent_node_committed_get
extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
@ -210,6 +220,8 @@ extent_node_size_get
extent_node_size_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
@ -227,6 +239,8 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
extent_tree_szad_destroy
extent_tree_szad_destroy_recurse
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
@ -304,6 +318,7 @@ jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
@ -331,6 +346,8 @@ narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
nhclasses
nlclasses
nstime_add
nstime_compare
nstime_copy
@ -344,6 +361,7 @@ nstime_nsec
nstime_sec
nstime_subtract
nstime_update
nstime_update_impl
opt_abort
opt_decay_time
opt_dss
@ -384,6 +402,7 @@ pow2_ceil_u64
pow2_ceil_zu
prng_lg_range
prng_range
prof_active
prof_active_get
prof_active_get_unlocked
prof_active_set
@ -393,6 +412,7 @@ prof_backtrace
prof_boot0
prof_boot1
prof_boot2
prof_bt_count
prof_dump_header
prof_dump_open
prof_free
@ -410,7 +430,8 @@ prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
prof_prefork0
prof_prefork1
prof_realloc
prof_reset
prof_sample_accum_update
@ -419,6 +440,7 @@ prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
@ -506,6 +528,13 @@ ticker_tick
ticker_ticks
tsd_arena_get
tsd_arena_set
tsd_arenap_get
tsd_arenas_tdata_bypass_get
tsd_arenas_tdata_bypass_set
tsd_arenas_tdata_bypassp_get
tsd_arenas_tdata_get
tsd_arenas_tdata_set
tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
@ -514,6 +543,9 @@ tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_get
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_wrapper_get
tsd_wrapper_set
tsd_initialized
@ -523,17 +555,23 @@ tsd_init_head
tsd_nominal
tsd_prof_tdata_get
tsd_prof_tdata_set
tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
tsd_tcachep_get
tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
tsd_thread_deallocatedp_get
tsd_tls
tsd_tsd
u2rz


@ -316,7 +316,8 @@ bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_prefork0(void);
void prof_prefork1(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);


@ -1,26 +1,6 @@
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
#ifndef ENOENT
# define ENOENT ERROR_PATH_NOT_FOUND
#endif
#ifndef EINVAL
# define EINVAL ERROR_BAD_ARGUMENTS
#endif
#ifndef EAGAIN
# define EAGAIN ERROR_OUTOFMEMORY
#endif
#ifndef EPERM
# define EPERM ERROR_WRITE_FAULT
#endif
#ifndef EFAULT
# define EFAULT ERROR_INVALID_ADDRESS
#endif
#ifndef ENOMEM
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#endif
#ifndef ERANGE
# define ERANGE ERROR_INVALID_DATA
#endif
#include <errno.h>
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */


@ -54,6 +54,7 @@
<ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
@ -69,6 +70,7 @@
<ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
<ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h" />
@ -103,11 +105,13 @@
<ClCompile Include="..\..\..\..\src\mutex.c" />
<ClCompile Include="..\..\..\..\src\nstime.c" />
<ClCompile Include="..\..\..\..\src\pages.c" />
<ClCompile Include="..\..\..\..\src\prng.c" />
<ClCompile Include="..\..\..\..\src\prof.c" />
<ClCompile Include="..\..\..\..\src\quarantine.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\stats.c" />
<ClCompile Include="..\..\..\..\src\tcache.c" />
<ClCompile Include="..\..\..\..\src\ticker.c" />
<ClCompile Include="..\..\..\..\src\tsd.c" />
<ClCompile Include="..\..\..\..\src\util.c" />
</ItemGroup>
@ -227,7 +231,7 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
@ -236,7 +240,7 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
<OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
<IntDir>$(Platform)\$(Configuration)\</IntDir>
<TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
<TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>


@ -101,6 +101,9 @@
<ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
@ -146,6 +149,9 @@
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
<ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
<Filter>Header Files\internal</Filter>
</ClInclude>
@ -214,9 +220,15 @@
<ClCompile Include="..\..\..\..\src\mutex.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\pages.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prng.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\prof.c">
<Filter>Source Files</Filter>
</ClCompile>
@ -232,14 +244,14 @@
<ClCompile Include="..\..\..\..\src\tcache.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\ticker.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\tsd.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\util.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\nstime.c">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
</Project>


@ -223,7 +223,7 @@
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
@ -306,7 +306,7 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
<AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>


@ -617,8 +617,8 @@ arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
chunk_dalloc_wrapper(arena, chunk_hooks,
(void *)chunk, chunksize, *commit);
chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
chunksize, *zero, *commit);
chunk = NULL;
}
}
@ -629,7 +629,7 @@ arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
chunksize, *commit);
chunksize, *zero, *commit);
chunk = NULL;
}
@ -1024,7 +1024,7 @@ arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_unlock(&arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero,
true);
err = true;
}
@ -1050,8 +1050,8 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
}
arena_nactive_add(arena, udiff >> LG_PAGE);
err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
chunksize, zero, true) == NULL);
err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize,
zero, true) == NULL);
malloc_mutex_unlock(&arena->lock);
if (err) {
err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
@ -1059,7 +1059,7 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero,
true);
err = true;
}
@ -1707,7 +1707,7 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_arena(arena, chunk_hooks, addr, size,
chunk_dalloc_wrapper(arena, chunk_hooks, addr, size,
zeroed, committed);
} else {
arena_chunk_t *chunk =
@ -2423,7 +2423,7 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
uintptr_t random_offset;
arena_run_t *run;
arena_chunk_map_misc_t *miscelm;
UNUSED bool idump;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
/* Large allocation. */
usize = index2size(binind);
@ -3646,16 +3646,34 @@ arena_boot(void)
}
void
arena_prefork(arena_t *arena)
arena_prefork0(arena_t *arena)
{
malloc_mutex_prefork(&arena->lock);
}
void
arena_prefork1(arena_t *arena)
{
malloc_mutex_prefork(&arena->chunks_mtx);
}
void
arena_prefork2(arena_t *arena)
{
malloc_mutex_prefork(&arena->node_cache_mtx);
}
void
arena_prefork3(arena_t *arena)
{
unsigned i;
malloc_mutex_prefork(&arena->lock);
malloc_mutex_prefork(&arena->huge_mtx);
malloc_mutex_prefork(&arena->chunks_mtx);
malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock);
malloc_mutex_prefork(&arena->huge_mtx);
}
void
@ -3663,11 +3681,11 @@ arena_postfork_parent(arena_t *arena)
{
unsigned i;
malloc_mutex_postfork_parent(&arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock);
malloc_mutex_postfork_parent(&arena->node_cache_mtx);
malloc_mutex_postfork_parent(&arena->chunks_mtx);
malloc_mutex_postfork_parent(&arena->huge_mtx);
malloc_mutex_postfork_parent(&arena->lock);
}
@ -3676,10 +3694,10 @@ arena_postfork_child(arena_t *arena)
{
unsigned i;
malloc_mutex_postfork_child(&arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock);
malloc_mutex_postfork_child(&arena->node_cache_mtx);
malloc_mutex_postfork_child(&arena->chunks_mtx);
malloc_mutex_postfork_child(&arena->huge_mtx);
malloc_mutex_postfork_child(&arena->lock);
}
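Splitting arena_prefork() into arena_prefork0() through arena_prefork3() lets _malloc_prefork() (see the hunk later in this diff) acquire arena->lock, chunks_mtx, node_cache_mtx, the bin locks, and huge_mtx in the same rank order used during normal operation across all arenas, which is what eliminates the fork()-related lock rank reversals mentioned in the ChangeLog. The general protocol these functions feed into is the pthread_atfork() prepare/parent/child handler set; a minimal sketch under assumed, illustrative lock names (this is not jemalloc's actual handler):

/*
 * Minimal sketch of the prepare/parent/child fork protocol that the
 * arena_prefork0..3 split serves; the lock names are illustrative, not
 * jemalloc's.  All locks are taken in one fixed order before fork() and
 * released in reverse afterwards, so the child never inherits them in an
 * inconsistent state.
 */
#include <pthread.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prepare(void)
{
	/* Same order as normal operation: outer before inner. */
	pthread_mutex_lock(&outer_lock);
	pthread_mutex_lock(&inner_lock);
}

static void
release(void)
{
	/* Reverse order, run in both parent and child. */
	pthread_mutex_unlock(&inner_lock);
	pthread_mutex_unlock(&outer_lock);
}

void
install_fork_handlers(void)
{
	pthread_atfork(prepare, release, release);
}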


@ -425,8 +425,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
arena_t *arena;
arena = chunk_arena_get(arena_ind);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit,
arena->dss_prec);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@ -579,8 +579,18 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
arena_maybe_purge(arena);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool zeroed, bool committed)
{
@ -604,27 +614,6 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
}
static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
unsigned arena_ind)
{
if (!have_dss || !chunk_in_dss(chunk))
return (chunk_dalloc_mmap(chunk, size));
return (true);
}
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, bool committed)
{
chunk_hooks_assure_initialized(arena, chunk_hooks);
chunk_hooks->dalloc(chunk, size, committed, arena->ind);
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
@ -643,8 +632,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
assert(chunk != NULL);
@ -657,15 +647,6 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
length));
}
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
unsigned arena_ind)
{
return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
length));
}
bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
size_t size, size_t offset, size_t length)


@ -136,7 +136,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(arena,
&chunk_hooks, cpad, cpad_size,
true);
false, true);
}
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(


@ -2644,7 +2644,8 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
unsigned i, narenas;
unsigned i, j, narenas;
arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@ -2652,18 +2653,31 @@ _malloc_prefork(void)
#endif
assert(malloc_initialized());
narenas = narenas_total_get();
/* Acquire all mutexes in a safe order. */
ctl_prefork();
prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(i, false)) != NULL)
arena_prefork(arena);
prof_prefork0();
for (i = 0; i < 3; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(j, false)) != NULL) {
switch (i) {
case 0: arena_prefork0(arena); break;
case 1: arena_prefork1(arena); break;
case 2: arena_prefork2(arena); break;
default: not_reached();
}
}
}
}
chunk_prefork();
base_prefork();
chunk_prefork();
for (i = 0; i < narenas; i++) {
if ((arena = arena_get(i, false)) != NULL)
arena_prefork3(arena);
}
prof_prefork1();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@ -2683,16 +2697,16 @@ _malloc_postfork(void)
assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
base_postfork_parent();
chunk_postfork_parent();
base_postfork_parent();
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(i, false)) != NULL)
arena_postfork_parent(arena);
}
malloc_mutex_postfork_parent(&arenas_lock);
prof_postfork_parent();
malloc_mutex_postfork_parent(&arenas_lock);
ctl_postfork_parent();
}
@ -2704,16 +2718,16 @@ jemalloc_postfork_child(void)
assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
base_postfork_child();
chunk_postfork_child();
base_postfork_child();
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
if ((arena = arena_get(i, false)) != NULL)
arena_postfork_child(arena);
}
malloc_mutex_postfork_child(&arenas_lock);
prof_postfork_child();
malloc_mutex_postfork_child(&arenas_lock);
ctl_postfork_child();
}


@ -2198,20 +2198,32 @@ prof_boot2(void)
}
void
prof_prefork(void)
prof_prefork0(void)
{
if (opt_prof) {
unsigned i;
malloc_mutex_prefork(&tdatas_mtx);
malloc_mutex_prefork(&prof_dump_mtx);
malloc_mutex_prefork(&bt2gctx_mtx);
malloc_mutex_prefork(&next_thr_uid_mtx);
malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(&gctx_locks[i]);
malloc_mutex_prefork(&tdatas_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_prefork(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_prefork(&gctx_locks[i]);
}
}
void
prof_prefork1(void)
{
if (opt_prof) {
malloc_mutex_prefork(&prof_active_mtx);
malloc_mutex_prefork(&prof_dump_seq_mtx);
malloc_mutex_prefork(&prof_gdump_mtx);
malloc_mutex_prefork(&next_thr_uid_mtx);
malloc_mutex_prefork(&prof_thread_active_init_mtx);
}
}
@ -2222,14 +2234,18 @@ prof_postfork_parent(void)
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_parent(&tdata_locks[i]);
malloc_mutex_postfork_parent(&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(&next_thr_uid_mtx);
malloc_mutex_postfork_parent(&prof_gdump_mtx);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
malloc_mutex_postfork_parent(&prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_parent(&gctx_locks[i]);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
malloc_mutex_postfork_parent(&next_thr_uid_mtx);
malloc_mutex_postfork_parent(&bt2gctx_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_parent(&tdata_locks[i]);
malloc_mutex_postfork_parent(&tdatas_mtx);
malloc_mutex_postfork_parent(&bt2gctx_mtx);
malloc_mutex_postfork_parent(&prof_dump_mtx);
}
}
@ -2240,14 +2256,18 @@ prof_postfork_child(void)
if (opt_prof) {
unsigned i;
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_child(&tdata_locks[i]);
malloc_mutex_postfork_child(&prof_thread_active_init_mtx);
malloc_mutex_postfork_child(&next_thr_uid_mtx);
malloc_mutex_postfork_child(&prof_gdump_mtx);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
malloc_mutex_postfork_child(&prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
malloc_mutex_postfork_child(&gctx_locks[i]);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
malloc_mutex_postfork_child(&next_thr_uid_mtx);
malloc_mutex_postfork_child(&bt2gctx_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
malloc_mutex_postfork_child(&tdata_locks[i]);
malloc_mutex_postfork_child(&tdatas_mtx);
malloc_mutex_postfork_child(&bt2gctx_mtx);
malloc_mutex_postfork_child(&prof_dump_mtx);
}
}


@ -468,7 +468,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
#define OPT_WRITE_UNSIGNED(n) \
if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
" opt."#n": %u\n", uv); \
}
#define OPT_WRITE_SIZE_T(n) \
if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
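The OPT_WRITE_UNSIGNED change above fixes a format/argument mismatch: the macro reads the option into the unsigned variable uv, but the old line printed sv (a size_t) with %zu, which is undefined behavior and is why opt.narenas was reported incorrectly per the ChangeLog. Below is a stand-alone sketch of the corrected pattern using the public API; it assumes an installed jemalloc with the default unprefixed mallctl() name.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Print an unsigned-valued option such as "opt.narenas". */
static void
print_unsigned_opt(const char *name)
{
	unsigned uv;
	size_t usz = sizeof(uv);

	if (mallctl(name, &uv, &usz, NULL, 0) == 0) {
		/* %u matches the unsigned argument that was actually read. */
		printf("  %s: %u\n", name, uv);
	}
}

int
main(void)
{
	print_unsigned_opt("opt.narenas");
	return (0);
}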


@ -121,6 +121,10 @@ TEST_BEGIN(test_chunk)
{
void *p;
size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
unsigned arena_ind;
int flags;
size_t hooks_mib[3], purge_mib[3];
size_t hooks_miblen, purge_miblen;
chunk_hooks_t new_hooks = {
chunk_alloc,
chunk_dalloc,
@ -132,10 +136,19 @@ TEST_BEGIN(test_chunk)
};
bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
/* Install custom chunk hooks. */
hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib,
&hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
hooks_mib[1] = (size_t)arena_ind;
old_size = sizeof(chunk_hooks_t);
new_size = sizeof(chunk_hooks_t);
assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
&new_hooks, new_size), 0, "Unexpected chunk_hooks error");
orig_hooks = old_hooks;
assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
@ -165,45 +178,49 @@ TEST_BEGIN(test_chunk)
"Unexpected arenas.hchunk.2.size failure");
/* Test dalloc/decommit/purge cascade. */
purge_miblen = sizeof(purge_mib)/sizeof(size_t);
assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
0, "Unexpected mallctlnametomib() failure");
purge_mib[1] = (size_t)arena_ind;
do_dalloc = false;
do_decommit = false;
p = mallocx(huge0 * 2, 0);
p = mallocx(huge0 * 2, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
did_dalloc = false;
did_decommit = false;
did_purge = false;
did_split = false;
xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0);
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.purge error");
xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
if (xallocx_success_a) {
assert_true(did_dalloc, "Expected dalloc");
assert_false(did_decommit, "Unexpected decommit");
assert_true(did_purge, "Expected purge");
}
assert_true(did_split, "Expected split");
dallocx(p, 0);
dallocx(p, flags);
do_dalloc = true;
/* Test decommit/commit and observe split/merge. */
do_dalloc = false;
do_decommit = true;
p = mallocx(huge0 * 2, 0);
p = mallocx(huge0 * 2, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
did_decommit = false;
did_commit = false;
did_split = false;
did_merge = false;
xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0);
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.purge error");
xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0);
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
if (xallocx_success_b)
assert_true(did_split, "Expected split");
xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2);
xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2);
assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
if (xallocx_success_b && xallocx_success_c)
assert_true(did_merge, "Expected merge");
dallocx(p, 0);
dallocx(p, flags);
do_dalloc = true;
do_decommit = false;
@ -214,42 +231,42 @@ TEST_BEGIN(test_chunk)
* successful xallocx() from size=huge2 to size=huge1 is
* guaranteed to leave trailing purgeable memory.
*/
p = mallocx(huge2, 0);
p = mallocx(huge2, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
did_purge = false;
assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
assert_zu_eq(xallocx(p, huge1, 0, flags), huge1,
"Unexpected xallocx() failure");
assert_true(did_purge, "Expected purge");
dallocx(p, 0);
dallocx(p, flags);
}
/* Test decommit for large allocations. */
do_decommit = true;
p = mallocx(large1, 0);
p = mallocx(large1, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.purge error");
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
did_decommit = false;
assert_zu_eq(xallocx(p, large0, 0, 0), large0,
assert_zu_eq(xallocx(p, large0, 0, flags), large0,
"Unexpected xallocx() failure");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected arena.0.purge error");
assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
0, "Unexpected arena.%u.purge error", arena_ind);
did_commit = false;
assert_zu_eq(xallocx(p, large1, 0, 0), large1,
assert_zu_eq(xallocx(p, large1, 0, flags), large1,
"Unexpected xallocx() failure");
assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
dallocx(p, 0);
dallocx(p, flags);
do_decommit = false;
/* Make sure non-huge allocation succeeds. */
p = mallocx(42, 0);
p = mallocx(42, flags);
assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
dallocx(p, flags);
/* Restore chunk hooks. */
assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks,
new_size), 0, "Unexpected chunk_hooks error");
assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
&old_hooks, new_size), 0, "Unexpected chunk_hooks error");
assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, &old_hooks, &old_size,
NULL, 0), 0, "Unexpected chunk_hooks error");
assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
"Unexpected alloc error");


@ -70,22 +70,27 @@ TEST_END
TEST_BEGIN(test_oom)
{
size_t hugemax, size, alignment;
hugemax = get_huge_size(get_nhuge()-1);
bool oom;
void *ptrs[3];
unsigned i;
/*
* It should be impossible to allocate two objects that each consume
* more than half the virtual address space.
* It should be impossible to allocate three objects that each consume
* nearly half the virtual address space.
*/
{
void *p;
p = mallocx(hugemax, 0);
if (p != NULL) {
assert_ptr_null(mallocx(hugemax, 0),
"Expected OOM for mallocx(size=%#zx, 0)", hugemax);
dallocx(p, 0);
}
hugemax = get_huge_size(get_nhuge()-1);
oom = false;
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
ptrs[i] = mallocx(hugemax, 0);
if (ptrs[i] == NULL)
oom = true;
}
assert_true(oom,
"Expected OOM during series of calls to mallocx(size=%zu, 0)",
hugemax);
for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
if (ptrs[i] != NULL)
dallocx(ptrs[i], 0);
}
#if LG_SIZEOF_PTR == 3

test/unit/fork.c (new file, 45 lines)

@ -0,0 +1,45 @@
#include "test/jemalloc_test.h"
#ifndef _WIN32
#include <sys/wait.h>
#endif
TEST_BEGIN(test_fork)
{
#ifndef _WIN32
void *p;
pid_t pid;
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");
pid = fork();
if (pid == -1) {
/* Error. */
test_fail("Unexpected fork() failure");
} else if (pid == 0) {
/* Child. */
exit(0);
} else {
int status;
/* Parent. */
free(p);
do {
if (waitpid(pid, &status, 0) == -1)
test_fail("Unexpected waitpid() failure");
} while (!WIFEXITED(status) && !WIFSIGNALED(status));
}
#else
test_skip("fork(2) is irrelevant to Windows");
#endif
}
TEST_END
int
main(void)
{
return (test(
test_fork));
}


@ -64,14 +64,15 @@ static void
hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
{
const int hashbytes = hash_variant_bits(variant) / 8;
VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
const int hashes_size = hashbytes * 256;
VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
VARIABLE_ARRAY(uint8_t, final, hashbytes);
unsigned i;
uint32_t computed, expected;
memset(key, 0, KEY_SIZE);
memset(hashes, 0, sizeof(hashes));
memset(final, 0, sizeof(final));
memset(hashes, 0, hashes_size);
memset(final, 0, hashbytes);
/*
* Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
@ -102,17 +103,17 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
/* Hash the result array. */
switch (variant) {
case hash_variant_x86_32: {
uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
uint32_t out = hash_x86_32(hashes, hashes_size, 0);
memcpy(final, &out, sizeof(out));
break;
} case hash_variant_x86_128: {
uint64_t out[2];
hash_x86_128(hashes, hashbytes*256, 0, out);
hash_x86_128(hashes, hashes_size, 0, out);
memcpy(final, out, sizeof(out));
break;
} case hash_variant_x64_128: {
uint64_t out[2];
hash_x64_128(hashes, hashbytes*256, 0, out);
hash_x64_128(hashes, hashes_size, 0, out);
memcpy(final, out, sizeof(out));
break;
} default: not_reached();