Make dss non-optional, and fix an "arena.<i>.dss" mallctl bug.
Make dss non-optional on all platforms which support sbrk(2). Fix the "arena.<i>.dss" mallctl to return an error if "primary" or "secondary" precedence is specified, but sbrk(2) is not supported.
This commit is contained in:
parent
644d414bc9
commit
4d434adb14
4
INSTALL
4
INSTALL
@@ -145,10 +145,6 @@ any of the following arguments (not a definitive list) to 'configure':
|
||||
memory allocation algorithm that causes semi-permanent VM map holes under
|
||||
normal jemalloc operation.
|
||||
|
||||
--enable-dss
|
||||
Enable support for page allocation/deallocation via sbrk(2), in addition to
|
||||
mmap(2).
|
||||
|
||||
--disable-fill
|
||||
Disable support for junk/zero filling of memory, quarantine, and redzones.
|
||||
See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
|
||||
|
23
configure.ac
23
configure.ac
@@ -836,34 +836,22 @@ if test "x$enable_munmap" = "x1" ; then
|
||||
fi
|
||||
AC_SUBST([enable_munmap])
|
||||
|
||||
dnl Do not enable allocation from DSS by default.
|
||||
AC_ARG_ENABLE([dss],
|
||||
[AS_HELP_STRING([--enable-dss], [Enable allocation from DSS])],
|
||||
[if test "x$enable_dss" = "xno" ; then
|
||||
enable_dss="0"
|
||||
else
|
||||
enable_dss="1"
|
||||
fi
|
||||
],
|
||||
[enable_dss="0"]
|
||||
)
|
||||
dnl Enable allocation from DSS if supported by the OS.
|
||||
have_dss="1"
|
||||
dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
|
||||
AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
|
||||
if test "x$have_sbrk" = "x1" ; then
|
||||
if test "x$sbrk_deprecated" == "x1" ; then
|
||||
AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
|
||||
enable_dss="0"
|
||||
else
|
||||
AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ])
|
||||
have_dss="0"
|
||||
fi
|
||||
else
|
||||
enable_dss="0"
|
||||
have_dss="0"
|
||||
fi
|
||||
|
||||
if test "x$enable_dss" = "x1" ; then
|
||||
if test "x$have_dss" = "x1" ; then
|
||||
AC_DEFINE([JEMALLOC_DSS], [ ])
|
||||
fi
|
||||
AC_SUBST([enable_dss])
|
||||
|
||||
dnl Support the junk/zero filling option by default.
|
||||
AC_ARG_ENABLE([fill],
|
||||
@@ -1461,7 +1449,6 @@ AC_MSG_RESULT([valgrind : ${enable_valgrind}])
|
||||
AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
|
||||
AC_MSG_RESULT([mremap : ${enable_mremap}])
|
||||
AC_MSG_RESULT([munmap : ${enable_munmap}])
|
||||
AC_MSG_RESULT([dss : ${enable_dss}])
|
||||
AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
|
||||
AC_MSG_RESULT([tls : ${enable_tls}])
|
||||
AC_MSG_RESULT([===============================================================================])
|
||||
|
@@ -448,8 +448,10 @@ for (i = 0; i < nbins; i++) {
|
||||
<manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
|
||||
suboptimal for several reasons, including race conditions, increased
|
||||
fragmentation, and artificial limitations on maximum usable memory. If
|
||||
<option>--enable-dss</option> is specified during configuration, this
|
||||
allocator uses both <citerefentry><refentrytitle>mmap</refentrytitle>
|
||||
<citerefentry><refentrytitle>sbrk</refentrytitle>
|
||||
<manvolnum>2</manvolnum></citerefentry> is supported by the operating
|
||||
system, this allocator uses both
|
||||
<citerefentry><refentrytitle>mmap</refentrytitle>
|
||||
<manvolnum>2</manvolnum></citerefentry> and
|
||||
<citerefentry><refentrytitle>sbrk</refentrytitle>
|
||||
<manvolnum>2</manvolnum></citerefentry>, in that order of preference;
|
||||
@@ -625,16 +627,6 @@ for (i = 0; i < nbins; i++) {
|
||||
build configuration.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="config.dss">
|
||||
<term>
|
||||
<mallctl>config.dss</mallctl>
|
||||
(<type>bool</type>)
|
||||
<literal>r-</literal>
|
||||
</term>
|
||||
<listitem><para><option>--enable-dss</option> was specified during
|
||||
build configuration.</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry id="config.fill">
|
||||
<term>
|
||||
<mallctl>config.fill</mallctl>
|
||||
@@ -790,10 +782,15 @@ for (i = 0; i < nbins; i++) {
|
||||
<manvolnum>2</manvolnum></citerefentry>) allocation precedence as
|
||||
related to <citerefentry><refentrytitle>mmap</refentrytitle>
|
||||
<manvolnum>2</manvolnum></citerefentry> allocation. The following
|
||||
settings are supported: “disabled”, “primary”,
|
||||
and “secondary”. The default is “secondary” if
|
||||
<link linkend="config.dss"><mallctl>config.dss</mallctl></link> is
|
||||
true, “disabled” otherwise.
|
||||
settings are supported if
|
||||
<citerefentry><refentrytitle>sbrk</refentrytitle>
|
||||
<manvolnum>2</manvolnum></citerefentry> is supported by the operating
|
||||
system: “disabled”, “primary”, and
|
||||
“secondary”; otherwise only “disabled” is
|
||||
supported. The default is “secondary” if
|
||||
<citerefentry><refentrytitle>sbrk</refentrytitle>
|
||||
<manvolnum>2</manvolnum></citerefentry> is supported by the operating
|
||||
system; “disabled” otherwise.
|
||||
</para></listitem>
|
||||
</varlistentry>
|
||||
|
||||
|
@@ -434,7 +434,7 @@ void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
|
||||
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
|
||||
bool try_tcache_dalloc);
|
||||
dss_prec_t arena_dss_prec_get(arena_t *arena);
|
||||
void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
|
||||
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
|
||||
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
|
||||
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
|
||||
malloc_large_stats_t *lstats);
|
||||
|
@@ -85,7 +85,7 @@ static const bool config_debug =
|
||||
false
|
||||
#endif
|
||||
;
|
||||
static const bool config_dss =
|
||||
static const bool have_dss =
|
||||
#ifdef JEMALLOC_DSS
|
||||
true
|
||||
#else
|
||||
|
@@ -76,9 +76,6 @@
|
||||
*/
|
||||
#undef JEMALLOC_MUTEX_INIT_CB
|
||||
|
||||
/* Defined if sbrk() is supported. */
|
||||
#undef JEMALLOC_HAVE_SBRK
|
||||
|
||||
/* Non-empty if the tls_model attribute is supported. */
|
||||
#undef JEMALLOC_TLS_MODEL
|
||||
|
||||
|
@@ -2243,13 +2243,16 @@ arena_dss_prec_get(arena_t *arena)
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void
|
||||
bool
|
||||
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
|
||||
{
|
||||
|
||||
if (have_dss == false)
|
||||
return (dss_prec != dss_prec_disabled);
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
arena->dss_prec = dss_prec;
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
|
@@ -153,7 +153,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
/* "primary" dss. */
|
||||
if (config_dss && dss_prec == dss_prec_primary) {
|
||||
if (have_dss && dss_prec == dss_prec_primary) {
|
||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
@@ -167,7 +167,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
|
||||
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
|
||||
goto label_return;
|
||||
/* "secondary" dss. */
|
||||
if (config_dss && dss_prec == dss_prec_secondary) {
|
||||
if (have_dss && dss_prec == dss_prec_secondary) {
|
||||
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
|
||||
alignment, base, zero)) != NULL)
|
||||
goto label_return;
|
||||
@@ -305,7 +305,7 @@ chunk_unmap(void *chunk, size_t size)
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (config_dss && chunk_in_dss(chunk))
|
||||
if (have_dss && chunk_in_dss(chunk))
|
||||
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
|
||||
else if (chunk_dealloc_mmap(chunk, size))
|
||||
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
|
||||
@@ -348,7 +348,7 @@ chunk_boot(void)
|
||||
return (true);
|
||||
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
|
||||
}
|
||||
if (config_dss && chunk_dss_boot())
|
||||
if (have_dss && chunk_dss_boot())
|
||||
return (true);
|
||||
extent_tree_szad_new(&chunks_szad_mmap);
|
||||
extent_tree_ad_new(&chunks_ad_mmap);
|
||||
|
@@ -32,7 +32,7 @@ static void *
|
||||
chunk_dss_sbrk(intptr_t increment)
|
||||
{
|
||||
|
||||
#ifdef JEMALLOC_HAVE_SBRK
|
||||
#ifdef JEMALLOC_DSS
|
||||
return (sbrk(increment));
|
||||
#else
|
||||
not_implemented();
|
||||
@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
|
||||
{
|
||||
dss_prec_t ret;
|
||||
|
||||
if (config_dss == false)
|
||||
if (have_dss == false)
|
||||
return (dss_prec_disabled);
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
ret = dss_prec_default;
|
||||
@@ -57,8 +57,8 @@ bool
|
||||
chunk_dss_prec_set(dss_prec_t dss_prec)
|
||||
{
|
||||
|
||||
if (config_dss == false)
|
||||
return (true);
|
||||
if (have_dss == false)
|
||||
return (dss_prec != dss_prec_disabled);
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
dss_prec_default = dss_prec;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
@@ -70,7 +70,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
cassert(config_dss);
|
||||
cassert(have_dss);
|
||||
assert(size > 0 && (size & chunksize_mask) == 0);
|
||||
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
|
||||
|
||||
@@ -143,7 +143,7 @@ chunk_in_dss(void *chunk)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
cassert(config_dss);
|
||||
cassert(have_dss);
|
||||
|
||||
malloc_mutex_lock(&dss_mtx);
|
||||
if ((uintptr_t)chunk >= (uintptr_t)dss_base
|
||||
@@ -160,7 +160,7 @@ bool
|
||||
chunk_dss_boot(void)
|
||||
{
|
||||
|
||||
cassert(config_dss);
|
||||
cassert(have_dss);
|
||||
|
||||
if (malloc_mutex_init(&dss_mtx))
|
||||
return (true);
|
||||
@@ -175,7 +175,7 @@ void
|
||||
chunk_dss_prefork(void)
|
||||
{
|
||||
|
||||
if (config_dss)
|
||||
if (have_dss)
|
||||
malloc_mutex_prefork(&dss_mtx);
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ void
|
||||
chunk_dss_postfork_parent(void)
|
||||
{
|
||||
|
||||
if (config_dss)
|
||||
if (have_dss)
|
||||
malloc_mutex_postfork_parent(&dss_mtx);
|
||||
}
|
||||
|
||||
@@ -191,7 +191,7 @@ void
|
||||
chunk_dss_postfork_child(void)
|
||||
{
|
||||
|
||||
if (config_dss)
|
||||
if (have_dss)
|
||||
malloc_mutex_postfork_child(&dss_mtx);
|
||||
}
|
||||
|
||||
|
@@ -74,7 +74,6 @@ CTL_PROTO(thread_allocatedp)
|
||||
CTL_PROTO(thread_deallocated)
|
||||
CTL_PROTO(thread_deallocatedp)
|
||||
CTL_PROTO(config_debug)
|
||||
CTL_PROTO(config_dss)
|
||||
CTL_PROTO(config_fill)
|
||||
CTL_PROTO(config_lazy_lock)
|
||||
CTL_PROTO(config_mremap)
|
||||
@@ -213,7 +212,6 @@ static const ctl_named_node_t thread_node[] = {
|
||||
|
||||
static const ctl_named_node_t config_node[] = {
|
||||
{NAME("debug"), CTL(config_debug)},
|
||||
{NAME("dss"), CTL(config_dss)},
|
||||
{NAME("fill"), CTL(config_fill)},
|
||||
{NAME("lazy_lock"), CTL(config_lazy_lock)},
|
||||
{NAME("mremap"), CTL(config_mremap)},
|
||||
@@ -1136,7 +1134,6 @@ label_return:
|
||||
/******************************************************************************/
|
||||
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_debug)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_dss)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_fill)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
|
||||
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
|
||||
@@ -1356,8 +1353,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
||||
arena_t *arena = arenas[arena_ind];
|
||||
if (arena != NULL) {
|
||||
dss_prec_old = arena_dss_prec_get(arena);
|
||||
arena_dss_prec_set(arena, dss_prec);
|
||||
err = false;
|
||||
err = arena_dss_prec_set(arena, dss_prec);
|
||||
} else
|
||||
err = true;
|
||||
} else {
|
||||
|
@@ -140,7 +140,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
|
||||
* source nor the destination are in dss.
|
||||
*/
|
||||
if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
|
||||
if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
|
||||
== false && chunk_in_dss(ret) == false))) {
|
||||
size_t newsize = huge_salloc(ret);
|
||||
|
||||
@@ -198,12 +198,12 @@ static void
|
||||
huge_dalloc_junk(void *ptr, size_t usize)
|
||||
{
|
||||
|
||||
if (config_fill && config_dss && opt_junk) {
|
||||
if (config_fill && have_dss && opt_junk) {
|
||||
/*
|
||||
* Only bother junk filling if the chunk isn't about to be
|
||||
* unmapped.
|
||||
*/
|
||||
if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
|
||||
if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
|
||||
memset(ptr, 0x5a, usize);
|
||||
}
|
||||
}
|
||||
|
@@ -2,6 +2,14 @@
|
||||
|
||||
#define NTHREADS 10
|
||||
|
||||
static bool have_dss =
|
||||
#ifdef JEMALLOC_DSS
|
||||
true
|
||||
#else
|
||||
false
|
||||
#endif
|
||||
;
|
||||
|
||||
void *
|
||||
thd_start(void *arg)
|
||||
{
|
||||
@@ -18,13 +26,16 @@ thd_start(void *arg)
|
||||
size_t mib[3];
|
||||
size_t miblen = sizeof(mib) / sizeof(size_t);
|
||||
const char *dss_precs[] = {"disabled", "primary", "secondary"};
|
||||
const char *dss = dss_precs[thread_ind %
|
||||
(sizeof(dss_precs)/sizeof(char*))];
|
||||
unsigned prec_ind = thread_ind %
|
||||
(sizeof(dss_precs)/sizeof(char*));
|
||||
const char *dss = dss_precs[prec_ind];
|
||||
int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
|
||||
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
|
||||
"Error in mallctlnametomib()");
|
||||
mib[1] = arena_ind;
|
||||
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
|
||||
sizeof(const char *)), 0, "Error in mallctlbymib()");
|
||||
sizeof(const char *)), expected_err,
|
||||
"Error in mallctlbymib()");
|
||||
}
|
||||
|
||||
p = mallocx(1, MALLOCX_ARENA(arena_ind));
|
||||
|
@@ -127,7 +127,6 @@ TEST_BEGIN(test_mallctl_config)
|
||||
} while (0)
|
||||
|
||||
TEST_MALLCTL_CONFIG(debug);
|
||||
TEST_MALLCTL_CONFIG(dss);
|
||||
TEST_MALLCTL_CONFIG(fill);
|
||||
TEST_MALLCTL_CONFIG(lazy_lock);
|
||||
TEST_MALLCTL_CONFIG(mremap);
|
||||
@@ -255,15 +254,28 @@ TEST_BEGIN(test_arena_i_dss)
|
||||
{
|
||||
const char *dss_prec_old, *dss_prec_new;
|
||||
size_t sz = sizeof(dss_prec_old);
|
||||
size_t mib[3];
|
||||
size_t miblen;
|
||||
|
||||
dss_prec_new = "primary";
|
||||
assert_d_eq(mallctl("arena.0.dss", &dss_prec_old, &sz, &dss_prec_new,
|
||||
miblen = sizeof(mib)/sizeof(size_t);
|
||||
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
|
||||
"Unexpected mallctlnametomib() error");
|
||||
|
||||
dss_prec_new = "disabled";
|
||||
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
|
||||
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
|
||||
assert_str_ne(dss_prec_old, "primary",
|
||||
"Unexpected default for dss precedence");
|
||||
|
||||
assert_d_eq(mallctl("arena.0.dss", &dss_prec_new, &sz, &dss_prec_old,
|
||||
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
|
||||
sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
|
||||
|
||||
mib[1] = narenas_total_get();
|
||||
dss_prec_new = "disabled";
|
||||
assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
|
||||
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
|
||||
assert_str_ne(dss_prec_old, "primary",
|
||||
"Unexpected default for dss precedence");
|
||||
}
|
||||
TEST_END
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user