Remove the swap feature.

Remove the swap feature, which enabled per-application swap files.  In
practice this feature has not proven useful to users.
Jason Evans 2012-02-13 10:56:17 -08:00
parent fd56043c53
commit 4162627757
17 changed files with 26 additions and 738 deletions
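
For reference, the removed interface worked roughly as follows, going by the swap.* mallctl documentation deleted below. This is a minimal sketch, not code from the commit: it assumes an unprefixed build where the public entry point is spelled mallctl(), and the backing-file path and size are illustrative.

    /*
     * Sketch only: enable a per-application swap file through the swap.*
     * mallctls that this commit removes.  The mallctl names come from the
     * manual text deleted below; the path and size are made up.
     */
    #include <sys/types.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    static bool
    enable_app_swap(const char *path, off_t bytes)
    {
        int fd = open(path, O_RDWR | O_CREAT, 0600);
        if (fd == -1)
            return (true);
        /* jemalloc truncates to a page-size multiple; size the file up front. */
        if (ftruncate(fd, bytes) != 0)
            return (true);

        /* A freshly extended file reads back as zeroes, so declare it pre-zeroed. */
        bool prezeroed = true;
        mallctl("swap.prezeroed", NULL, NULL, &prezeroed, sizeof(prezeroed));

        /* Hand the descriptor(s) to jemalloc to use as chunk backing store. */
        int fds[] = {fd};
        return (mallctl("swap.fds", NULL, NULL, fds, sizeof(fds)) != 0);
    }

After this change the dss (sbrk) and anonymous mmap paths in chunk_alloc() are the only remaining backing stores.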

INSTALL

@@ -102,11 +102,6 @@ any of the following arguments (not a definitive list) to 'configure':
 released in bulk, thus reducing the total number of mutex operations. See
 the "opt.tcache" option for usage details.
---enable-swap
-Enable mmap()ed swap file support. When this feature is built in, it is
-possible to specify one or more files that act as backing store. This
-effectively allows for per application swap files.
 --enable-dss
 Enable support for page allocation/deallocation via sbrk(2), in addition to
 mmap(2).

Makefile.in

@@ -47,14 +47,14 @@ CHDRS := @objroot@include/jemalloc/jemalloc@install_suffix@.h \
 CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/atomic.c \
 @srcroot@src/base.c @srcroot@src/bitmap.c @srcroot@src/chunk.c \
 @srcroot@src/chunk_dss.c @srcroot@src/chunk_mmap.c \
-@srcroot@src/chunk_swap.c @srcroot@src/ckh.c @srcroot@src/ctl.c \
-@srcroot@src/extent.c @srcroot@src/hash.c @srcroot@src/huge.c \
-@srcroot@src/mb.c @srcroot@src/mutex.c @srcroot@src/prof.c \
-@srcroot@src/rtree.c @srcroot@src/stats.c @srcroot@src/tcache.c
+@srcroot@src/ckh.c @srcroot@src/ctl.c @srcroot@src/extent.c \
+@srcroot@src/hash.c @srcroot@src/huge.c @srcroot@src/mb.c \
+@srcroot@src/mutex.c @srcroot@src/prof.c @srcroot@src/rtree.c \
+@srcroot@src/stats.c @srcroot@src/tcache.c
 ifeq (macho, @abi@)
 CSRCS += @srcroot@src/zone.c
 endif
 STATIC_LIBS := @objroot@lib/libjemalloc@install_suffix@.a
 DSOS := @objroot@lib/libjemalloc@install_suffix@.$(SO).$(REV) \
 @objroot@lib/libjemalloc@install_suffix@.$(SO) \
 @objroot@lib/libjemalloc@install_suffix@_pic.a

configure.ac

@@ -592,22 +592,6 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 AC_SUBST([enable_tcache])
-dnl Do not enable mmap()ped swap files by default.
-AC_ARG_ENABLE([swap],
-[AS_HELP_STRING([--enable-swap], [Enable mmap()ped swap files])],
-[if test "x$enable_swap" = "xno" ; then
-enable_swap="0"
-else
-enable_swap="1"
-fi
-],
-[enable_swap="0"]
-)
-if test "x$enable_swap" = "x1" ; then
-AC_DEFINE([JEMALLOC_SWAP], [ ])
-fi
-AC_SUBST([enable_swap])
 dnl Do not enable allocation from DSS by default.
 AC_ARG_ENABLE([dss],
 [AS_HELP_STRING([--enable-dss], [Enable allocation from DSS])],
@@ -955,7 +939,6 @@ AC_MSG_RESULT([tcache : ${enable_tcache}])
 AC_MSG_RESULT([fill : ${enable_fill}])
 AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
 AC_MSG_RESULT([sysv : ${enable_sysv}])
-AC_MSG_RESULT([swap : ${enable_swap}])
 AC_MSG_RESULT([dss : ${enable_dss}])
 AC_MSG_RESULT([dynamic_page_shift : ${enable_dynamic_page_shift}])
 AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])

doc/jemalloc.xml.in

@@ -660,16 +660,6 @@ for (i = 0; i < nbins; i++) {
 build configuration.</para></listitem>
 </varlistentry>
-<varlistentry>
-<term>
-<mallctl>config.swap</mallctl>
-(<type>bool</type>)
-<literal>r-</literal>
-</term>
-<listitem><para><option>--enable-swap</option> was specified during
-build configuration.</para></listitem>
-</varlistentry>
 <varlistentry>
 <term>
 <mallctl>config.sysv</mallctl>
@@ -1118,25 +1108,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 by default.</para></listitem>
 </varlistentry>
-<varlistentry id="opt.overcommit">
-<term>
-<mallctl>opt.overcommit</mallctl>
-(<type>bool</type>)
-<literal>r-</literal>
-[<option>--enable-swap</option>]
-</term>
-<listitem><para>Over-commit enabled/disabled. If enabled, over-commit
-memory as a side effect of using anonymous
-<citerefentry><refentrytitle>mmap</refentrytitle>
-<manvolnum>2</manvolnum></citerefentry> or
-<citerefentry><refentrytitle>sbrk</refentrytitle>
-<manvolnum>2</manvolnum></citerefentry> for virtual memory allocation.
-In order for overcommit to be disabled, the <link
-linkend="swap.fds"><mallctl>swap.fds</mallctl></link> mallctl must have
-been successfully written to. This option is enabled by
-default.</para></listitem>
-</varlistentry>
 <varlistentry>
 <term>
 <mallctl>tcache.flush</mallctl>
@@ -1590,8 +1561,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 application. This is a multiple of the chunk size, and is at least as
 large as <link
 linkend="stats.active"><mallctl>stats.active</mallctl></link>. This
-does not include inactive chunks backed by swap files. This does not
-include inactive chunks embedded in the DSS.</para></listitem>
+does not include inactive chunks embedded in the DSS.</para></listitem>
 </varlistentry>
 <varlistentry>
@@ -1602,8 +1572,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 [<option>--enable-stats</option>]
 </term>
 <listitem><para>Total number of chunks actively mapped on behalf of the
-application. This does not include inactive chunks backed by swap
-files. This does not include inactive chunks embedded in the DSS.
+application. This does not include inactive chunks embedded in the DSS.
 </para></listitem>
 </varlistentry>
@@ -1983,65 +1952,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 <listitem><para>Current number of runs for this size class.
 </para></listitem>
 </varlistentry>
-<varlistentry>
-<term>
-<mallctl>swap.avail</mallctl>
-(<type>size_t</type>)
-<literal>r-</literal>
-[<option>--enable-stats --enable-swap</option>]
-</term>
-<listitem><para>Number of swap file bytes that are currently not
-associated with any chunk (i.e. mapped, but otherwise completely
-unmanaged).</para></listitem>
-</varlistentry>
-<varlistentry id="swap.prezeroed">
-<term>
-<mallctl>swap.prezeroed</mallctl>
-(<type>bool</type>)
-<literal>rw</literal>
-[<option>--enable-swap</option>]
-</term>
-<listitem><para>If true, the allocator assumes that the swap file(s)
-contain nothing but nil bytes. If this assumption is violated,
-allocator behavior is undefined. This value becomes read-only after
-<link linkend="swap.fds"><mallctl>swap.fds</mallctl></link> is
-successfully written to.</para></listitem>
-</varlistentry>
-<varlistentry>
-<term>
-<mallctl>swap.nfds</mallctl>
-(<type>size_t</type>)
-<literal>r-</literal>
-[<option>--enable-swap</option>]
-</term>
-<listitem><para>Number of file descriptors in use for swap.
-</para></listitem>
-</varlistentry>
-<varlistentry id="swap.fds">
-<term>
-<mallctl>swap.fds</mallctl>
-(<type>int *</type>)
-<literal>rw</literal>
-[<option>--enable-swap</option>]
-</term>
-<listitem><para>When written to, the files associated with the
-specified file descriptors are contiguously mapped via
-<citerefentry><refentrytitle>mmap</refentrytitle>
-<manvolnum>2</manvolnum></citerefentry>. The resulting virtual memory
-region is preferred over anonymous
-<citerefentry><refentrytitle>mmap</refentrytitle>
-<manvolnum>2</manvolnum></citerefentry> and
-<citerefentry><refentrytitle>sbrk</refentrytitle>
-<manvolnum>2</manvolnum></citerefentry> memory. Note that if a file's
-size is not a multiple of the page size, it is automatically truncated
-to the nearest page size multiple. See the
-<link linkend="swap.prezeroed"><mallctl>swap.prezeroed</mallctl></link>
-mallctl for specifying that the files are pre-zeroed.</para></listitem>
-</varlistentry>
 </variablelist>
 </refsect1>
 <refsect1 id="debugging_malloc_problems">

include/jemalloc/internal/chunk.h

@@ -28,7 +28,6 @@
 #ifdef JEMALLOC_H_EXTERNS
 extern size_t opt_lg_chunk;
-extern bool opt_overcommit;
 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t chunks_mtx;
@@ -54,6 +53,5 @@ bool chunk_boot(void);
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#include "jemalloc/internal/chunk_swap.h"
 #include "jemalloc/internal/chunk_dss.h"
 #include "jemalloc/internal/chunk_mmap.h"

include/jemalloc/internal/chunk_swap.h

@@ -1,30 +0,0 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern malloc_mutex_t swap_mtx;
extern bool swap_enabled;
extern bool swap_prezeroed;
extern size_t swap_nfds;
extern int *swap_fds;
extern size_t swap_avail;
void *chunk_alloc_swap(size_t size, bool *zero);
bool chunk_in_swap(void *chunk);
bool chunk_dealloc_swap(void *chunk, size_t size);
bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
bool chunk_swap_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

include/jemalloc/internal/ctl.h

@@ -59,7 +59,6 @@ struct ctl_stats_s {
 uint64_t ndalloc; /* huge_ndalloc */
 } huge;
 ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
-size_t swap_avail;
 };
 #endif /* JEMALLOC_H_STRUCTS */

include/jemalloc/internal/jemalloc_internal.h.in

@@ -104,13 +104,6 @@ static const bool config_stats =
 false
 #endif
 ;
-static const bool config_swap =
-#ifdef JEMALLOC_SWAP
-true
-#else
-false
-#endif
-;
 static const bool config_sysv =
 #ifdef JEMALLOC_SYSV
 true

include/jemalloc/internal/private_namespace.h

@@ -48,18 +48,13 @@
 #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
 #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
 #define chunk_alloc_mmap_noreserve JEMALLOC_N(chunk_alloc_mmap_noreserve)
-#define chunk_alloc_swap JEMALLOC_N(chunk_alloc_swap)
 #define chunk_boot JEMALLOC_N(chunk_boot)
 #define chunk_dealloc JEMALLOC_N(chunk_dealloc)
 #define chunk_dealloc_dss JEMALLOC_N(chunk_dealloc_dss)
 #define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
-#define chunk_dealloc_swap JEMALLOC_N(chunk_dealloc_swap)
 #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
 #define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_in_swap JEMALLOC_N(chunk_in_swap)
 #define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot)
-#define chunk_swap_boot JEMALLOC_N(chunk_swap_boot)
-#define chunk_swap_enable JEMALLOC_N(chunk_swap_enable)
 #define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
 #define ckh_count JEMALLOC_N(ckh_count)
 #define ckh_delete JEMALLOC_N(ckh_delete)

include/jemalloc/jemalloc_defs.h.in

@@ -98,9 +98,6 @@
 */
 #undef JEMALLOC_DSS
-/* JEMALLOC_SWAP enables mmap()ed swap file support. */
-#undef JEMALLOC_SWAP
 /* Support memory filling (junk/zero). */
 #undef JEMALLOC_FILL

src/arena.c

@@ -671,10 +671,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
 * mappings, but not for file-backed mappings.
 */
-(config_swap && swap_enabled) ? CHUNK_MAP_UNZEROED : 0;
+0
 #else
-CHUNK_MAP_UNZEROED;
+CHUNK_MAP_UNZEROED
 #endif
+;
 /*
 * If chunk is the spare, temporarily re-allocate it, 1) so that its

src/chunk.c

@@ -5,7 +5,6 @@
 /* Data. */
 size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-bool opt_overcommit = true;
 malloc_mutex_t chunks_mtx;
 chunk_stats_t stats_chunks;
@@ -35,23 +34,15 @@ chunk_alloc(size_t size, bool base, bool *zero)
 assert(size != 0);
 assert((size & chunksize_mask) == 0);
-if (config_swap && swap_enabled) {
-ret = chunk_alloc_swap(size, zero);
-if (ret != NULL)
-goto RETURN;
-}
-if (swap_enabled == false || opt_overcommit) {
-if (config_dss) {
-ret = chunk_alloc_dss(size, zero);
-if (ret != NULL)
-goto RETURN;
-}
-ret = chunk_alloc_mmap(size);
-if (ret != NULL) {
-*zero = true;
-goto RETURN;
-}
-}
+if (config_dss) {
+ret = chunk_alloc_dss(size, zero);
+if (ret != NULL)
+goto RETURN;
+}
+ret = chunk_alloc_mmap(size);
+if (ret != NULL) {
+*zero = true;
+goto RETURN;
+}
 /* All strategies for allocation failed. */
@@ -102,9 +93,6 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 }
 if (unmap) {
-if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
-size) == false)
-return;
 if (config_dss && chunk_dealloc_dss(chunk, size) == false)
 return;
 chunk_dealloc_mmap(chunk, size);
@@ -126,8 +114,6 @@ chunk_boot(void)
 return (true);
 memset(&stats_chunks, 0, sizeof(chunk_stats_t));
 }
-if (config_swap && chunk_swap_boot())
-return (true);
 if (chunk_mmap_boot())
 return (true);
 if (config_dss && chunk_dss_boot())

src/chunk_swap.c

@@ -1,403 +0,0 @@
#define JEMALLOC_CHUNK_SWAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
malloc_mutex_t swap_mtx;
bool swap_enabled;
bool swap_prezeroed;
size_t swap_nfds;
int *swap_fds;
size_t swap_avail;
/* Base address of the mmap()ed file(s). */
static void *swap_base;
/* Current end of the space in use (<= swap_max). */
static void *swap_end;
/* Absolute upper limit on file-backed addresses. */
static void *swap_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
static extent_tree_t swap_chunks_szad;
static extent_tree_t swap_chunks_ad;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *chunk_recycle_swap(size_t size, bool *zero);
static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
/******************************************************************************/
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
extent_node_t *node, key;
cassert(config_swap);
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&swap_mtx);
node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
if (node != NULL) {
void *ret = node->addr;
/* Remove node from the tree. */
extent_tree_szad_remove(&swap_chunks_szad, node);
if (node->size == size) {
extent_tree_ad_remove(&swap_chunks_ad, node);
base_node_dealloc(node);
} else {
/*
* Insert the remainder of node's address range as a
* smaller chunk. Its position within swap_chunks_ad
* does not change.
*/
assert(node->size > size);
node->addr = (void *)((uintptr_t)node->addr + size);
node->size -= size;
extent_tree_szad_insert(&swap_chunks_szad, node);
}
if (config_stats)
swap_avail -= size;
malloc_mutex_unlock(&swap_mtx);
if (*zero)
memset(ret, 0, size);
return (ret);
}
malloc_mutex_unlock(&swap_mtx);
return (NULL);
}
void *
chunk_alloc_swap(size_t size, bool *zero)
{
void *ret;
cassert(config_swap);
assert(swap_enabled);
ret = chunk_recycle_swap(size, zero);
if (ret != NULL)
return (ret);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
ret = swap_end;
swap_end = (void *)((uintptr_t)swap_end + size);
if (config_stats)
swap_avail -= size;
malloc_mutex_unlock(&swap_mtx);
if (swap_prezeroed)
*zero = true;
else if (*zero)
memset(ret, 0, size);
} else {
malloc_mutex_unlock(&swap_mtx);
return (NULL);
}
return (ret);
}
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
cassert(config_swap);
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range.
* This does not change the position within
* swap_chunks_ad, so only remove/insert from/into
* swap_chunks_szad.
*/
extent_tree_szad_remove(&swap_chunks_szad, node);
node->addr = chunk;
node->size += size;
extent_tree_szad_insert(&swap_chunks_szad, node);
break;
} else if (xnode == NULL) {
/*
* It is possible that base_node_alloc() will cause a
* new base chunk to be allocated, so take care not to
* deadlock on swap_mtx, and recover if another thread
* deallocates an adjacent chunk while this one is busy
* allocating xnode.
*/
malloc_mutex_unlock(&swap_mtx);
xnode = base_node_alloc();
malloc_mutex_lock(&swap_mtx);
if (xnode == NULL)
return (NULL);
} else {
/* Coalescing forward failed, so insert a new node. */
node = xnode;
xnode = NULL;
node->addr = chunk;
node->size = size;
extent_tree_ad_insert(&swap_chunks_ad, node);
extent_tree_szad_insert(&swap_chunks_szad, node);
break;
}
}
/* Discard xnode if it ended up unused do to a race. */
if (xnode != NULL)
base_node_dealloc(xnode);
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(&swap_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within swap_chunks_ad, so only
* remove/insert node from/into swap_chunks_szad.
*/
extent_tree_szad_remove(&swap_chunks_szad, prev);
extent_tree_ad_remove(&swap_chunks_ad, prev);
extent_tree_szad_remove(&swap_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
extent_tree_szad_insert(&swap_chunks_szad, node);
base_node_dealloc(prev);
}
return (node);
}
bool
chunk_in_swap(void *chunk)
{
bool ret;
cassert(config_swap);
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)chunk >= (uintptr_t)swap_base
&& (uintptr_t)chunk < (uintptr_t)swap_max)
ret = true;
else
ret = false;
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
bool ret;
cassert(config_swap);
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
if ((uintptr_t)chunk >= (uintptr_t)swap_base
&& (uintptr_t)chunk < (uintptr_t)swap_max) {
extent_node_t *node;
/* Try to coalesce with other unused chunks. */
node = chunk_dealloc_swap_record(chunk, size);
if (node != NULL) {
chunk = node->addr;
size = node->size;
}
/*
* Try to shrink the in-use memory if this chunk is at the end
* of the in-use memory.
*/
if ((void *)((uintptr_t)chunk + size) == swap_end) {
swap_end = (void *)((uintptr_t)swap_end - size);
if (node != NULL) {
extent_tree_szad_remove(&swap_chunks_szad,
node);
extent_tree_ad_remove(&swap_chunks_ad, node);
base_node_dealloc(node);
}
} else
madvise(chunk, size, MADV_DONTNEED);
if (config_stats)
swap_avail += size;
ret = false;
goto RETURN;
}
ret = true;
RETURN:
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
bool ret;
unsigned i;
off_t off;
void *vaddr;
size_t cumsize, voff;
size_t sizes[nfds];
cassert(config_swap);
malloc_mutex_lock(&swap_mtx);
/* Get file sizes. */
for (i = 0, cumsize = 0; i < nfds; i++) {
off = lseek(fds[i], 0, SEEK_END);
if (off == ((off_t)-1)) {
ret = true;
goto RETURN;
}
if (PAGE_CEILING(off) != off) {
/* Truncate to a multiple of the page size. */
off &= ~PAGE_MASK;
if (ftruncate(fds[i], off) != 0) {
ret = true;
goto RETURN;
}
}
sizes[i] = off;
if (cumsize + off < cumsize) {
/*
* Cumulative file size is greater than the total
* address space. Bail out while it's still obvious
* what the problem is.
*/
ret = true;
goto RETURN;
}
cumsize += off;
}
/* Round down to a multiple of the chunk size. */
cumsize &= ~chunksize_mask;
if (cumsize == 0) {
ret = true;
goto RETURN;
}
/*
* Allocate a chunk-aligned region of anonymous memory, which will
* be the final location for the memory-mapped files.
*/
vaddr = chunk_alloc_mmap_noreserve(cumsize);
if (vaddr == NULL) {
ret = true;
goto RETURN;
}
/* Overlay the files onto the anonymous mapping. */
for (i = 0, voff = 0; i < nfds; i++) {
void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
if (addr == MAP_FAILED) {
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write(
"<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
if (munmap(vaddr, voff) == -1) {
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in munmap(): ");
malloc_write(buf);
malloc_write("\n");
}
ret = true;
goto RETURN;
}
assert(addr == (void *)((uintptr_t)vaddr + voff));
/*
* Tell the kernel that the mapping will be accessed randomly,
* and that it should not gratuitously sync pages to the
* filesystem.
*/
#ifdef MADV_RANDOM
madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
madvise(addr, sizes[i], MADV_NOSYNC);
#endif
voff += sizes[i];
}
swap_prezeroed = prezeroed;
swap_base = vaddr;
swap_end = swap_base;
swap_max = (void *)((uintptr_t)vaddr + cumsize);
/* Copy the fds array for mallctl purposes. */
swap_fds = (int *)base_alloc(nfds * sizeof(int));
if (swap_fds == NULL) {
ret = true;
goto RETURN;
}
memcpy(swap_fds, fds, nfds * sizeof(int));
swap_nfds = nfds;
if (config_stats)
swap_avail = cumsize;
swap_enabled = true;
ret = false;
RETURN:
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
bool
chunk_swap_boot(void)
{
cassert(config_swap);
if (malloc_mutex_init(&swap_mtx))
return (true);
swap_enabled = false;
swap_prezeroed = false; /* swap.* mallctl's depend on this. */
swap_nfds = 0;
swap_fds = NULL;
if (config_stats)
swap_avail = 0;
swap_base = NULL;
swap_end = NULL;
swap_max = NULL;
extent_tree_szad_new(&swap_chunks_szad);
extent_tree_ad_new(&swap_chunks_ad);
return (false);
}

src/ctl.c

@@ -8,8 +8,6 @@
 * ctl_mtx protects the following:
 * - ctl_stats.*
 * - opt_prof_active
-* - swap_enabled
-* - swap_prezeroed
 */
 static malloc_mutex_t ctl_mtx;
 static bool ctl_initialized;
@@ -56,7 +54,6 @@ CTL_PROTO(config_prof)
 CTL_PROTO(config_prof_libgcc)
 CTL_PROTO(config_prof_libunwind)
 CTL_PROTO(config_stats)
-CTL_PROTO(config_swap)
 CTL_PROTO(config_sysv)
 CTL_PROTO(config_tcache)
 CTL_PROTO(config_tiny)
@@ -85,7 +82,6 @@ CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
 CTL_PROTO(opt_lg_prof_tcmax)
-CTL_PROTO(opt_overcommit)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
 CTL_PROTO(arenas_bin_i_run_size)
@@ -162,10 +158,6 @@ CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
 CTL_PROTO(stats_mapped)
-CTL_PROTO(swap_avail)
-CTL_PROTO(swap_prezeroed)
-CTL_PROTO(swap_nfds)
-CTL_PROTO(swap_fds)
 /******************************************************************************/
 /* mallctl tree. */
@@ -205,7 +197,6 @@ static const ctl_node_t config_node[] = {
 {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
 {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
 {NAME("stats"), CTL(config_stats)},
-{NAME("swap"), CTL(config_swap)},
 {NAME("sysv"), CTL(config_sysv)},
 {NAME("tcache"), CTL(config_tcache)},
 {NAME("tiny"), CTL(config_tiny)},
@@ -236,8 +227,7 @@ static const ctl_node_t opt_node[] = {
 {NAME("prof_gdump"), CTL(opt_prof_gdump)},
 {NAME("prof_leak"), CTL(opt_prof_leak)},
 {NAME("prof_accum"), CTL(opt_prof_accum)},
-{NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)},
-{NAME("overcommit"), CTL(opt_overcommit)}
+{NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
 };
 static const ctl_node_t arenas_bin_i_node[] = {
@@ -391,13 +381,6 @@ static const ctl_node_t stats_node[] = {
 {NAME("arenas"), CHILD(stats_arenas)}
 };
-static const ctl_node_t swap_node[] = {
-{NAME("avail"), CTL(swap_avail)},
-{NAME("prezeroed"), CTL(swap_prezeroed)},
-{NAME("nfds"), CTL(swap_nfds)},
-{NAME("fds"), CTL(swap_fds)}
-};
 static const ctl_node_t root_node[] = {
 {NAME("version"), CTL(version)},
 {NAME("epoch"), CTL(epoch)},
@@ -408,8 +391,6 @@ static const ctl_node_t root_node[] = {
 {NAME("arenas"), CHILD(arenas)},
 {NAME("prof"), CHILD(prof)},
 {NAME("stats"), CHILD(stats)}
-,
-{NAME("swap"), CHILD(swap)}
 };
 static const ctl_node_t super_root_node[] = {
 {NAME(""), CHILD(root)}
@@ -597,12 +578,6 @@ ctl_refresh(void)
 ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
 PAGE_SHIFT) + ctl_stats.huge.allocated;
 ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
-if (config_swap) {
-malloc_mutex_lock(&swap_mtx);
-ctl_stats.swap_avail = swap_avail;
-malloc_mutex_unlock(&swap_mtx);
-}
 }
 ctl_epoch++;
@@ -1138,7 +1113,6 @@ CTL_RO_BOOL_CONFIG_GEN(config_prof)
 CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
 CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
 CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_swap)
 CTL_RO_BOOL_CONFIG_GEN(config_sysv)
 CTL_RO_BOOL_CONFIG_GEN(config_tcache)
 CTL_RO_BOOL_CONFIG_GEN(config_tiny)
@@ -1171,7 +1145,6 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
 CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
-CTL_RO_NL_CGEN(config_swap, opt_overcommit, opt_overcommit, bool)
 /******************************************************************************/
@@ -1450,85 +1423,3 @@ CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-/******************************************************************************/
-CTL_RO_CGEN(config_swap && config_stats, swap_avail, ctl_stats.swap_avail,
-size_t)
-static int
-swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
-size_t *oldlenp, void *newp, size_t newlen)
-{
-int ret;
-if (config_swap == false)
-return (ENOENT);
-malloc_mutex_lock(&ctl_mtx);
-if (swap_enabled) {
-READONLY();
-} else {
-/*
-* swap_prezeroed isn't actually used by the swap code until it
-* is set during a successful chunk_swap_enabled() call. We
-* use it here to store the value that we'll pass to
-* chunk_swap_enable() in a swap.fds mallctl(). This is not
-* very clean, but the obvious alternatives are even worse.
-*/
-WRITE(swap_prezeroed, bool);
-}
-READ(swap_prezeroed, bool);
-ret = 0;
-RETURN:
-malloc_mutex_unlock(&ctl_mtx);
-return (ret);
-}
-CTL_RO_CGEN(config_swap, swap_nfds, swap_nfds, size_t)
-static int
-swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
-void *newp, size_t newlen)
-{
-int ret;
-if (config_swap == false)
-return (ENOENT);
-malloc_mutex_lock(&ctl_mtx);
-if (swap_enabled) {
-READONLY();
-} else if (newp != NULL) {
-size_t nfds = newlen / sizeof(int);
-{
-int fds[nfds];
-memcpy(fds, newp, nfds * sizeof(int));
-if (chunk_swap_enable(fds, nfds, swap_prezeroed)) {
-ret = EFAULT;
-goto RETURN;
-}
-}
-}
-if (oldp != NULL && oldlenp != NULL) {
-if (*oldlenp != swap_nfds * sizeof(int)) {
-size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp)
-? swap_nfds * sizeof(int) : *oldlenp;
-memcpy(oldp, swap_fds, copylen);
-ret = EINVAL;
-goto RETURN;
-} else
-memcpy(oldp, swap_fds, *oldlenp);
-}
-ret = 0;
-RETURN:
-malloc_mutex_unlock(&ctl_mtx);
-return (ret);
-}

src/huge.c

@@ -212,13 +212,11 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 /*
 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
-* source nor the destination are in swap or dss.
+* source nor the destination are in dss.
 */
 #ifdef JEMALLOC_MREMAP_FIXED
-if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
-false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
-false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
-chunk_in_dss(ret) == false))) {
+if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+== false && chunk_in_dss(ret) == false))) {
 size_t newsize = huge_salloc(ret);
 /*
@@ -280,7 +278,7 @@ huge_dalloc(void *ptr, bool unmap)
 malloc_mutex_unlock(&huge_mtx);
-if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
+if (unmap && config_fill && config_dss && opt_junk)
 memset(node->addr, 0x5a, node->size);
 chunk_dealloc(node->addr, node->size, unmap);

src/jemalloc.c

@@ -610,9 +610,6 @@ malloc_conf_init(void)
 CONF_HANDLE_BOOL(prof_gdump)
 CONF_HANDLE_BOOL(prof_leak)
 }
-if (config_swap) {
-CONF_HANDLE_BOOL(overcommit)
-}
 malloc_conf_error("Invalid conf pair", k, klen, v,
 vlen);
 #undef CONF_HANDLE_BOOL
@@ -1629,9 +1626,6 @@ jemalloc_prefork(void)
 if (config_dss)
 malloc_mutex_lock(&dss_mtx);
-if (config_swap)
-malloc_mutex_lock(&swap_mtx);
 }
 void
@@ -1641,9 +1635,6 @@ jemalloc_postfork(void)
 /* Release all mutexes, now that fork() has completed. */
-if (config_swap)
-malloc_mutex_unlock(&swap_mtx);
 if (config_dss)
 malloc_mutex_unlock(&dss_mtx);

src/stats.c

@@ -525,7 +525,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 OPT_WRITE_SSIZE_T(lg_prof_interval)
 OPT_WRITE_BOOL(prof_gdump)
 OPT_WRITE_BOOL(prof_leak)
-OPT_WRITE_BOOL(overcommit)
 #undef OPT_WRITE_BOOL
 #undef OPT_WRITE_SIZE_T
@@ -668,11 +667,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 }
 if (config_stats) {
-int err;
 size_t sszp, ssz;
 size_t *cactive;
 size_t allocated, active, mapped;
-size_t chunks_current, chunks_high, swap_avail;
+size_t chunks_current, chunks_high;
 uint64_t chunks_total;
 size_t huge_allocated;
 uint64_t huge_nmalloc, huge_ndalloc;
@@ -694,24 +692,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
 CTL_GET("stats.chunks.high", &chunks_high, size_t);
 CTL_GET("stats.chunks.current", &chunks_current, size_t);
-if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
-NULL, 0)) == 0) {
-size_t lg_chunk;
-malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
-"highchunks curchunks swap_avail\n");
-CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
-malloc_cprintf(write_cb, cbopaque,
-" %13"PRIu64"%13zu%13zu%13zu\n",
-chunks_total, chunks_high, chunks_current,
-swap_avail << lg_chunk);
-} else {
-malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
-"highchunks curchunks\n");
-malloc_cprintf(write_cb, cbopaque,
-" %13"PRIu64"%13zu%13zu\n",
-chunks_total, chunks_high, chunks_current);
-}
+malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
+"highchunks curchunks\n");
+malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
+chunks_total, chunks_high, chunks_current);
 /* Print huge stats. */
 CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);