Merge branch 'dev'

This commit is contained in:
Jason Evans 2010-12-03 17:05:01 -08:00
commit 1c4b088b08
22 changed files with 2676 additions and 1867 deletions

7
.gitignore vendored
View File

@ -3,6 +3,10 @@
/jemalloc/config.log
/jemalloc/config.status
/jemalloc/configure
/jemalloc/doc/html.xsl
/jemalloc/doc/manpages.xsl
/jemalloc/doc/jemalloc.xml
/jemalloc/doc/jemalloc.html
/jemalloc/doc/jemalloc.3
/jemalloc/lib/
/jemalloc/Makefile
@ -13,4 +17,7 @@
/jemalloc/src/*.[od]
/jemalloc/test/*.[od]
/jemalloc/test/*.out
/jemalloc/test/[a-z]*
!/jemalloc/test/*.c
!/jemalloc/test/*.exp
/jemalloc/VERSION

View File

@ -6,6 +6,23 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git
* 2.1.0
This version incorporates some optimizations that can't quite be considered
bug fixes.
New features:
- Use Linux's mremap(2) for huge object reallocation when possible.
- Avoid locking in mallctl*() when possible.
- Add the "thread.[de]allocatedp" mallctl's.
- Convert the manual page source from roff to DocBook, and generate both roff
and HTML manuals.
Bug fixes:
- Fix a crash due to incorrect bootstrap ordering. This only impacted
--enable-debug --enable-dss configurations.
- Fix a minor statistics bug for mallctl("swap.avail", ...).
* 2.0.1
Bug fixes:

View File

@ -132,8 +132,11 @@ any of the following arguments (not a definitive list) to 'configure':
--disable-tls
Disable thread-local storage (TLS), which allows for fast access to
thread-local variables via the __thread keyword. If TLS is available,
jemalloc uses it for several purposes. Note that disabling TLS implies
--disable-tcache.
jemalloc uses it for several purposes.
--with-xslroot=<path>
Specify where to find DocBook XSL stylesheets when building the
documentation.
The following environment variables (not a definitive list) impact configure's
behavior:
@ -172,7 +175,7 @@ To install only parts of jemalloc, use the following targets:
install_bin
install_include
install_lib
install_man
install_doc
To clean up build results to varying degrees, use the following make targets:
@ -232,11 +235,12 @@ directory, issue configuration and build commands:
=== Documentation ==============================================================
The manual page that the configure script generates can be manually formatted
The manual page is generated in both html and roff formats. Any web browser
can be used to view the html manual. The roff manual page can be formatted
prior to installation via any of the following commands:
nroff -man -man-ext -t doc/jemalloc.3
nroff -man -t doc/jemalloc.3
groff -man -man-ext -t -Tps doc/jemalloc.3 | ps2pdf - doc/jemalloc.3.pdf
groff -man -t -Tps doc/jemalloc.3 | ps2pdf - doc/jemalloc.3.pdf
(cd doc; groff -man -man-ext -t -Thtml jemalloc.3 > jemalloc.3.html)

View File

@ -15,6 +15,7 @@ DESTDIR =
BINDIR := $(DESTDIR)@BINDIR@
INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
LIBDIR := $(DESTDIR)@LIBDIR@
DATADIR := $(DESTDIR)@DATADIR@
MANDIR := $(DESTDIR)@MANDIR@
# Build parameters.
@ -58,15 +59,34 @@ DSOS := @objroot@lib/libjemalloc@install_suffix@.$(SO).$(REV) \
@objroot@lib/libjemalloc@install_suffix@.$(SO) \
@objroot@lib/libjemalloc@install_suffix@_pic.a
MAN3 := @objroot@doc/jemalloc@install_suffix@.3
DOCS_XML := @objroot@doc/jemalloc@install_suffix@.xml
DOCS_HTML := $(DOCS_XML:@objroot@%.xml=@srcroot@%.html)
DOCS_MAN3 := $(DOCS_XML:@objroot@%.xml=@srcroot@%.3)
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
CTESTS := @srcroot@test/allocated.c @srcroot@test/allocm.c \
@srcroot@test/posix_memalign.c \
@srcroot@test/mremap.c @srcroot@test/posix_memalign.c \
@srcroot@test/rallocm.c @srcroot@test/thread_arena.c
.PHONY: all dist install check clean distclean relclean
.PHONY: all dist doc_html doc_man doc
.PHONY: install_bin install_include install_lib
.PHONY: install_html install_man install_doc install
.PHONY: tests check clean distclean relclean
# Default target.
all: $(DSOS)
dist: doc
@srcroot@doc/%.html : @objroot@doc/%.xml @srcroot@doc/stylesheet.xsl @objroot@doc/html.xsl
@XSLTPROC@ -o $@ @objroot@doc/html.xsl $<
@srcroot@doc/%.3 : @objroot@doc/%.xml @srcroot@doc/stylesheet.xsl @objroot@doc/manpages.xsl
@XSLTPROC@ -o $@ @objroot@doc/manpages.xsl $<
doc_html: $(DOCS_HTML)
doc_man: $(DOCS_MAN3)
doc: $(DOCS)
#
# Include generated dependency files.
#
@ -123,14 +143,23 @@ install_lib: $(DSOS)
ln -sf libjemalloc@install_suffix@.$(SO).$(REV) $(LIBDIR)/libjemalloc@install_suffix@.$(SO)
install -m 755 @objroot@lib/libjemalloc@install_suffix@_pic.a $(LIBDIR)
install_man:
install -d $(MANDIR)/man3
@for m in $(MAN3); do \
echo "install -m 644 $$m $(MANDIR)/man3"; \
install -m 644 $$m $(MANDIR)/man3; \
install_html:
install -d $(DATADIR)/doc/jemalloc@install_suffix@
@for d in $(DOCS_HTML); do \
echo "install -m 644 $$d $(DATADIR)/doc/jemalloc@install_suffix@"; \
install -m 644 $$d $(DATADIR)/doc/jemalloc@install_suffix@; \
done
install: install_bin install_include install_lib install_man
install_man:
install -d $(MANDIR)/man3
@for d in $(DOCS_MAN3); do \
echo "install -m 644 $$d $(MANDIR)/man3"; \
install -m 644 $$d $(MANDIR)/man3; \
done
install_doc: install_html install_man
install: install_bin install_include install_lib install_doc
tests: $(CTESTS:@srcroot@%.c=@objroot@%)
@ -182,6 +211,8 @@ distclean: clean
relclean: distclean
rm -f @objroot@configure
rm -f @srcroot@VERSION
rm -f $(DOCS_HTML)
rm -f $(DOCS_MAN3)
#===============================================================================
# Re-configuration rules.

View File

@ -80,6 +80,19 @@ MANDIR=`eval echo $mandir`
MANDIR=`eval echo $MANDIR`
AC_SUBST([MANDIR])
dnl Support for building documentation.
AC_PATH_PROG([XSLTPROC], [xsltproc], , [$PATH])
AC_ARG_WITH([xslroot],
[AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])],
if test "x$with_xslroot" = "xno" ; then
XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
else
XSLROOT="${with_xslroot}"
fi,
XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
)
AC_SUBST([XSLROOT])
dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise,
dnl just prevent autoconf from molesting CFLAGS.
CFLAGS=$CFLAGS
@ -214,6 +227,16 @@ esac
AC_SUBST([abi])
AC_SUBST([RPATH])
JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [
#define _GNU_SOURCE
#include <sys/mman.h>
], [
void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
], [mremap_fixed])
if test "x${mremap_fixed}" = "xyes" ; then
AC_DEFINE([JEMALLOC_MREMAP_FIXED])
fi
dnl Support optional additions to rpath.
AC_ARG_WITH([rpath],
[AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
@ -275,17 +298,26 @@ AC_ARG_WITH([install_suffix],
install_suffix="$INSTALL_SUFFIX"
AC_SUBST([install_suffix])
cfgoutputs_in="${srcroot}Makefile.in ${srcroot}doc/jemalloc.3.in"
cfgoutputs_in="${srcroot}Makefile.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in"
cfgoutputs_out="Makefile doc/jemalloc${install_suffix}.3"
cfgoutputs_out="Makefile"
cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h"
cfgoutputs_tup="Makefile doc/jemalloc${install_suffix}.3:doc/jemalloc.3.in"
cfgoutputs_tup="Makefile"
cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in"
@ -329,15 +361,6 @@ if test "x$enable_debug" = "x1" ; then
AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
fi
AC_SUBST([enable_debug])
if test "x$enable_debug" = "x0" ; then
roff_debug=".\\\" "
roff_no_debug=""
else
roff_debug=""
roff_no_debug=".\\\" "
fi
AC_SUBST([roff_debug])
AC_SUBST([roff_no_debug])
dnl Only optimize if not debugging.
if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
@ -369,12 +392,6 @@ if test "x$enable_stats" = "x1" ; then
AC_DEFINE([JEMALLOC_STATS], [ ])
fi
AC_SUBST([enable_stats])
if test "x$enable_stats" = "x0" ; then
roff_stats=".\\\" "
else
roff_stats=""
fi
AC_SUBST([roff_stats])
dnl Do not enable profiling by default.
AC_ARG_ENABLE([prof],
@ -438,15 +455,6 @@ if test "x$enable_prof" = "x1" ; then
fi
fi
AC_SUBST([enable_prof])
if test "x$enable_prof" = "x0" ; then
roff_prof=".\\\" "
roff_no_prof=""
else
roff_prof=""
roff_no_prof=".\\\" "
fi
AC_SUBST([roff_prof])
AC_SUBST([roff_no_prof])
dnl If libunwind isn't enabled, try to use libgcc rather than gcc intrinsics
dnl for backtracing.
@ -478,15 +486,6 @@ if test "x$enable_tiny" = "x1" ; then
AC_DEFINE([JEMALLOC_TINY], [ ])
fi
AC_SUBST([enable_tiny])
if test "x$enable_tiny" = "x0" ; then
roff_tiny=".\\\" "
roff_no_tiny=""
else
roff_tiny=""
roff_no_tiny=".\\\" "
fi
AC_SUBST([roff_tiny])
AC_SUBST([roff_no_tiny])
dnl Enable thread-specific caching by default.
AC_ARG_ENABLE([tcache],
@ -503,15 +502,6 @@ if test "x$enable_tcache" = "x1" ; then
AC_DEFINE([JEMALLOC_TCACHE], [ ])
fi
AC_SUBST([enable_tcache])
if test "x$enable_tcache" = "x0" ; then
roff_tcache=".\\\" "
roff_no_tcache=""
else
roff_tcache=""
roff_no_tcache=".\\\" "
fi
AC_SUBST([roff_tcache])
AC_SUBST([roff_no_tcache])
dnl Do not enable mmap()ped swap files by default.
AC_ARG_ENABLE([swap],
@ -528,12 +518,6 @@ if test "x$enable_swap" = "x1" ; then
AC_DEFINE([JEMALLOC_SWAP], [ ])
fi
AC_SUBST([enable_swap])
if test "x$enable_swap" = "x0" ; then
roff_swap=".\\\" "
else
roff_swap=""
fi
AC_SUBST([roff_swap])
dnl Do not enable allocation from DSS by default.
AC_ARG_ENABLE([dss],
@ -550,12 +534,6 @@ if test "x$enable_dss" = "x1" ; then
AC_DEFINE([JEMALLOC_DSS], [ ])
fi
AC_SUBST([enable_dss])
if test "x$enable_dss" = "x0" ; then
roff_dss=".\\\" "
else
roff_dss=""
fi
AC_SUBST([roff_dss])
dnl Do not support the junk/zero filling option by default.
AC_ARG_ENABLE([fill],
@ -572,12 +550,6 @@ if test "x$enable_fill" = "x1" ; then
AC_DEFINE([JEMALLOC_FILL], [ ])
fi
AC_SUBST([enable_fill])
if test "x$enable_fill" = "x0" ; then
roff_fill=".\\\" "
else
roff_fill=""
fi
AC_SUBST([roff_fill])
dnl Do not support the xmalloc option by default.
AC_ARG_ENABLE([xmalloc],
@ -594,12 +566,6 @@ if test "x$enable_xmalloc" = "x1" ; then
AC_DEFINE([JEMALLOC_XMALLOC], [ ])
fi
AC_SUBST([enable_xmalloc])
if test "x$enable_xmalloc" = "x0" ; then
roff_xmalloc=".\\\" "
else
roff_xmalloc=""
fi
AC_SUBST([roff_xmalloc])
dnl Do not support the SYSV option by default.
AC_ARG_ENABLE([sysv],
@ -616,12 +582,6 @@ if test "x$enable_sysv" = "x1" ; then
AC_DEFINE([JEMALLOC_SYSV], [ ])
fi
AC_SUBST([enable_sysv])
if test "x$enable_sysv" = "x0" ; then
roff_sysv=".\\\" "
else
roff_sysv=""
fi
AC_SUBST([roff_sysv])
dnl Do not determine page shift at run time by default.
AC_ARG_ENABLE([dynamic_page_shift],
@ -828,6 +788,9 @@ AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
AC_MSG_RESULT([LIBS : ${LIBS}])
AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
AC_MSG_RESULT([])
AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}])
AC_MSG_RESULT([XSLROOT : ${XSLROOT}])
AC_MSG_RESULT([])
AC_MSG_RESULT([PREFIX : ${PREFIX}])
AC_MSG_RESULT([BINDIR : ${BINDIR}])
AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}])

4
jemalloc/doc/html.xsl.in Normal file
View File

@ -0,0 +1,4 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="@XSLROOT@/html/docbook.xsl"/>
<xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
</xsl:stylesheet>

File diff suppressed because it is too large Load Diff

2251
jemalloc/doc/jemalloc.xml.in Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,4 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="@XSLROOT@/manpages/docbook.xsl"/>
<xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
</xsl:stylesheet>

View File

@ -0,0 +1,7 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:param name="funcsynopsis.style">ansi</xsl:param>
<xsl:param name="function.parens" select="1"/>
<xsl:template match="mallctl">
"<xsl:call-template name="inline.monoseq"/>"
</xsl:template>
</xsl:stylesheet>

View File

@ -17,6 +17,7 @@
extern malloc_mutex_t dss_mtx;
void *chunk_alloc_dss(size_t size, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dealloc_dss(void *chunk, size_t size);
bool chunk_dss_boot(void);

View File

@ -20,6 +20,7 @@ extern size_t swap_avail;
#endif
void *chunk_alloc_swap(size_t size, bool *zero);
bool chunk_in_swap(void *chunk);
bool chunk_dealloc_swap(void *chunk, size_t size);
bool chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed);
bool chunk_swap_boot(void);

View File

@ -25,7 +25,7 @@ void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
void huge_dalloc(void *ptr);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
prof_ctx_t *huge_prof_ctx_get(const void *ptr);

View File

@ -666,7 +666,7 @@ idalloc(void *ptr)
if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr);
else
huge_dalloc(ptr);
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void *

View File

@ -115,6 +115,9 @@
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */
#undef JEMALLOC_MREMAP_FIXED
/*
* Methods for purging unused pages differ between operating systems.
*

View File

@ -146,11 +146,6 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> PAGE_SHIFT);
#ifdef JEMALLOC_IVSALLOC
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (malloc_mutex_init(&chunks_mtx))
return (true);
@ -166,6 +161,11 @@ chunk_boot(void)
if (chunk_dss_boot())
return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
#endif
return (false);
}

View File

@ -199,6 +199,22 @@ chunk_dealloc_dss_record(void *chunk, size_t size)
return (node);
}
/*
 * Report whether chunk lies inside the DSS (sbrk-managed) region.
 * Acquires dss_mtx so that dss_base/dss_max are read consistently.
 */
bool
chunk_in_dss(void *chunk)
{
	bool in_dss;
	uintptr_t addr;

	addr = (uintptr_t)chunk;

	malloc_mutex_lock(&dss_mtx);
	in_dss = (addr >= (uintptr_t)dss_base && addr < (uintptr_t)dss_max);
	malloc_mutex_unlock(&dss_mtx);

	return (in_dss);
}
bool
chunk_dealloc_dss(void *chunk, size_t size)
{

View File

@ -184,6 +184,24 @@ chunk_dealloc_swap_record(void *chunk, size_t size)
return (node);
}
/*
 * Report whether chunk lies inside the mmap()ed swap-file region.
 * Caller must have enabled swap; swap_mtx guards swap_base/swap_max.
 */
bool
chunk_in_swap(void *chunk)
{
	bool in_swap;
	uintptr_t addr;

	assert(swap_enabled);

	addr = (uintptr_t)chunk;

	malloc_mutex_lock(&swap_mtx);
	in_swap = (addr >= (uintptr_t)swap_base && addr < (uintptr_t)swap_max);
	malloc_mutex_unlock(&swap_mtx);

	return (in_swap);
}
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
@ -219,15 +237,15 @@ chunk_dealloc_swap(void *chunk, size_t size)
} else
madvise(chunk, size, MADV_DONTNEED);
#ifdef JEMALLOC_STATS
swap_avail += size;
#endif
ret = false;
goto RETURN;
}
ret = true;
RETURN:
#ifdef JEMALLOC_STATS
swap_avail += size;
#endif
malloc_mutex_unlock(&swap_mtx);
return (ret);
}

View File

@ -4,6 +4,13 @@
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
* - swap_enabled
* - swap_prezeroed
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static uint64_t ctl_epoch;
@ -44,7 +51,9 @@ CTL_PROTO(tcache_flush)
CTL_PROTO(thread_arena)
#ifdef JEMALLOC_STATS
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
#endif
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
@ -223,7 +232,9 @@ static const ctl_node_t thread_node[] = {
#ifdef JEMALLOC_STATS
,
{NAME("allocated"), CTL(thread_allocated)},
{NAME("deallocated"), CTL(thread_deallocated)}
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)}
#endif
};
@ -680,7 +691,9 @@ ctl_refresh(void)
static bool
ctl_init(void)
{
bool ret;
malloc_mutex_lock(&ctl_mtx);
if (ctl_initialized == false) {
#ifdef JEMALLOC_STATS
unsigned i;
@ -692,8 +705,10 @@ ctl_init(void)
*/
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
(narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL)
return (true);
if (ctl_stats.arenas == NULL) {
ret = true;
goto RETURN;
}
memset(ctl_stats.arenas, 0, (narenas + 1) *
sizeof(ctl_arena_stats_t));
@ -704,8 +719,10 @@ ctl_init(void)
*/
#ifdef JEMALLOC_STATS
for (i = 0; i <= narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i]))
return (true);
if (ctl_arena_init(&ctl_stats.arenas[i])) {
ret = true;
goto RETURN;
}
}
#endif
ctl_stats.arenas[narenas].initialized = true;
@ -715,7 +732,10 @@ ctl_init(void)
ctl_initialized = true;
}
return (false);
ret = false;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
@ -825,8 +845,7 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
malloc_mutex_lock(&ctl_mtx);
if (ctl_init()) {
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
goto RETURN;
}
@ -841,10 +860,9 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
ret = ENOENT;
goto RETURN;
}
ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return(ret);
}
@ -853,16 +871,13 @@ ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
malloc_mutex_lock(&ctl_mtx);
if (ctl_init()) {
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
goto RETURN;
}
ret = ctl_lookup(name, NULL, mibp, miblenp);
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return(ret);
}
@ -874,8 +889,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
const ctl_node_t *node;
size_t i;
malloc_mutex_lock(&ctl_mtx);
if (ctl_init()) {
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
goto RETURN;
}
@ -912,7 +926,6 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return(ret);
}
@ -975,6 +988,29 @@ ctl_boot(void)
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = v; \
READ(oldval, t); \
\
ret = 0; \
RETURN: \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
@ -1024,7 +1060,7 @@ RETURN: \
return (ret); \
}
CTL_RO_GEN(version, JEMALLOC_VERSION, const char *)
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@ -1033,6 +1069,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
newval = 0;
WRITE(newval, uint64_t);
if (newval != 0)
@ -1041,6 +1078,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@ -1107,8 +1145,10 @@ RETURN:
}
#ifdef JEMALLOC_STATS
CTL_RO_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
CTL_RO_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
CTL_RO_NL_GEN(thread_allocatedp, &ALLOCATED_GET(), uint64_t *);
CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
CTL_RO_NL_GEN(thread_deallocatedp, &DEALLOCATED_GET(), uint64_t *);
#endif
/******************************************************************************/
@ -1205,48 +1245,48 @@ CTL_RO_FALSE_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_GEN(opt_abort, opt_abort, bool)
CTL_RO_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
CTL_RO_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
CTL_RO_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
#ifdef JEMALLOC_FILL
CTL_RO_GEN(opt_junk, opt_junk, bool)
CTL_RO_GEN(opt_zero, opt_zero, bool)
CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
#endif
#ifdef JEMALLOC_SYSV
CTL_RO_GEN(opt_sysv, opt_sysv, bool)
CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
#endif
#ifdef JEMALLOC_XMALLOC
CTL_RO_GEN(opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
#endif
#ifdef JEMALLOC_TCACHE
CTL_RO_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
#endif
#ifdef JEMALLOC_PROF
CTL_RO_GEN(opt_prof, opt_prof, bool)
CTL_RO_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_GEN(opt_prof_active, opt_prof_active, bool)
CTL_RO_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
CTL_RO_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_GEN(opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_GEN(opt_prof_leak, opt_prof_leak, bool)
CTL_RO_GEN(opt_prof_accum, opt_prof_accum, bool)
CTL_RO_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
#endif
#ifdef JEMALLOC_SWAP
CTL_RO_GEN(opt_overcommit, opt_overcommit, bool)
CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
#endif
/******************************************************************************/
CTL_RO_GEN(arenas_bin_i_size, arenas[0]->bins[mib[2]].reg_size, size_t)
CTL_RO_GEN(arenas_bin_i_nregs, arenas[0]->bins[mib[2]].nregs, uint32_t)
CTL_RO_GEN(arenas_bin_i_run_size, arenas[0]->bins[mib[2]].run_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_size, arenas[0]->bins[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arenas[0]->bins[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arenas[0]->bins[mib[2]].run_size, size_t)
const ctl_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
@ -1256,7 +1296,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_bin_i_node);
}
CTL_RO_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
const ctl_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
@ -1266,7 +1306,7 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node);
}
CTL_RO_GEN(arenas_narenas, narenas, unsigned)
CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@ -1275,6 +1315,7 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned nread, i;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != narenas * sizeof(bool)) {
ret = EINVAL;
@ -1289,36 +1330,37 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
CTL_RO_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_GEN(arenas_cacheline, CACHELINE, size_t)
CTL_RO_GEN(arenas_subpage, SUBPAGE, size_t)
CTL_RO_GEN(arenas_pagesize, PAGE_SIZE, size_t)
CTL_RO_GEN(arenas_chunksize, chunksize, size_t)
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
#ifdef JEMALLOC_TINY
CTL_RO_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
CTL_RO_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
#endif
CTL_RO_GEN(arenas_qspace_min, qspace_min, size_t)
CTL_RO_GEN(arenas_qspace_max, qspace_max, size_t)
CTL_RO_GEN(arenas_cspace_min, cspace_min, size_t)
CTL_RO_GEN(arenas_cspace_max, cspace_max, size_t)
CTL_RO_GEN(arenas_sspace_min, sspace_min, size_t)
CTL_RO_GEN(arenas_sspace_max, sspace_max, size_t)
CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
#ifdef JEMALLOC_TCACHE
CTL_RO_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
#endif
CTL_RO_GEN(arenas_ntbins, ntbins, unsigned)
CTL_RO_GEN(arenas_nqbins, nqbins, unsigned)
CTL_RO_GEN(arenas_ncbins, ncbins, unsigned)
CTL_RO_GEN(arenas_nsbins, nsbins, unsigned)
CTL_RO_GEN(arenas_nbins, nbins, unsigned)
CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
#ifdef JEMALLOC_TCACHE
CTL_RO_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
#endif
CTL_RO_GEN(arenas_nlruns, nlclasses, size_t)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@ -1368,6 +1410,7 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
bool oldval;
malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
oldval = opt_prof_active;
if (newp != NULL) {
/*
@ -1382,6 +1425,7 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@ -1405,7 +1449,7 @@ RETURN:
return (ret);
}
CTL_RO_GEN(prof_interval, prof_interval, uint64_t)
CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
#endif
/******************************************************************************/
@ -1503,10 +1547,18 @@ CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
const ctl_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_node_t * ret;
if (ctl_stats.arenas[i].initialized == false)
return (NULL);
return (super_stats_arenas_i_node);
malloc_mutex_lock(&ctl_mtx);
if (ctl_stats.arenas[i].initialized == false) {
ret = NULL;
goto RETURN;
}
ret = super_stats_arenas_i_node;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
#ifdef JEMALLOC_STATS
@ -1528,6 +1580,7 @@ swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
{
int ret;
malloc_mutex_lock(&ctl_mtx);
if (swap_enabled) {
READONLY();
} else {
@ -1545,6 +1598,7 @@ swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@ -1556,6 +1610,7 @@ swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
{
int ret;
malloc_mutex_lock(&ctl_mtx);
if (swap_enabled) {
READONLY();
} else if (newp != NULL) {
@ -1586,6 +1641,7 @@ swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
RETURN:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
#endif

View File

@ -215,13 +215,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
idalloc(ptr);
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination are in swap or dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
&& (swap_enabled == false || (chunk_in_swap(ptr) == false &&
chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
&& chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
) {
size_t newsize = huge_salloc(ret);
if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
ret) == MAP_FAILED) {
/*
* Assuming no chunk management bugs in the allocator,
* the only documented way an error can occur here is
* if the application changed the map type for a
* portion of the old allocation. This is firmly in
* undefined behavior territory, so write a diagnostic
* message, and optionally abort.
*/
char buf[BUFERROR_BUF];
buferror(errno, buf, sizeof(buf));
malloc_write("<jemalloc>: Error in mremap(): ");
malloc_write(buf);
malloc_write("\n");
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
idalloc(ptr);
} else
huge_dalloc(ptr, false);
} else
#endif
{
memcpy(ret, ptr, copysize);
idalloc(ptr);
}
return (ret);
}
void
huge_dalloc(void *ptr)
huge_dalloc(void *ptr, bool unmap)
{
extent_node_t *node, key;
@ -241,14 +284,16 @@ huge_dalloc(void *ptr)
malloc_mutex_unlock(&huge_mtx);
if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
if (opt_junk)
memset(node->addr, 0x5a, node->size);
if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
#endif
chunk_dealloc(node->addr, node->size);
chunk_dealloc(node->addr, node->size);
}
base_node_dealloc(node);
}

67
jemalloc/test/mremap.c Normal file
View File

@ -0,0 +1,67 @@
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"
/*
 * Exercise huge-object reallocation (the mremap(2) path when available):
 * allocate one chunk, grow to two chunks, shrink back to one, verifying
 * that the original contents survive each realloc.
 */
int
main(void)
{
	int ret, err;
	size_t sz, lg_chunk, chunksize, i;
	char *p, *q;

	fprintf(stderr, "Test begin\n");

	/* Query the chunk size so the allocations below are chunk-aligned. */
	sz = sizeof(lg_chunk);
	if ((err = JEMALLOC_P(mallctl)("opt.lg_chunk", &lg_chunk, &sz, NULL,
	    0))) {
		assert(err != ENOENT);
		fprintf(stderr, "%s(): Error in mallctl(): %s\n", __func__,
		    strerror(err));
		ret = 1;
		goto RETURN;
	}
	chunksize = ((size_t)1U) << lg_chunk;

	p = (char *)malloc(chunksize);
	if (p == NULL) {
		fprintf(stderr, "malloc(%zu) --> %p\n", chunksize, p);
		ret = 1;
		goto RETURN;
	}
	memset(p, 'a', chunksize);

	/* Grow: chunksize --> 2*chunksize. */
	q = (char *)realloc(p, chunksize * 2);
	if (q == NULL) {
		fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize * 2,
		    q);
		/* realloc() failure leaves p valid; avoid leaking it. */
		free(p);
		ret = 1;
		goto RETURN;
	}
	for (i = 0; i < chunksize; i++) {
		assert(q[i] == 'a');
	}

	/* Shrink: 2*chunksize --> chunksize. */
	p = q;
	q = (char *)realloc(p, chunksize);
	if (q == NULL) {
		fprintf(stderr, "realloc(%p, %zu) --> %p\n", p, chunksize, q);
		/* realloc() failure leaves p valid; avoid leaking it. */
		free(p);
		ret = 1;
		goto RETURN;
	}
	for (i = 0; i < chunksize; i++) {
		assert(q[i] == 'a');
	}

	free(q);

	ret = 0;
RETURN:
	fprintf(stderr, "Test end\n");
	return (ret);
}

2
jemalloc/test/mremap.exp Normal file
View File

@ -0,0 +1,2 @@
Test begin
Test end